
Fix code indentation errors (#9164)

Charlie.Wei committed 6b6e94da08 6 months ago
1 file changed, 28 insertions(+), 28 deletions(-)
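At the old indentation, everything from the `_clear_illegal_prompt_messages` call down to the final `return` sat inside the `if user:` guard, so a request without a `user` value skipped the chat-completion call entirely and fell off the end of the method. The hunk below dedents that whole span by one level. A minimal sketch of the bug pattern, using hypothetical names rather than the Dify code itself:

    # Hypothetical sketch of the over-indentation bug; names are illustrative.
    def call_model_buggy(client, model, messages, user=None):
        extra = {}
        if user:
            extra["user"] = user
            # BUG: over-indented -- the API call only runs when `user` is truthy.
            return client.chat.completions.create(model=model, messages=messages, **extra)
        # Without a user, control falls through and the method returns None.

    def call_model_fixed(client, model, messages, user=None):
        extra = {}
        if user:
            extra["user"] = user
        # Dedented: the call is unconditional; `user` stays an optional kwarg.
        return client.chat.completions.create(model=model, messages=messages, **extra)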

api/core/model_runtime/model_providers/azure_openai/llm/llm.py (+28, -28)

@@ -312,39 +312,39 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
         if user:
             extra_model_kwargs["user"] = user
 
-            # clear illegal prompt messages
-            prompt_messages = self._clear_illegal_prompt_messages(model, prompt_messages)
-
-            block_as_stream = False
-            if model.startswith("o1"):
-                if stream:
-                    block_as_stream = True
-                    stream = False
-
-                    if "stream_options" in extra_model_kwargs:
-                        del extra_model_kwargs["stream_options"]
-
-                if "stop" in extra_model_kwargs:
-                    del extra_model_kwargs["stop"]
-
-            # chat model
-            response = client.chat.completions.create(
-                messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages],
-                model=model,
-                stream=stream,
-                **model_parameters,
-                **extra_model_kwargs,
-            )
+        # clear illegal prompt messages
+        prompt_messages = self._clear_illegal_prompt_messages(model, prompt_messages)
 
+        block_as_stream = False
+        if model.startswith("o1"):
             if stream:
-                return self._handle_chat_generate_stream_response(model, credentials, response, prompt_messages, tools)
+                block_as_stream = True
+                stream = False
+
+                if "stream_options" in extra_model_kwargs:
+                    del extra_model_kwargs["stream_options"]
+
+            if "stop" in extra_model_kwargs:
+                del extra_model_kwargs["stop"]
+
+        # chat model
+        response = client.chat.completions.create(
+            messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages],
+            model=model,
+            stream=stream,
+            **model_parameters,
+            **extra_model_kwargs,
+        )
+
+        if stream:
+            return self._handle_chat_generate_stream_response(model, credentials, response, prompt_messages, tools)
 
-            block_result = self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools)
+        block_result = self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools)
 
-            if block_as_stream:
-                return self._handle_chat_block_as_stream_response(block_result, prompt_messages, stop)
+        if block_as_stream:
+            return self._handle_chat_block_as_stream_response(block_result, prompt_messages, stop)
 
-            return block_result
+        return block_result
 
     def _handle_chat_block_as_stream_response(
         self,
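One detail worth noting in the hunk above: because o1 models rejected streaming at the time, the code forces `stream = False`, drops the unsupported `stream_options` and `stop` parameters, and then replays the blocking result as a synthetic stream via `_handle_chat_block_as_stream_response`, so callers that asked for `stream=True` still receive an iterator. A minimal sketch of that replay pattern, with simplified hypothetical types rather than Dify's actual chunk classes:

    # Hedged sketch of "block as stream": yield the full blocking result
    # as a single chunk. Types are simplified assumptions, not Dify's.
    from collections.abc import Generator
    from dataclasses import dataclass

    @dataclass
    class Delta:
        content: str

    @dataclass
    class Chunk:
        delta: Delta

    def block_as_stream(full_text: str) -> Generator[Chunk, None, None]:
        # Callers that requested streaming still get an iterator,
        # just one that produces the whole completion at once.
        yield Chunk(delta=Delta(content=full_text))

    for chunk in block_as_stream("hello"):
        print(chunk.delta.content)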