
Fix: Non-Streaming Models Do Not Return Results Properly in _handle_invoke_result (#13571)

Co-authored-by: crazywoola <427733928@qq.com>
Vasu Negi, 2 months ago
commit 8a0aa91ed7
1 changed file with 18 additions and 0 deletions

api/core/workflow/nodes/llm/node.py  +18 -0

@@ -247,6 +247,24 @@ class LLMNode(BaseNode[LLMNodeData]):
 
     def _handle_invoke_result(self, invoke_result: LLMResult | Generator) -> Generator[NodeEvent, None, None]:
         if isinstance(invoke_result, LLMResult):
+            content = invoke_result.message.content
+            if content is None:
+                message_text = ""
+            elif isinstance(content, str):
+                message_text = content
+            elif isinstance(content, list):
+                # Assuming the list contains PromptMessageContent objects with a "data" attribute
+                message_text = "".join(
+                    item.data if hasattr(item, "data") and isinstance(item.data, str) else str(item) for item in content
+                )
+            else:
+                message_text = str(content)
+
+            yield ModelInvokeCompletedEvent(
+                text=message_text,
+                usage=invoke_result.usage,
+                finish_reason=None,
+            )
             return
 
         model = None
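
For context, here is a minimal, self-contained sketch of what this patch changes. It uses stand-in dataclasses (the Stub* names below are hypothetical, not Dify's actual LLMResult / LLMUsage / ModelInvokeCompletedEvent types): before this commit, the non-streaming branch was a bare return, so callers iterating the generator received no completion event at all.

# A minimal sketch of the patched branch, assuming stand-in types;
# all Stub* names are hypothetical, not Dify's real classes.

from dataclasses import dataclass
from typing import Any, Generator


@dataclass
class StubUsage:           # stand-in for LLMUsage
    total_tokens: int


@dataclass
class StubMessage:         # content may be None, a str, or a list of parts
    content: Any


@dataclass
class StubLLMResult:       # stand-in for LLMResult (non-streaming result)
    message: StubMessage
    usage: StubUsage


@dataclass
class StubCompletedEvent:  # stand-in for ModelInvokeCompletedEvent
    text: str
    usage: StubUsage
    finish_reason: Any


def handle_invoke_result(invoke_result) -> Generator[StubCompletedEvent, None, None]:
    if isinstance(invoke_result, StubLLMResult):
        # Before this commit the branch was a bare `return`, so the
        # generator finished without yielding and non-streaming callers
        # never received a completion event.
        content = invoke_result.message.content
        if content is None:
            message_text = ""
        elif isinstance(content, str):
            message_text = content
        elif isinstance(content, list):
            # Multimodal content: concatenate parts that expose a string
            # `data` attribute; fall back to str() for anything else.
            message_text = "".join(
                item.data if hasattr(item, "data") and isinstance(item.data, str) else str(item)
                for item in content
            )
        else:
            message_text = str(content)
        yield StubCompletedEvent(text=message_text, usage=invoke_result.usage, finish_reason=None)
        return
    yield from invoke_result  # streaming path, unchanged by this commit


result = StubLLMResult(StubMessage("hello"), StubUsage(total_tokens=5))
events = list(handle_invoke_result(result))
assert events and events[0].text == "hello"   # before the fix: events == []

One design choice worth noting: the list branch string-joins heterogeneous content parts instead of raising, so an unexpected part type degrades to its str() form rather than breaking the node.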