瀏覽代碼

fix: deduct LLM quota after processing invoke result (#13075)

Signed-off-by: -LAN- <laipz8200@outlook.com>
-LAN- 2 月之前
父節點
當前提交
b47669b80b
共有 1 個文件被更改,包括 3 次插入和 11 次刪除
  1. 3 11
      api/core/workflow/nodes/llm/node.py

+ 3 - 11
api/core/workflow/nodes/llm/node.py

@@ -185,6 +185,8 @@ class LLMNode(BaseNode[LLMNodeData]):
                     result_text = event.text
                     usage = event.usage
                     finish_reason = event.finish_reason
+                    # deduct quota
+                    self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
                     break
         except LLMNodeError as e:
             yield RunCompletedEvent(
@@ -240,17 +242,7 @@ class LLMNode(BaseNode[LLMNodeData]):
             user=self.user_id,
         )
 
-        # handle invoke result
-        generator = self._handle_invoke_result(invoke_result=invoke_result)
-
-        usage = LLMUsage.empty_usage()
-        for event in generator:
-            yield event
-            if isinstance(event, ModelInvokeCompletedEvent):
-                usage = event.usage
-
-        # deduct quota
-        self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
+        return self._handle_invoke_result(invoke_result=invoke_result)
 
     def _handle_invoke_result(self, invoke_result: LLMResult | Generator) -> Generator[NodeEvent, None, None]:
         if isinstance(invoke_result, LLMResult):