
Fix the situation where output_tokens/input_tokens may be None in response.usage (#10728)

Ding Jiatong, 5 months ago
Commit 3087913b74
1 changed file with 7 additions and 8 deletions

api/core/model_runtime/model_providers/anthropic/llm/llm.py  +7 -8

@@ -325,14 +325,13 @@ class AnthropicLargeLanguageModel(LargeLanguageModel):
                 assistant_prompt_message.tool_calls.append(tool_call)
 
         # calculate num tokens
-        if response.usage:
-            # transform usage
-            prompt_tokens = response.usage.input_tokens
-            completion_tokens = response.usage.output_tokens
-        else:
-            # calculate num tokens
-            prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
-            completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message])
+        prompt_tokens = (response.usage and response.usage.input_tokens) or self.get_num_tokens(
+            model, credentials, prompt_messages
+        )
+
+        completion_tokens = (response.usage and response.usage.output_tokens) or self.get_num_tokens(
+            model, credentials, [assistant_prompt_message]
+        )
 
         # transform usage
         usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
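
For readers unfamiliar with the `(x and x.attr) or fallback` idiom the patch relies on, here is a minimal standalone sketch of the same fallback logic. The `Usage` dataclass and `estimate_tokens` helper are hypothetical stand-ins for illustration only; the real code reads `response.usage` from the Anthropic SDK response and falls back to `self.get_num_tokens`.

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class Usage:
    # Mirrors the shape of response.usage: either field may be None.
    input_tokens: Optional[int] = None
    output_tokens: Optional[int] = None


def estimate_tokens(text: str) -> int:
    # Hypothetical stand-in for self.get_num_tokens(): a rough
    # whitespace-based estimate, used only when the API omits usage.
    return len(text.split())


def resolve_prompt_tokens(usage: Optional[Usage], prompt_text: str) -> int:
    # Same pattern as the patch: `(usage and usage.input_tokens) or fallback`.
    # If `usage` is None, the `and` short-circuits to None; if the field
    # itself is None, the `or` picks the local estimate instead.
    return (usage and usage.input_tokens) or estimate_tokens(prompt_text)


# Usage reported by the API -> the reported value wins.
assert resolve_prompt_tokens(Usage(input_tokens=42), "hello world") == 42
# Usage object missing, or the field is None -> fall back to the estimate.
assert resolve_prompt_tokens(None, "hello world") == 2
assert resolve_prompt_tokens(Usage(input_tokens=None), "hello world") == 2
```

One caveat of this idiom: a legitimate token count of 0 reported by the API is falsy, so it would also trigger the local estimate. An explicit `usage is not None and usage.input_tokens is not None` check would preserve a reported zero, at the cost of the longer branching the patch removed.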