
feat: add chatgpt-4o-latest (#7289)

非法操作 8 months ago
parent
commit
5aa373dc04

+ 1 - 0
api/core/model_runtime/model_providers/openai/llm/_position.yaml

@@ -2,6 +2,7 @@
 - gpt-4o
 - gpt-4o-2024-05-13
 - gpt-4o-2024-08-06
+- chatgpt-4o-latest
 - gpt-4o-mini
 - gpt-4o-mini-2024-07-18
 - gpt-4-turbo

+ 44 - 0
api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml

@@ -0,0 +1,44 @@
+model: chatgpt-4o-latest
+label:
+  zh_Hans: chatgpt-4o-latest
+  en_US: chatgpt-4o-latest
+model_type: llm
+features:
+  - multi-tool-call
+  - agent-thought
+  - stream-tool-call
+  - vision
+model_properties:
+  mode: chat
+  context_size: 128000
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 16384
+  - name: response_format
+    label:
+      zh_Hans: 回复格式
+      en_US: response_format
+    type: string
+    help:
+      zh_Hans: 指定模型必须输出的格式
+      en_US: specifying the format that the model must output
+    required: false
+    options:
+      - text
+      - json_object
+pricing:
+  input: '2.50'
+  output: '10.00'
+  unit: '0.000001'
+  currency: USD
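As the pricing block is conventionally read in these provider YAMLs, each token costs price × unit, so the file above declares USD 2.50 per million input tokens and USD 10.00 per million output tokens. A minimal sketch of that arithmetic, assuming that reading of the fields (the helper below is illustrative, not Dify's actual pricing code):

from decimal import Decimal

# Values as declared in chatgpt-4o-latest.yaml above.
INPUT_PRICE = Decimal("2.50")
OUTPUT_PRICE = Decimal("10.00")
UNIT = Decimal("0.000001")  # price applies per single token

def estimate_cost(prompt_tokens: int, completion_tokens: int) -> Decimal:
    # Hypothetical helper: cost = tokens * price * unit for each direction.
    return (
        Decimal(prompt_tokens) * INPUT_PRICE * UNIT
        + Decimal(completion_tokens) * OUTPUT_PRICE * UNIT
    )

# 1,000 prompt tokens + 500 completion tokens ≈ 0.0025 + 0.005 = 0.0075 USD
print(estimate_cost(1_000, 500))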

+ 6 - 3
api/core/model_runtime/model_providers/openai/llm/llm.py

@@ -922,11 +922,14 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
                                   tools: Optional[list[PromptMessageTool]] = None) -> int:
         """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
 
-        Official documentation: https://github.com/openai/openai-cookbook/blob/
-        main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
+        Official documentation: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
         if model.startswith('ft:'):
             model = model.split(':')[1]
 
+        # For now, count chatgpt-4o-latest's tokens with gpt-4o's tokenizer.
+        if model == "chatgpt-4o-latest":
+            model = "gpt-4o"
+
         try:
             encoding = tiktoken.encoding_for_model(model)
         except KeyError:
@@ -946,7 +949,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
             raise NotImplementedError(
                 f"get_num_tokens_from_messages() is not presently implemented "
                 f"for model {model}."
-                "See https://github.com/openai/openai-python/blob/main/chatml.md for "
+                "See https://platform.openai.com/docs/advanced-usage/managing-tokens for "
                 "information on how messages are converted to tokens."
             )
         num_tokens = 0
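The added branch counts chatgpt-4o-latest with gpt-4o's tokenizer, since the alias may not be known to the installed tiktoken release while gpt-4o is. A self-contained sketch of the same lookup pattern follows; the cl100k_base fallback is an assumption about the surrounding except KeyError branch, which this excerpt truncates:

import tiktoken

def encoding_for(model: str) -> tiktoken.Encoding:
    # Mirror the mapping added above: chatgpt-4o-latest -> gpt-4o.
    if model == "chatgpt-4o-latest":
        model = "gpt-4o"
    try:
        return tiktoken.encoding_for_model(model)
    except KeyError:
        # Assumed fallback for models tiktoken does not recognize.
        return tiktoken.get_encoding("cl100k_base")

# Both names should resolve to the same encoding.
print(encoding_for("chatgpt-4o-latest").name, encoding_for("gpt-4o").name)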

+ 1 - 1
web/app/components/header/account-setting/model-provider-page/model-icon/index.tsx

@@ -19,7 +19,7 @@ const ModelIcon: FC<ModelIconProps> = ({
 }) => {
   const language = useLanguage()
 
-  if (provider?.provider === 'openai' && modelName?.startsWith('gpt-4'))
+  if (provider?.provider === 'openai' && (modelName?.startsWith('gpt-4') || modelName?.includes('4o')))
     return <OpenaiViolet className={`w-4 h-4 ${className}`}/>
 
   if (provider?.icon_small) {
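The widened condition gives the violet OpenAI icon to any model name that starts with gpt-4 or contains 4o, which now covers chatgpt-4o-latest. A quick Python sketch mirroring the TSX predicate, purely to enumerate which names match (the component above is the actual implementation):

def uses_violet_icon(provider: str, model_name: str) -> bool:
    # Same check as the TSX: provider 'openai' and a gpt-4* or *4o* name.
    return provider == "openai" and (
        model_name.startswith("gpt-4") or "4o" in model_name
    )

for name in ["gpt-4-turbo", "gpt-4o", "chatgpt-4o-latest", "gpt-3.5-turbo"]:
    print(name, uses_violet_icon("openai", name))
# Expected: True, True, True, False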