MuYu 7 months ago
Parent
Commit
a03919c3b3

+ 1 - 0
api/core/model_runtime/model_providers/hunyuan/llm/_position.yaml

@@ -3,3 +3,4 @@
 - hunyuan-standard-256k
 - hunyuan-pro
 - hunyuan-turbo
+- hunyuan-vision

+ 39 - 0
api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-vision.yaml

@@ -0,0 +1,39 @@
+model: hunyuan-vision
+label:
+  zh_Hans: hunyuan-vision
+  en_US: hunyuan-vision
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - multi-tool-call
+  - stream-tool-call
+  - vision
+model_properties:
+  mode: chat
+  context_size: 8000
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    default: 1024
+    min: 1
+    max: 8000
+  - name: enable_enhance
+    label:
+      zh_Hans: 功能增强
+      en_US: Enable Enhancement
+    type: boolean
+    help:
+      zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。
+      en_US: Allow the model to perform external search to enhance the generation results.
+    required: false
+    default: true
+pricing:
+  input: '0.018'
+  output: '0.018'
+  unit: '0.001'
+  currency: RMB
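
The pricing block above lists per-token prices scaled by a unit field. A minimal sketch of the cost arithmetic, assuming the usual "tokens * unit * price" interpretation of this schema (the helper name and token counts are illustrative, not part of this commit):

from decimal import Decimal

# Assumed interpretation: unit 0.001 means the listed price applies per 1,000 tokens.
INPUT_PRICE = Decimal("0.018")   # RMB, from the pricing block above
OUTPUT_PRICE = Decimal("0.018")
UNIT = Decimal("0.001")

def estimate_cost(prompt_tokens: int, completion_tokens: int) -> Decimal:
    # cost = tokens * unit * per-unit price
    return Decimal(prompt_tokens) * UNIT * INPUT_PRICE + Decimal(completion_tokens) * UNIT * OUTPUT_PRICE

# 1,000 prompt tokens + 500 completion tokens -> 0.018 + 0.009 = 0.027 RMB
print(estimate_cost(1000, 500))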

+ 23 - 0
api/core/model_runtime/model_providers/hunyuan/llm/llm.py

@@ -1,6 +1,7 @@
 import json
 import logging
 from collections.abc import Generator
+from typing import cast
 
 from tencentcloud.common import credential
 from tencentcloud.common.exception import TencentCloudSDKException
@@ -11,9 +12,12 @@ from tencentcloud.hunyuan.v20230901 import hunyuan_client, models
 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
 from core.model_runtime.entities.message_entities import (
     AssistantPromptMessage,
+    ImagePromptMessageContent,
     PromptMessage,
+    PromptMessageContentType,
     PromptMessageTool,
     SystemPromptMessage,
+    TextPromptMessageContent,
     ToolPromptMessage,
     UserPromptMessage,
 )
@@ -143,6 +147,25 @@ class HunyuanLargeLanguageModel(LargeLanguageModel):
                 tool_execute_result = {"result": message.content}
                 content = json.dumps(tool_execute_result, ensure_ascii=False)
                 dict_list.append({"Role": message.role.value, "Content": content, "ToolCallId": message.tool_call_id})
+            elif isinstance(message, UserPromptMessage):
+                message = cast(UserPromptMessage, message)
+                if isinstance(message.content, str):
+                    dict_list.append({"Role": message.role.value, "Content": message.content})
+                else:
+                    sub_messages = []
+                    for message_content in message.content:
+                        if message_content.type == PromptMessageContentType.TEXT:
+                            message_content = cast(TextPromptMessageContent, message_content)
+                            sub_message_dict = {"Type": "text", "Text": message_content.data}
+                            sub_messages.append(sub_message_dict)
+                        elif message_content.type == PromptMessageContentType.IMAGE:
+                            message_content = cast(ImagePromptMessageContent, message_content)
+                            sub_message_dict = {
+                                "Type": "image_url",
+                                "ImageUrl": {"Url": message_content.data},
+                            }
+                            sub_messages.append(sub_message_dict)
+                    dict_list.append({"Role": message.role.value, "Contents": sub_messages})
             else:
                 dict_list.append({"Role": message.role.value, "Content": message.content})
         return dict_list
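
For reference, the new UserPromptMessage branch serializes a mixed text-and-image user message into the Hunyuan "Contents" shape instead of a flat "Content" string. A minimal sketch of the dict it is expected to append (sample text and URL are made up, not from this commit):

# Illustrative only: field names mirror the Tencent Hunyuan ChatCompletions
# schema used in the branch above; values are placeholders.
expected_entry = {
    "Role": "user",
    "Contents": [
        {"Type": "text", "Text": "Describe this picture."},
        {"Type": "image_url", "ImageUrl": {"Url": "https://example.com/photo.png"}},
    ],
}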