
feat: add yi custom llm integration (#9482)

zhuhao 6 months ago
parent
commit
e0846792d2
2 changed files with 121 additions and 1 deletion
  1. api/core/model_runtime/model_providers/yi/llm/llm.py (+66, -1)
  2. api/core/model_runtime/model_providers/yi/yi.yaml (+55, -0)

+ 66 - 1
api/core/model_runtime/model_providers/yi/llm/llm.py

@@ -4,12 +4,22 @@ from urllib.parse import urlparse
 
 import tiktoken
 
-from core.model_runtime.entities.llm_entities import LLMResult
+from core.model_runtime.entities.common_entities import I18nObject
+from core.model_runtime.entities.llm_entities import LLMMode, LLMResult
 from core.model_runtime.entities.message_entities import (
     PromptMessage,
     PromptMessageTool,
     SystemPromptMessage,
 )
+from core.model_runtime.entities.model_entities import (
+    AIModelEntity,
+    FetchFrom,
+    ModelFeature,
+    ModelPropertyKey,
+    ModelType,
+    ParameterRule,
+    ParameterType,
+)
 from core.model_runtime.model_providers.openai.llm.llm import OpenAILargeLanguageModel
 
 
@@ -125,3 +135,58 @@ class YiLargeLanguageModel(OpenAILargeLanguageModel):
         else:
             parsed_url = urlparse(credentials["endpoint_url"])
             credentials["openai_api_base"] = f"{parsed_url.scheme}://{parsed_url.netloc}"
+
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+        return AIModelEntity(
+            model=model,
+            label=I18nObject(en_US=model, zh_Hans=model),
+            model_type=ModelType.LLM,
+            features=[ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL, ModelFeature.STREAM_TOOL_CALL]
+            if credentials.get("function_calling_type") == "tool_call"
+            else [],
+            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
+            model_properties={
+                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 8000)),
+                ModelPropertyKey.MODE: LLMMode.CHAT.value,
+            },
+            parameter_rules=[
+                ParameterRule(
+                    name="temperature",
+                    use_template="temperature",
+                    label=I18nObject(en_US="Temperature", zh_Hans="温度"),
+                    type=ParameterType.FLOAT,
+                ),
+                ParameterRule(
+                    name="max_tokens",
+                    use_template="max_tokens",
+                    default=512,
+                    min=1,
+                    max=int(credentials.get("max_tokens", 8192)),
+                    label=I18nObject(
+                        en_US="Max Tokens", zh_Hans="指定生成结果长度的上限。如果生成结果截断,可以调大该参数"
+                    ),
+                    type=ParameterType.INT,
+                ),
+                ParameterRule(
+                    name="top_p",
+                    use_template="top_p",
+                    label=I18nObject(
+                        en_US="Top P",
+                        zh_Hans="控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。",
+                    ),
+                    type=ParameterType.FLOAT,
+                ),
+                ParameterRule(
+                    name="top_k",
+                    use_template="top_k",
+                    label=I18nObject(en_US="Top K", zh_Hans="取样数量"),
+                    type=ParameterType.FLOAT,
+                ),
+                ParameterRule(
+                    name="frequency_penalty",
+                    use_template="frequency_penalty",
+                    label=I18nObject(en_US="Frequency Penalty", zh_Hans="重复惩罚"),
+                    type=ParameterType.FLOAT,
+                ),
+            ],
+        )
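
For orientation, a minimal standalone sketch of the credential handling that the get_customizable_model_schema addition above performs: tool-calling features are advertised only when function_calling_type equals "tool_call", and context_size / max_tokens arrive as strings from the per-model credential form (declared in yi.yaml below) and are int()-parsed. The helper name derive_schema_fields and its plain-dict return value are assumptions for illustration; the real method builds an AIModelEntity and is invoked by Dify's model runtime rather than called directly.

    # Illustrative sketch only: restates the credential handling from the diff above
    # without importing Dify's model runtime. The function name and return shape
    # are assumptions; the real code constructs an AIModelEntity instead.
    def derive_schema_fields(credentials: dict) -> dict:
        # Tool-calling features are only enabled when function_calling_type == "tool_call".
        supports_tools = credentials.get("function_calling_type") == "tool_call"
        return {
            "features": ["tool-call", "multi-tool-call", "stream-tool-call"] if supports_tools else [],
            # Form values are submitted as strings, so they are int()-parsed as in llm.py.
            "context_size": int(credentials.get("context_size", 8000)),
            "max_tokens_upper_bound": int(credentials.get("max_tokens", 8192)),
        }

    if __name__ == "__main__":
        # Example credentials as the customizable-model form (yi.yaml below) could submit them.
        creds = {"context_size": "16384", "max_tokens": "4096", "function_calling_type": "tool_call"}
        print(derive_schema_fields(creds))
        # {'features': ['tool-call', 'multi-tool-call', 'stream-tool-call'],
        #  'context_size': 16384, 'max_tokens_upper_bound': 4096}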

+ 55 - 0
api/core/model_runtime/model_providers/yi/yi.yaml

@@ -20,6 +20,7 @@ supported_model_types:
   - llm
 configurate_methods:
   - predefined-model
+  - customizable-model
 provider_credential_schema:
   credential_form_schemas:
     - variable: api_key
@@ -39,3 +40,57 @@ provider_credential_schema:
       placeholder:
         zh_Hans: Base URL, e.g. https://api.lingyiwanwu.com/v1
         en_US: Base URL, e.g. https://api.lingyiwanwu.com/v1
+model_credential_schema:
+  model:
+    label:
+      en_US: Model Name
+      zh_Hans: 模型名称
+    placeholder:
+      en_US: Enter your model name
+      zh_Hans: 输入模型名称
+  credential_form_schemas:
+    - variable: api_key
+      label:
+        en_US: API Key
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 API Key
+        en_US: Enter your API Key
+    - variable: context_size
+      label:
+        zh_Hans: 模型上下文长度
+        en_US: Model context size
+      required: true
+      type: text-input
+      default: '4096'
+      placeholder:
+        zh_Hans: 在此输入您的模型上下文长度
+        en_US: Enter your Model context size
+    - variable: max_tokens
+      label:
+        zh_Hans: 最大 token 上限
+        en_US: Upper bound for max tokens
+      default: '4096'
+      type: text-input
+      show_on:
+        - variable: __model_type
+          value: llm
+    - variable: function_calling_type
+      label:
+        en_US: Function calling
+      type: select
+      required: false
+      default: no_call
+      options:
+        - value: no_call
+          label:
+            en_US: Not Support
+            zh_Hans: 不支持
+        - value: function_call
+          label:
+            en_US: Support
+            zh_Hans: 支持
+      show_on:
+        - variable: __model_type
+          value: llm
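
To connect the two files in this change: the model_credential_schema above defines the per-model form whose values become the credentials dict read by get_customizable_model_schema in llm.py. Below is an assumed example of such a dict (the api_key value is a placeholder); note that this select exposes no_call and function_call, while the feature check added in llm.py compares against "tool_call".

    # Illustrative only: an example of the per-model credentials produced by the form
    # fields declared in yi.yaml. Keys match the `variable` names above; the values
    # (including the placeholder api_key) are assumptions for demonstration.
    example_credentials = {
        "api_key": "YOUR_API_KEY",           # secret-input, required
        "context_size": "4096",              # text-input default; int()-parsed in llm.py
        "max_tokens": "4096",                # upper bound for the max_tokens ParameterRule
        "function_calling_type": "no_call",  # select: no_call or function_call
    }

    # get_customizable_model_schema reads context_size, max_tokens and
    # function_calling_type from this dict via credentials.get(...).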