feat: add qwen2.5 for model provider siliconflow (#8630)

zhuhao, 7 months ago
commit 45c0a44411

+ 4 - 0
api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml

@@ -1,3 +1,7 @@
+- Qwen/Qwen2.5-7B-Instruct
+- Qwen/Qwen2.5-14B-Instruct
+- Qwen/Qwen2.5-32B-Instruct
+- Qwen/Qwen2.5-72B-Instruct
 - Qwen/Qwen2-72B-Instruct
 - Qwen/Qwen2-57B-A14B-Instruct
 - Qwen/Qwen2-7B-Instruct

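The `_position.yaml` entries above determine where the new Qwen2.5 models appear in the SiliconFlow provider's model list; the per-model YAML files below supply their runtime metadata. As a rough illustration of what this registration enables, here is a minimal sketch (not part of this change) of calling one of the newly added models through SiliconFlow's OpenAI-compatible API; the base URL and the `SILICONFLOW_API_KEY` environment variable are assumptions, not defined anywhere in this commit.

```python
# Hypothetical usage sketch: call a newly registered Qwen2.5 model on SiliconFlow
# via its OpenAI-compatible API. The base_url and env var are assumptions,
# not something this commit introduces.
import os
from openai import OpenAI

client = OpenAI(
    api_key=os.environ["SILICONFLOW_API_KEY"],  # assumed credential source
    base_url="https://api.siliconflow.cn/v1",   # assumed OpenAI-compatible endpoint
)

resp = client.chat.completions.create(
    model="Qwen/Qwen2.5-7B-Instruct",  # identifier added in this commit
    messages=[{"role": "user", "content": "Hello!"}],
    max_tokens=512,                    # matches the YAML default below
    temperature=0.7,
)
print(resp.choices[0].message.content)
```
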
+ 30 - 0
api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-14b-instruct.yaml

@@ -0,0 +1,30 @@
+model: Qwen/Qwen2.5-14B-Instruct
+label:
+  en_US: Qwen/Qwen2.5-14B-Instruct
+model_type: llm
+features:
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 32768
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: max_tokens
+    use_template: max_tokens
+    type: int
+    default: 512
+    min: 1
+    max: 8192
+    help:
+      zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+      en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+  - name: top_p
+    use_template: top_p
+  - name: frequency_penalty
+    use_template: frequency_penalty
+pricing:
+  input: '0.7'
+  output: '0.7'
+  unit: '0.000001'
+  currency: RMB

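For context, the `pricing` block reads as a per-token price: `input`/`output` are multiplied by `unit` (1e-6) and denominated in `currency`, so '0.7' with unit '0.000001' works out to 0.7 RMB per million tokens. A small sketch of that arithmetic, assuming this interpretation of the fields (the helper below is illustrative, not part of the runtime):

```python
# Illustrative cost arithmetic for the pricing block above (not Dify code).
def siliconflow_cost(input_tokens: int, output_tokens: int,
                     input_price: str = "0.7", output_price: str = "0.7",
                     unit: str = "0.000001") -> float:
    """Cost in RMB, assuming price-per-token = price * unit."""
    return (input_tokens * float(input_price)
            + output_tokens * float(output_price)) * float(unit)

# 10k prompt tokens + 2k completion tokens on Qwen2.5-14B-Instruct:
# (10_000 + 2_000) * 0.7 * 1e-6 = 0.0084 RMB
print(siliconflow_cost(10_000, 2_000))
```
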
+ 30 - 0
api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-32b-instruct.yaml

@@ -0,0 +1,30 @@
+model: Qwen/Qwen2.5-32B-Instruct
+label:
+  en_US: Qwen/Qwen2.5-32B-Instruct
+model_type: llm
+features:
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 32768
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: max_tokens
+    use_template: max_tokens
+    type: int
+    default: 512
+    min: 1
+    max: 8192
+    help:
+      zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+      en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+  - name: top_p
+    use_template: top_p
+  - name: frequency_penalty
+    use_template: frequency_penalty
+pricing:
+  input: '1.26'
+  output: '1.26'
+  unit: '0.000001'
+  currency: RMB

+ 30 - 0
api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-72b-instruct.yaml

@@ -0,0 +1,30 @@
+model: Qwen/Qwen2.5-72B-Instruct
+label:
+  en_US: Qwen/Qwen2.5-72B-Instruct
+model_type: llm
+features:
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 32768
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: max_tokens
+    use_template: max_tokens
+    type: int
+    default: 512
+    min: 1
+    max: 8192
+    help:
+      zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+      en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+  - name: top_p
+    use_template: top_p
+  - name: frequency_penalty
+    use_template: frequency_penalty
+pricing:
+  input: '4.13'
+  output: '4.13'
+  unit: '0.000001'
+  currency: RMB

+ 30 - 0
api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-7b-instruct.yaml

@@ -0,0 +1,30 @@
+model: Qwen/Qwen2.5-7B-Instruct
+label:
+  en_US: Qwen/Qwen2.5-7B-Instruct
+model_type: llm
+features:
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 32768
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: max_tokens
+    use_template: max_tokens
+    type: int
+    default: 512
+    min: 1
+    max: 8192
+    help:
+      zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+      en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+  - name: top_p
+    use_template: top_p
+  - name: frequency_penalty
+    use_template: frequency_penalty
+pricing:
+  input: '0'
+  output: '0'
+  unit: '0.000001'
+  currency: RMB