
fix: wenxin model name invalid when calling LLM (#1248)

takatost · 1 year ago · commit d38eac959b

api/core/model_providers/models/llm/wenxin_model.py  (+1 -0)

@@ -18,6 +18,7 @@ class WenxinModel(BaseLLM):
         provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
         # TODO load price_config from configs(db)
         return Wenxin(
+            model=self.name,
             streaming=self.streaming,
             callbacks=self.callbacks,
             **self.credentials,

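The one-line change above is the actual fix: Wenxin(...) was previously constructed without an explicit model argument, so the wrapper's default model was used for every request, no matter which Ernie model the user had selected. A minimal sketch of that failure mode, assuming (hypothetically) a pydantic-style wrapper with a default model field; FakeWenxin and its default value are illustrative only, not the project's actual class:

    # Hypothetical stand-in for the Wenxin LLM wrapper (not the real class).
    from pydantic import BaseModel

    class FakeWenxin(BaseModel):
        model: str = 'ernie-bot'  # assumed default, for illustration only
        streaming: bool = False

    # Before the fix: the model name is never forwarded, so the default wins.
    before = FakeWenxin(streaming=True)
    assert before.model == 'ernie-bot'

    # After the fix: the selected name is passed through, as in model=self.name.
    after = FakeWenxin(model='ernie-bot-turbo', streaming=True)
    assert after.model == 'ernie-bot-turbo'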
api/core/model_providers/providers/wenxin_provider.py  (+6 -1)

@@ -61,13 +61,18 @@ class WenxinProvider(BaseModelProvider):
         :param model_type:
         :return:
         """
+        model_max_tokens = {
+            'ernie-bot': 4800,
+            'ernie-bot-turbo': 11200,
+        }
+
         if model_name in ['ernie-bot', 'ernie-bot-turbo']:
             return ModelKwargsRules(
                 temperature=KwargRule[float](min=0.01, max=1, default=0.95, precision=2),
                 top_p=KwargRule[float](min=0.01, max=1, default=0.8, precision=2),
                 presence_penalty=KwargRule[float](enabled=False),
                 frequency_penalty=KwargRule[float](enabled=False),
-                max_tokens=KwargRule[int](enabled=False),
+                max_tokens=KwargRule[int](enabled=False, max=model_max_tokens.get(model_name)),
             )
         else:
             return ModelKwargsRules(
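The second hunk attaches a per-model upper bound to the max_tokens rule (4800 for ernie-bot, 11200 for ernie-bot-turbo) while leaving the kwarg disabled, so code that inspects the rule sees the correct ceiling for the selected model; model_max_tokens.get(model_name) yields None for any name outside the mapping. A hedged sketch of that lookup, using a plain dataclass as a stand-in for the project's actual KwargRule:

    # Stand-in KwargRule for illustration; the real one is a project class.
    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class KwargRule:
        enabled: bool = True
        max: Optional[int] = None

    model_max_tokens = {
        'ernie-bot': 4800,
        'ernie-bot-turbo': 11200,
    }

    def max_tokens_rule(model_name: str) -> KwargRule:
        # dict.get() returns None for unknown names, leaving max unset
        return KwargRule(enabled=False, max=model_max_tokens.get(model_name))

    assert max_tokens_rule('ernie-bot').max == 4800
    assert max_tokens_rule('ernie-bot-turbo').max == 11200
    assert max_tokens_rule('something-else').max is None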