Browse source

chore: format get_customizable_model_schema return value (#9335)

ice yao 6 months ago
parent
commit
1e829ceaf3
28 changed files with 33 additions and 30 deletions
  1. 1 1
      api/core/model_runtime/docs/en_US/customizable_model_scale_out.md
  2. 1 1
      api/core/model_runtime/docs/zh_Hans/customizable_model_scale_out.md
  3. 1 1
      api/core/model_runtime/model_providers/azure_ai_studio/llm/llm.py
  4. 1 1
      api/core/model_runtime/model_providers/azure_ai_studio/rerank/rerank.py
  5. 1 1
      api/core/model_runtime/model_providers/huggingface_tei/rerank/rerank.py
  6. 1 1
      api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py
  7. 2 2
      api/core/model_runtime/model_providers/localai/llm/llm.py
  8. 1 1
      api/core/model_runtime/model_providers/localai/speech2text/speech2text.py
  9. 1 1
      api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py
  10. 1 1
      api/core/model_runtime/model_providers/moonshot/llm/llm.py
  11. 1 1
      api/core/model_runtime/model_providers/openai/speech2text/speech2text.py
  12. 1 1
      api/core/model_runtime/model_providers/openai_api_compatible/speech2text/speech2text.py
  13. 2 1
      api/core/model_runtime/model_providers/openllm/llm/llm.py
  14. 1 1
      api/core/model_runtime/model_providers/sagemaker/llm/llm.py
  15. 1 1
      api/core/model_runtime/model_providers/sagemaker/rerank/rerank.py
  16. 1 1
      api/core/model_runtime/model_providers/sagemaker/speech2text/speech2text.py
  17. 1 1
      api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py
  18. 1 1
      api/core/model_runtime/model_providers/sagemaker/tts/tts.py
  19. 1 1
      api/core/model_runtime/model_providers/siliconflow/llm/llm.py
  20. 1 1
      api/core/model_runtime/model_providers/stepfun/llm/llm.py
  21. 1 1
      api/core/model_runtime/model_providers/tongyi/llm/llm.py
  22. 2 1
      api/core/model_runtime/model_providers/triton_inference_server/llm/llm.py
  23. 2 1
      api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py
  24. 2 2
      api/core/model_runtime/model_providers/xinference/llm/llm.py
  25. 1 1
      api/core/model_runtime/model_providers/xinference/rerank/rerank.py
  26. 1 1
      api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py
  27. 1 1
      api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py
  28. 1 1
      api/core/model_runtime/model_providers/xinference/tts/tts.py

+ 1 - 1
api/core/model_runtime/docs/en_US/customizable_model_scale_out.md

@@ -218,7 +218,7 @@ For instance, Xinference supports `max_tokens`, `temperature`, and `top_p` param
 However, some vendors may support different parameters for different models. For example, the `OpenLLM` vendor supports `top_k`, but not all models provided by this vendor support `top_k`. Let's say model A supports `top_k` but model B does not. In such cases, we need to dynamically generate the model parameter schema, as illustrated below:
 
 ```python
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
             used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/docs/zh_Hans/customizable_model_scale_out.md

@@ -205,7 +205,7 @@ provider_credential_schema:
   但是有的供应商根据不同的模型支持不同的参数,如供应商`OpenLLM`支持`top_k`,但是并不是这个供应商提供的所有模型都支持`top_k`,我们这里举例A模型支持`top_k`,B模型不支持`top_k`,那么我们需要在这里动态生成模型参数的Schema,如下所示:
   
     ```python
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
             used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/azure_ai_studio/llm/llm.py

@@ -294,7 +294,7 @@ class AzureAIStudioLargeLanguageModel(LargeLanguageModel):
             ],
         }
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         Used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/azure_ai_studio/rerank/rerank.py

@@ -148,7 +148,7 @@ class AzureRerankModel(RerankModel):
             InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError, json.JSONDecodeError],
         }
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/huggingface_tei/rerank/rerank.py

@@ -118,7 +118,7 @@ class HuggingfaceTeiRerankModel(RerankModel):
             InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError],
         }
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py

@@ -189,7 +189,7 @@ class HuggingfaceTeiTextEmbeddingModel(TextEmbeddingModel):
 
         return usage
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 2 - 2
api/core/model_runtime/model_providers/localai/llm/llm.py

@@ -1,5 +1,5 @@
 from collections.abc import Generator
-from typing import cast
+from typing import Optional, cast
 
 from httpx import Timeout
 from openai import (
@@ -212,7 +212,7 @@ class LocalAILanguageModel(LargeLanguageModel):
         except Exception as ex:
             raise CredentialsValidateFailedError(f"Invalid credentials {str(ex)}")
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         completion_model = None
         if credentials["completion_type"] == "chat_completion":
             completion_model = LLMMode.CHAT.value

+ 1 - 1
api/core/model_runtime/model_providers/localai/speech2text/speech2text.py

@@ -73,7 +73,7 @@ class LocalAISpeech2text(Speech2TextModel):
             InvokeBadRequestError: [InvokeBadRequestError],
         }
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py

@@ -115,7 +115,7 @@ class LocalAITextEmbeddingModel(TextEmbeddingModel):
             num_tokens += self._get_num_tokens_by_gpt2(text)
         return num_tokens
 
-    def _get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def _get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         Get customizable model schema
 

+ 1 - 1
api/core/model_runtime/model_providers/moonshot/llm/llm.py

@@ -50,7 +50,7 @@ class MoonshotLargeLanguageModel(OAIAPICompatLargeLanguageModel):
         self._add_custom_parameters(credentials)
         super().validate_credentials(model, credentials)
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         return AIModelEntity(
             model=model,
             label=I18nObject(en_US=model, zh_Hans=model),

+ 1 - 1
api/core/model_runtime/model_providers/openai/speech2text/speech2text.py

@@ -61,7 +61,7 @@ class OpenAISpeech2TextModel(_CommonOpenAI, Speech2TextModel):
 
         return response.text
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/openai_api_compatible/speech2text/speech2text.py

@@ -62,7 +62,7 @@ class OAICompatSpeech2TextModel(_CommonOaiApiCompat, Speech2TextModel):
         except Exception as ex:
             raise CredentialsValidateFailedError(str(ex))
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 2 - 1
api/core/model_runtime/model_providers/openllm/llm/llm.py

@@ -1,4 +1,5 @@
 from collections.abc import Generator
+from typing import Optional
 
 from core.model_runtime.entities.common_entities import I18nObject
 from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta
@@ -193,7 +194,7 @@ class OpenLLMLargeLanguageModel(LargeLanguageModel):
                     ),
                 )
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/sagemaker/llm/llm.py

@@ -408,7 +408,7 @@ class SageMakerLargeLanguageModel(LargeLanguageModel):
             InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError],
         }
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/sagemaker/rerank/rerank.py

@@ -157,7 +157,7 @@ class SageMakerRerankModel(RerankModel):
             InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError],
         }
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/sagemaker/speech2text/speech2text.py

@@ -111,7 +111,7 @@ class SageMakerSpeech2TextModel(Speech2TextModel):
             InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError],
         }
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py

@@ -180,7 +180,7 @@ class SageMakerEmbeddingModel(TextEmbeddingModel):
             InvokeBadRequestError: [KeyError],
         }
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/sagemaker/tts/tts.py

@@ -159,7 +159,7 @@ class SageMakerText2SpeechModel(TTSModel):
 
         return self._tts_invoke_streaming(model_type, payload, sagemaker_endpoint)
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/siliconflow/llm/llm.py

@@ -40,7 +40,7 @@ class SiliconflowLargeLanguageModel(OAIAPICompatLargeLanguageModel):
         credentials["mode"] = "chat"
         credentials["endpoint_url"] = "https://api.siliconflow.cn/v1"
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         return AIModelEntity(
             model=model,
             label=I18nObject(en_US=model, zh_Hans=model),

+ 1 - 1
api/core/model_runtime/model_providers/stepfun/llm/llm.py

@@ -50,7 +50,7 @@ class StepfunLargeLanguageModel(OAIAPICompatLargeLanguageModel):
         self._add_custom_parameters(credentials)
         super().validate_credentials(model, credentials)
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         return AIModelEntity(
             model=model,
             label=I18nObject(en_US=model, zh_Hans=model),

+ 1 - 1
api/core/model_runtime/model_providers/tongyi/llm/llm.py

@@ -535,7 +535,7 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
             ],
         }
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         Architecture for defining customizable models
 

+ 2 - 1
api/core/model_runtime/model_providers/triton_inference_server/llm/llm.py

@@ -1,4 +1,5 @@
 from collections.abc import Generator
+from typing import Optional
 
 from httpx import Response, post
 from yarl import URL
@@ -109,7 +110,7 @@ class TritonInferenceAILargeLanguageModel(LargeLanguageModel):
                 raise NotImplementedError(f"PromptMessage type {type(item)} is not supported")
         return text
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 2 - 1
api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py

@@ -1,5 +1,6 @@
 import logging
 from collections.abc import Generator
+from typing import Optional
 
 from volcenginesdkarkruntime.types.chat import ChatCompletion, ChatCompletionChunk
 
@@ -298,7 +299,7 @@ class VolcengineMaaSLargeLanguageModel(LargeLanguageModel):
         chunks = client.stream_chat(prompt_messages, **req_params)
         return _handle_stream_chat_response(chunks)
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 2 - 2
api/core/model_runtime/model_providers/xinference/llm/llm.py

@@ -1,5 +1,5 @@
 from collections.abc import Generator, Iterator
-from typing import cast
+from typing import Optional, cast
 
 from openai import (
     APIConnectionError,
@@ -321,7 +321,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
 
         return message_dict
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/xinference/rerank/rerank.py

@@ -142,7 +142,7 @@ class XinferenceRerankModel(RerankModel):
             InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError],
         }
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py

@@ -129,7 +129,7 @@ class XinferenceSpeech2TextModel(Speech2TextModel):
 
         return response["text"]
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py

@@ -184,7 +184,7 @@ class XinferenceTextEmbeddingModel(TextEmbeddingModel):
 
         return usage
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """

+ 1 - 1
api/core/model_runtime/model_providers/xinference/tts/tts.py

@@ -116,7 +116,7 @@ class XinferenceText2SpeechModel(TTSModel):
         """
         return self._tts_invoke_streaming(model, credentials, content_text, voice)
 
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
         """
         used to define customizable model schema
         """