Browse Source

feat: optimize xinference stream (#989)

takatost 1 year ago
parent
commit
18d3877151
1 changed file with 3 additions and 3 deletions
  1. 3 3
      api/core/third_party/langchain/llms/xinference_llm.py

+ 3 - 3
api/core/third_party/langchain/llms/xinference_llm.py

@@ -108,12 +108,12 @@ class XinferenceLLM(Xinference):
         Yields:
             A string token.
         """
-        if isinstance(model, RESTfulGenerateModelHandle):
-            streaming_response = model.generate(
+        if isinstance(model, (RESTfulChatModelHandle, RESTfulChatglmCppChatModelHandle)):
+            streaming_response = model.chat(
                 prompt=prompt, generate_config=generate_config
             )
         else:
-            streaming_response = model.chat(
+            streaming_response = model.generate(
                 prompt=prompt, generate_config=generate_config
             )