
fix: title, summary, suggested questions generate (#476)

John Wang, 1 year ago
parent commit 408fbb0c70
2 files changed, 15 additions and 12 deletions
  1. api/core/generator/llm_generator.py (+11 -8)
  2. api/core/prompt/prompts.py (+4 -4)

+ 11 - 8
api/core/generator/llm_generator.py

@@ -2,7 +2,7 @@ import logging
 
 from langchain import PromptTemplate
 from langchain.chat_models.base import BaseChatModel
-from langchain.schema import HumanMessage, OutputParserException
+from langchain.schema import HumanMessage, OutputParserException, BaseMessage
 
 from core.constant import llm_constant
 from core.llm.llm_builder import LLMBuilder
@@ -23,10 +23,10 @@ class LLMGenerator:
     @classmethod
     def generate_conversation_name(cls, tenant_id: str, query, answer):
         prompt = CONVERSATION_TITLE_PROMPT
-        prompt = prompt.format(query=query, answer=answer)
+        prompt = prompt.format(query=query)
         llm: StreamableOpenAI = LLMBuilder.to_llm(
             tenant_id=tenant_id,
-            model_name=generate_base_model,
+            model_name='gpt-3.5-turbo',
             max_tokens=50
         )
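The hunk above does two things: it replaces the free-standing name generate_base_model, which appears to no longer be defined in this module, with an explicit model string, and it drops the unused answer argument from the title prompt. A hypothetical mini-version of the fixed path (GENERATE_MODEL, build_title_prompt, and the template text are illustrative, not the real code):

    GENERATE_MODEL = 'gpt-3.5-turbo'  # a literal, not an undefined module-level name

    def build_title_prompt(query: str) -> str:
        template = "Human:{query}\n-----\nSummarize the intent as a short title."
        return template.format(query=query)  # only 'query' is needed now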
 
@@ -40,11 +40,12 @@ class LLMGenerator:
     @classmethod
     def generate_conversation_summary(cls, tenant_id: str, messages):
         max_tokens = 200
+        model = 'gpt-3.5-turbo'
 
         prompt = CONVERSATION_SUMMARY_PROMPT
         prompt_with_empty_context = prompt.format(context='')
-        prompt_tokens = TokenCalculator.get_num_tokens(generate_base_model, prompt_with_empty_context)
-        rest_tokens = llm_constant.max_context_token_length[generate_base_model] - prompt_tokens - max_tokens
+        prompt_tokens = TokenCalculator.get_num_tokens(model, prompt_with_empty_context)
+        rest_tokens = llm_constant.max_context_token_length[model] - prompt_tokens - max_tokens
 
         context = ''
         for message in messages:
@@ -52,14 +53,14 @@ class LLMGenerator:
                 continue
 
             message_qa_text = "Human:" + message.query + "\nAI:" + message.answer + "\n"
-            if rest_tokens - TokenCalculator.get_num_tokens(generate_base_model, context + message_qa_text) > 0:
+            if rest_tokens - TokenCalculator.get_num_tokens(model, context + message_qa_text) > 0:
                 context += message_qa_text
 
         prompt = prompt.format(context=context)
 
         llm: StreamableOpenAI = LLMBuilder.to_llm(
             tenant_id=tenant_id,
-            model_name=generate_base_model,
+            model_name=model,
             max_tokens=max_tokens
         )
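The summary path now budgets context explicitly: it measures the prompt with an empty context, subtracts that and the completion budget from the model's window, then greedily appends Q/A pairs while they still fit. A self-contained sketch of that packing loop, with a toy token counter standing in for TokenCalculator.get_num_tokens and an assumed 4096-token window:

    MAX_CONTEXT = {'gpt-3.5-turbo': 4096}  # assumed window, mirrors llm_constant

    def count_tokens(text: str) -> int:
        # Toy stand-in; a faithful counter would use tiktoken for the model.
        return len(text.split())

    def pack_context(messages, prompt_overhead: int, max_completion: int,
                     model: str = 'gpt-3.5-turbo') -> str:
        # Reserve room for the prompt scaffolding and the completion, then
        # keep Q/A pairs only while the remainder still fits (same guard as the diff).
        rest = MAX_CONTEXT[model] - prompt_overhead - max_completion
        context = ''
        for query, answer in messages:
            qa = f"Human:{query}\nAI:{answer}\n"
            if rest - count_tokens(context + qa) > 0:
                context += qa
        return context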
 
@@ -102,7 +103,7 @@ class LLMGenerator:
 
         llm: StreamableOpenAI = LLMBuilder.to_llm(
             tenant_id=tenant_id,
-            model_name=generate_base_model,
+            model_name='gpt-3.5-turbo',
             temperature=0,
             max_tokens=256
         )
@@ -114,6 +115,8 @@ class LLMGenerator:
 
         try:
             output = llm(query)
+            if isinstance(output, BaseMessage):
+                output = output.content
             questions = output_parser.parse(output)
         except Exception:
             logging.exception("Error generating suggested questions after answer")
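The new isinstance guard (and the BaseMessage import at the top of the file) exists because, in the langchain versions this code targets, invoking a chat model returns a message object rather than a plain string, while the output parser expects text. A minimal illustration, using langchain's AIMessage as the concrete chat response type:

    from langchain.schema import AIMessage, BaseMessage

    def to_text(output) -> str:
        # Chat models return BaseMessage subclasses; completion models return str.
        if isinstance(output, BaseMessage):
            return output.content
        return output

    assert to_text(AIMessage(content="1. What else?")) == "1. What else?"
    assert to_text("1. What else?") == "1. What else?"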

+ 4 - 4
api/core/prompt/prompts.py

@@ -1,5 +1,5 @@
 CONVERSATION_TITLE_PROMPT = (
-    "Human:{{query}}\n-----\n"
+    "Human:{query}\n-----\n"
     "Help me summarize the intent of what the human said and provide a title, the title should not exceed 20 words.\n"
     "If the human said is conducted in Chinese, you should return a Chinese title.\n" 
     "If the human said is conducted in English, you should return an English title.\n"
@@ -19,7 +19,7 @@ CONVERSATION_SUMMARY_PROMPT = (
 INTRODUCTION_GENERATE_PROMPT = (
     "I am designing a product for users to interact with an AI through dialogue. "
     "The Prompt given to the AI before the conversation is:\n\n"
-    "```\n{{prompt}}\n```\n\n"
+    "```\n{prompt}\n```\n\n"
     "Please generate a brief introduction of no more than 50 words that greets the user, based on this Prompt. "
     "Do not reveal the developer's motivation or deep logic behind the Prompt, "
     "but focus on building a relationship with the user:\n"
@@ -27,13 +27,13 @@ INTRODUCTION_GENERATE_PROMPT = (
 
 MORE_LIKE_THIS_GENERATE_PROMPT = (
     "-----\n"
-    "{{original_completion}}\n"
+    "{original_completion}\n"
     "-----\n\n"
     "Please use the above content as a sample for generating the result, "
     "and include key information points related to the original sample in the result. "
     "Try to rephrase this information in different ways and predict according to the rules below.\n\n"
     "-----\n"
-    "{{prompt}}\n"
+    "{prompt}\n"
 )
 
 SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = (
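All four prompts.py edits are the same fix. These templates are filled with Python's str.format (as the title path in llm_generator.py shows), where a doubled brace is an escape that renders a literal brace, so a {{query}} placeholder was emitted verbatim instead of being substituted. A two-line demonstration:

    broken = "Human:{{query}}\n".format(query="hello")  # extra kwargs are ignored
    fixed = "Human:{query}\n".format(query="hello")
    assert broken == "Human:{query}\n"  # literal braces survived, query lost
    assert fixed == "Human:hello\n"     # actual substitution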