fix: max token may not exist in generate summary when calculating rest tokens (#891)

takatost 1 year ago
parent
commit
2f7b234cc5
1 changed file with 1 addition and 0 deletions

api/core/generator/llm_generator.py   +1 −0

@@ -51,6 +51,7 @@ class LLMGenerator:
         prompt_with_empty_context = prompt.format(context='')
         prompt_tokens = model_instance.get_num_tokens([PromptMessage(content=prompt_with_empty_context)])
         max_context_token_length = model_instance.model_rules.max_tokens.max
+        max_context_token_length = max_context_token_length if max_context_token_length else 1500
         rest_tokens = max_context_token_length - prompt_tokens - max_tokens - 1
 
         context = ''
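
For context, the one-line guard addresses model rules that declare no upper token bound: model_instance.model_rules.max_tokens.max can be unset (None), and the subtraction on the following line would then raise a TypeError. Below is a minimal sketch of the guarded computation, assuming the surrounding variables behave as in the hunk above; calc_rest_tokens is a hypothetical stand-in for the enclosing method, not part of the commit.

    def calc_rest_tokens(max_context_token_length, prompt_tokens, max_tokens):
        # Before the fix: with no declared limit, max_context_token_length is
        # None and the subtraction below raises a TypeError. The fix falls
        # back to a conservative default of 1500 tokens.
        max_context_token_length = max_context_token_length if max_context_token_length else 1500
        return max_context_token_length - prompt_tokens - max_tokens - 1

    print(calc_rest_tokens(None, 200, 512))   # 787, instead of a TypeError
    print(calc_rest_tokens(4096, 200, 512))   # 3383, a declared limit is kept

Note that the truthiness test also routes a declared limit of 0 to the 1500 fallback, consistent with treating an absent or empty limit as "use the default".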