@@ -105,7 +105,7 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
                     messages_ids=message_file_ids
                 )
 
-        # recale llm max tokens
+        # recalc llm max tokens
         self.recalc_llm_max_tokens(self.model_config, prompt_messages)
         # invoke model
         chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(