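# Recalculate the max_tokens budget for this prompt (the helper name and its
# arguments suggest it clamps completion size to fit the model's context window).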
self.recalc_llm_max_tokens(self.model_config, prompt_messages)
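# Invoke the model: per the annotation below, this returns a generator of
# LLMResultChunk objects when streaming, or a single LLMResult otherwise.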
chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
    prompt_messages=prompt_messages,
    stream=True,  # the original call was truncated here; these arguments are an assumed minimal form
)
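# Sketch of consuming the Union result above: a streamed invocation yields
# LLMResultChunk objects, a blocking one returns a single LLMResult. The field
# names used here (chunk.delta.message.content, result.message.content) are
# assumed from Dify's model-runtime entities and may differ across versions;
# Union/Generator come from typing.
if isinstance(chunks, LLMResult):
    # Blocking mode: the complete answer is already on the result.
    answer = chunks.message.content
else:
    # Streaming mode: accumulate the delta content of each chunk as it arrives.
    parts: list[str] = []
    for chunk in chunks:
        delta_content = chunk.delta.message.content
        if delta_content:
            parts.append(delta_content)
    answer = "".join(parts)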