assistant_cot_runner.py

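"""Chain-of-thought (ReAct) runner for assistant applications.

Drives the think/act/observe loop: prompts the model, parses streamed
thoughts and tool-call actions, invokes tools, and streams the final answer.
"""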
import json
import re
from collections.abc import Generator
from typing import Literal, Optional, Union

from core.application_queue_manager import PublishFrom
from core.entities.application_entities import AgentPromptEntity, AgentScratchpadUnit
from core.features.assistant_base_runner import BaseAssistantApplicationRunner
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessage,
    PromptMessageTool,
    SystemPromptMessage,
    ToolPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.utils.encoders import jsonable_encoder
from core.tools.errors import (
    ToolInvokeError,
    ToolNotFoundError,
    ToolNotSupportedError,
    ToolParameterValidationError,
    ToolProviderCredentialValidationError,
    ToolProviderNotFoundError,
)
from models.model import Conversation, Message


class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
    _is_first_iteration = True
    # providers for which the 'Observation' stop sequence must not be added
    _ignore_observation_providers = ['wenxin']

    def run(self, conversation: Conversation,
            message: Message,
            query: str,
            inputs: dict[str, str],
            ) -> Union[Generator, LLMResult]:
        """
        Run CoT agent application
        """
        app_orchestration_config = self.app_orchestration_config
        self._repack_app_orchestration_config(app_orchestration_config)

        agent_scratchpad: list[AgentScratchpadUnit] = []
        self._init_agent_scratchpad(agent_scratchpad, self.history_prompt_messages)

        if 'Observation' not in app_orchestration_config.model_config.stop:
            if app_orchestration_config.model_config.provider not in self._ignore_observation_providers:
                app_orchestration_config.model_config.stop.append('Observation')

        # override inputs
        inputs = inputs or {}
        instruction = self.app_orchestration_config.prompt_template.simple_prompt_template
        instruction = self._fill_in_inputs_from_external_data_tools(instruction, inputs)

        iteration_step = 1
        max_iteration_steps = min(self.app_orchestration_config.agent.max_iteration, 5) + 1
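        # the extra step allows one last iteration with all tools removed,
        # forcing the model to produce a final answer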

        prompt_messages = self.history_prompt_messages

        # convert tools into ModelRuntime Tool format
        prompt_messages_tools: list[PromptMessageTool] = []
        tool_instances = {}
        for tool in self.app_orchestration_config.agent.tools if self.app_orchestration_config.agent else []:
            try:
                prompt_tool, tool_entity = self._convert_tool_to_prompt_message_tool(tool)
            except Exception:
                # the api tool may have been deleted, skip it
                continue
            # save tool entity
            tool_instances[tool.tool_name] = tool_entity
            # save prompt tool
            prompt_messages_tools.append(prompt_tool)

        # convert dataset tools into ModelRuntime Tool format
        for dataset_tool in self.dataset_tools:
            prompt_tool = self._convert_dataset_retriever_tool_to_prompt_message_tool(dataset_tool)
            # save prompt tool
            prompt_messages_tools.append(prompt_tool)
            # save tool entity
            tool_instances[dataset_tool.identity.name] = dataset_tool

        function_call_state = True
        llm_usage = {
            'usage': None
        }
        final_answer = ''
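
        # accumulates token usage from each iteration into llm_usage['usage']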
        def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage):
            if not final_llm_usage_dict['usage']:
                final_llm_usage_dict['usage'] = usage
            else:
                llm_usage = final_llm_usage_dict['usage']
                llm_usage.prompt_tokens += usage.prompt_tokens
                llm_usage.completion_tokens += usage.completion_tokens
                llm_usage.prompt_price += usage.prompt_price
                llm_usage.completion_price += usage.completion_price

        model_instance = self.model_instance

        while function_call_state and iteration_step <= max_iteration_steps:
            # continue to run until there are no more tool calls
            function_call_state = False

            if iteration_step == max_iteration_steps:
                # on the last iteration, remove all tools
                prompt_messages_tools = []

            message_file_ids = []
            agent_thought = self.create_agent_thought(
                message_id=message.id,
                message='',
                tool_name='',
                tool_input='',
                messages_ids=message_file_ids
            )

            if iteration_step > 1:
                self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)

            # update prompt messages
            prompt_messages = self._organize_cot_prompt_messages(
                mode=app_orchestration_config.model_config.mode,
                prompt_messages=prompt_messages,
                tools=prompt_messages_tools,
                agent_scratchpad=agent_scratchpad,
                agent_prompt_message=app_orchestration_config.agent.prompt,
                instruction=instruction,
                input=query
            )

            # recalculate llm max tokens
            self.recalc_llm_max_tokens(self.model_config, prompt_messages)
            # invoke model
            chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
                prompt_messages=prompt_messages,
                model_parameters=app_orchestration_config.model_config.parameters,
                tools=[],
                stop=app_orchestration_config.model_config.stop,
                stream=True,
                user=self.user_id,
                callbacks=[],
            )

            # check llm result
            if not chunks:
                raise ValueError("failed to invoke llm")

            usage_dict = {}
            react_chunks = self._handle_stream_react(chunks, usage_dict)
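            # react_chunks yields plain text (thoughts) and parsed dicts (action
            # JSON blobs); token usage is reported back through usage_dict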
            scratchpad = AgentScratchpadUnit(
                agent_response='',
                thought='',
                action_str='',
                observation='',
                action=None,
            )

            # publish agent thought if it's the first iteration
            if iteration_step == 1:
                self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)

            for chunk in react_chunks:
                if isinstance(chunk, dict):
                    scratchpad.agent_response += json.dumps(chunk)
                    try:
                        if scratchpad.action:
                            # an action was already parsed; treat this blob as thought text
                            raise Exception("")
                        scratchpad.action_str = json.dumps(chunk)
                        scratchpad.action = AgentScratchpadUnit.Action(
                            action_name=chunk['action'],
                            action_input=chunk['action_input']
                        )
                    except Exception:
                        scratchpad.thought += json.dumps(chunk)
                        yield LLMResultChunk(
                            model=self.model_config.model,
                            prompt_messages=prompt_messages,
                            system_fingerprint='',
                            delta=LLMResultChunkDelta(
                                index=0,
                                message=AssistantPromptMessage(
                                    # with ensure_ascii=True, non-ASCII text may be garbled in the web UI
                                    content=json.dumps(chunk, ensure_ascii=False)
                                ),
                                usage=None
                            )
                        )
                else:
                    scratchpad.agent_response += chunk
                    scratchpad.thought += chunk
                    yield LLMResultChunk(
                        model=self.model_config.model,
                        prompt_messages=prompt_messages,
                        system_fingerprint='',
                        delta=LLMResultChunkDelta(
                            index=0,
                            message=AssistantPromptMessage(
                                content=chunk
                            ),
                            usage=None
                        )
                    )

            scratchpad.thought = scratchpad.thought.strip() or 'I am thinking about how to help you'
            agent_scratchpad.append(scratchpad)

            # get llm usage
            if 'usage' in usage_dict:
                increase_usage(llm_usage, usage_dict['usage'])
            else:
                usage_dict['usage'] = LLMUsage.empty_usage()

            self.save_agent_thought(agent_thought=agent_thought,
                                    tool_name=scratchpad.action.action_name if scratchpad.action else '',
                                    tool_input=scratchpad.action.action_input if scratchpad.action else '',
                                    thought=scratchpad.thought,
                                    observation='',
                                    answer=scratchpad.agent_response,
                                    messages_ids=[],
                                    llm_usage=usage_dict['usage'])

            if scratchpad.action and scratchpad.action.action_name.lower() != "final answer":
                self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)

            if not scratchpad.action:
                # failed to extract an action, return the response as the final answer
                final_answer = scratchpad.agent_response or ''
            else:
                if scratchpad.action.action_name.lower() == "final answer":
                    # action is final answer, return final answer directly
                    try:
                        final_answer = scratchpad.action.action_input if \
                            isinstance(scratchpad.action.action_input, str) else \
                            json.dumps(scratchpad.action.action_input)
                    except TypeError:
                        # json.dumps raises TypeError for non-serializable input
                        final_answer = f'{scratchpad.action.action_input}'
                else:
                    function_call_state = True

                    # action is a tool call, invoke the tool
                    tool_call_name = scratchpad.action.action_name
                    tool_call_args = scratchpad.action.action_input
                    tool_instance = tool_instances.get(tool_call_name)
                    if not tool_instance:
                        answer = f"there is no tool named {tool_call_name}"
                        self.save_agent_thought(agent_thought=agent_thought,
                                                tool_name='',
                                                tool_input='',
                                                thought=None,
                                                observation=answer,
                                                answer=answer,
                                                messages_ids=[])
                        self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
                    else:
                        # invoke tool
                        error_response = None
                        try:
                            if isinstance(tool_call_args, str):
                                try:
                                    tool_call_args = json.loads(tool_call_args)
                                except json.JSONDecodeError:
                                    # not json, pass the raw string through
                                    pass
                            tool_response = tool_instance.invoke(
                                user_id=self.user_id,
                                tool_parameters=tool_call_args
                            )
                            # transform tool response into an llm friendly response
                            tool_response = self.transform_tool_invoke_messages(tool_response)
                            # extract binary data from the tool invoke message
                            binary_files = self.extract_tool_response_binary(tool_response)
                            # create message files
                            message_files = self.create_message_files(binary_files)
                            # publish files
                            for message_file, save_as in message_files:
                                if save_as:
                                    self.variables_pool.set_file(tool_name=tool_call_name,
                                                                 value=message_file.id,
                                                                 name=save_as)
                                self.queue_manager.publish_message_file(message_file, PublishFrom.APPLICATION_MANAGER)

                            message_file_ids = [message_file.id for message_file, _ in message_files]
                        except ToolProviderCredentialValidationError:
                            error_response = "Please check your tool provider credentials"
                        except (ToolNotFoundError, ToolNotSupportedError, ToolProviderNotFoundError):
                            error_response = f"there is no tool named {tool_call_name}"
                        except ToolParameterValidationError as e:
                            error_response = f"tool parameter validation error: {e}, please check your tool parameters"
                        except ToolInvokeError as e:
                            error_response = f"tool invoke error: {e}"
                        except Exception as e:
                            error_response = f"unknown error: {e}"

                        if error_response:
                            observation = error_response
                        else:
                            observation = self._convert_tool_response_to_str(tool_response)

                        # save scratchpad
                        scratchpad.observation = observation

                        # save agent thought
                        self.save_agent_thought(
                            agent_thought=agent_thought,
                            tool_name=tool_call_name,
                            tool_input=tool_call_args,
                            thought=None,
                            observation=observation,
                            answer=scratchpad.agent_response,
                            messages_ids=message_file_ids,
                        )
                        self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)

            # update prompt tool messages
            for prompt_tool in prompt_messages_tools:
                self.update_prompt_message_tool(tool_instances[prompt_tool.name], prompt_tool)

            iteration_step += 1
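
        # the loop has ended; stream the accumulated final answer and persist it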
        yield LLMResultChunk(
            model=model_instance.model,
            prompt_messages=prompt_messages,
            delta=LLMResultChunkDelta(
                index=0,
                message=AssistantPromptMessage(
                    content=final_answer
                ),
                usage=llm_usage['usage']
            ),
            system_fingerprint=''
        )

        # save agent thought
        self.save_agent_thought(
            agent_thought=agent_thought,
            tool_name='',
            tool_input='',
            thought=final_answer,
            observation='',
            answer=final_answer,
            messages_ids=[]
        )

        self.update_db_variables(self.variables_pool, self.db_variables_pool)
        # publish end event
        self.queue_manager.publish_message_end(LLMResult(
            model=model_instance.model,
            prompt_messages=prompt_messages,
            message=AssistantPromptMessage(
                content=final_answer
            ),
            usage=llm_usage['usage'] if llm_usage['usage'] else LLMUsage.empty_usage(),
            system_fingerprint=''
        ), PublishFrom.APPLICATION_MANAGER)

    def _handle_stream_react(self, llm_response: Generator[LLMResultChunk, None, None], usage: dict) \
            -> Generator[Union[str, dict], None, None]:
        """
        Handle streamed ReAct output: yield plain text and parsed action dicts
        """
        def parse_json(json_str):
            try:
                return json.loads(json_str.strip())
            except Exception:
                # not valid json, return the raw string
                return json_str

        def extract_json_from_code_block(code_block) -> Generator[Union[dict, str], None, None]:
            code_blocks = re.findall(r'```(.*?)```', code_block, re.DOTALL)
            if not code_blocks:
                return
            for block in code_blocks:
                # strip a leading language tag such as the "json" in ```json
                json_text = re.sub(r'^[a-zA-Z]+\n', '', block.strip(), flags=re.MULTILINE)
                yield parse_json(json_text)
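
        # streaming parser state: code_block_* track ``` fenced blocks,
        # json_* track brace depth for bare JSON outside of fences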
        code_block_cache = ''
        code_block_delimiter_count = 0
        in_code_block = False
        json_cache = ''
        json_quote_count = 0
        in_json = False
        got_json = False

        for response in llm_response:
            if response.delta.usage:
                # capture token usage reported on the stream for the caller
                usage['usage'] = response.delta.usage
            response = response.delta.message.content
            if not isinstance(response, str):
                continue

            # scan the content character by character
            index = 0
            while index < len(response):
                steps = 1
                delta = response[index:index + steps]

                if delta == '`':
                    code_block_cache += delta
                    code_block_delimiter_count += 1
                else:
                    if not in_code_block:
                        if code_block_delimiter_count > 0:
                            yield code_block_cache
                            code_block_cache = ''
                    else:
                        code_block_cache += delta
                    code_block_delimiter_count = 0

                if code_block_delimiter_count == 3:
                    if in_code_block:
                        yield from extract_json_from_code_block(code_block_cache)
                        code_block_cache = ''
                    in_code_block = not in_code_block
                    code_block_delimiter_count = 0

                if not in_code_block:
                    # handle a bare json object
                    if delta == '{':
                        json_quote_count += 1
                        in_json = True
                        json_cache += delta
                    elif delta == '}':
                        json_cache += delta
                        if json_quote_count > 0:
                            json_quote_count -= 1
                            if json_quote_count == 0:
                                in_json = False
                                got_json = True
                                index += steps
                                continue
                    else:
                        if in_json:
                            json_cache += delta

                    if got_json:
                        got_json = False
                        yield parse_json(json_cache)
                        json_cache = ''
                        json_quote_count = 0
                        in_json = False

                if not in_code_block and not in_json:
                    yield delta.replace('`', '')

                index += steps

        # flush any unfinished buffers at the end of the stream
        if code_block_cache:
            yield code_block_cache

        if json_cache:
            yield parse_json(json_cache)

    def _fill_in_inputs_from_external_data_tools(self, instruction: str, inputs: dict) -> str:
        """
        Fill in {{key}} placeholders in the instruction with inputs from external data tools
        """
        for key, value in inputs.items():
            try:
                instruction = instruction.replace(f'{{{{{key}}}}}', str(value))
            except Exception:
                continue

        return instruction

    def _init_agent_scratchpad(self,
                               agent_scratchpad: list[AgentScratchpadUnit],
                               messages: list[PromptMessage]
                               ) -> list[AgentScratchpadUnit]:
        """
        Init agent scratchpad from history prompt messages
        """
        current_scratchpad: Optional[AgentScratchpadUnit] = None
        for message in messages:
            if isinstance(message, AssistantPromptMessage):
                current_scratchpad = AgentScratchpadUnit(
                    agent_response=message.content,
                    thought=message.content or 'I am thinking about how to help you',
                    action_str='',
                    action=None,
                    observation=None,
                )
                if message.tool_calls:
                    try:
                        current_scratchpad.action = AgentScratchpadUnit.Action(
                            action_name=message.tool_calls[0].function.name,
                            action_input=json.loads(message.tool_calls[0].function.arguments)
                        )
                    except Exception:
                        # tool call arguments are not valid json, leave action unset
                        pass
                agent_scratchpad.append(current_scratchpad)
            elif isinstance(message, ToolPromptMessage):
                if current_scratchpad:
                    current_scratchpad.observation = message.content

        return agent_scratchpad

    def _check_cot_prompt_messages(self, mode: Literal["completion", "chat"],
                                   agent_prompt_message: AgentPromptEntity,
                                   ):
        """
        Check chain of thought prompt messages; a standard prompt message looks like:

            Respond to the human as helpfully and accurately as possible.

            {{instruction}}

            You have access to the following tools:

            {{tools}}

            Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
            Valid action values: "Final Answer" or {{tool_names}}

            Provide only ONE action per $JSON_BLOB, as shown:

            ```
            {
                "action": $TOOL_NAME,
                "action_input": $ACTION_INPUT
            }
            ```
        """
        # parse agent prompt message
        first_prompt = agent_prompt_message.first_prompt
        next_iteration = agent_prompt_message.next_iteration

        if not isinstance(first_prompt, str) or not isinstance(next_iteration, str):
            raise ValueError("first_prompt or next_iteration is required in CoT agent mode")

        # check instruction, tools, and tool_names slots
        if "{{instruction}}" not in first_prompt:
            raise ValueError("{{instruction}} is required in first_prompt")
        if "{{tools}}" not in first_prompt:
            raise ValueError("{{tools}} is required in first_prompt")
        if "{{tool_names}}" not in first_prompt:
            raise ValueError("{{tool_names}} is required in first_prompt")

        if mode == "completion":
            if "{{query}}" not in first_prompt:
                raise ValueError("{{query}} is required in first_prompt")
            if "{{agent_scratchpad}}" not in first_prompt:
                raise ValueError("{{agent_scratchpad}} is required in first_prompt")
            if "{{observation}}" not in next_iteration:
                raise ValueError("{{observation}} is required in next_iteration")

    def _convert_scratchpad_list_to_str(self, agent_scratchpad: list[AgentScratchpadUnit]) -> str:
        """
        Convert agent scratchpad list to str
        """
        next_iteration = self.app_orchestration_config.agent.prompt.next_iteration

        result = ''
        for scratchpad in agent_scratchpad:
            result += (scratchpad.thought or '') + (scratchpad.action_str or '') + \
                next_iteration.replace("{{observation}}", scratchpad.observation or 'It seems that no response is available')

        return result

    def _organize_cot_prompt_messages(self, mode: Literal["completion", "chat"],
                                      prompt_messages: list[PromptMessage],
                                      tools: list[PromptMessageTool],
                                      agent_scratchpad: list[AgentScratchpadUnit],
                                      agent_prompt_message: AgentPromptEntity,
                                      instruction: str,
                                      input: str,
                                      ) -> list[PromptMessage]:
        """
        Organize chain of thought prompt messages; a standard prompt message looks like:

            Respond to the human as helpfully and accurately as possible.

            {{instruction}}

            You have access to the following tools:

            {{tools}}

            Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
            Valid action values: "Final Answer" or {{tool_names}}

            Provide only ONE action per $JSON_BLOB, as shown:

            ```
            {{{{
                "action": $TOOL_NAME,
                "action_input": $ACTION_INPUT
            }}}}
            ```
        """
        self._check_cot_prompt_messages(mode, agent_prompt_message)

        # parse agent prompt message
        first_prompt = agent_prompt_message.first_prompt

        # parse tools
        tools_str = self._jsonify_tool_prompt_messages(tools)

        # parse tool names
        tool_names = '"' + '","'.join([tool.name for tool in tools]) + '"'
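        # e.g. produces '"tool_a","tool_b"' for tools named tool_a and tool_b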

        # get system message
        system_message = first_prompt.replace("{{instruction}}", instruction) \
            .replace("{{tools}}", tools_str) \
            .replace("{{tool_names}}", tool_names)

        # organize prompt messages
        if mode == "chat":
            # override system message
            overridden = False
            prompt_messages = prompt_messages.copy()
            for prompt_message in prompt_messages:
                if isinstance(prompt_message, SystemPromptMessage):
                    prompt_message.content = system_message
                    overridden = True
                    break

            # convert tool prompt messages to user prompt messages
            for idx, prompt_message in enumerate(prompt_messages):
                if isinstance(prompt_message, ToolPromptMessage):
                    prompt_messages[idx] = UserPromptMessage(
                        content=prompt_message.content
                    )

            if not overridden:
                prompt_messages.insert(0, SystemPromptMessage(
                    content=system_message,
                ))

            # add assistant message
            if len(agent_scratchpad) > 0 and not self._is_first_iteration:
                prompt_messages.append(AssistantPromptMessage(
                    content=(agent_scratchpad[-1].thought or '') + (agent_scratchpad[-1].action_str or ''),
                ))

            # add user message
            if len(agent_scratchpad) > 0 and not self._is_first_iteration:
                prompt_messages.append(UserPromptMessage(
                    content=(agent_scratchpad[-1].observation or 'It seems that no response is available'),
                ))
            self._is_first_iteration = False

            return prompt_messages
        elif mode == "completion":
            # parse agent scratchpad
            agent_scratchpad_str = self._convert_scratchpad_list_to_str(agent_scratchpad)
            self._is_first_iteration = False
            # parse prompt messages
            return [UserPromptMessage(
                content=first_prompt.replace("{{instruction}}", instruction)
                .replace("{{tools}}", tools_str)
                .replace("{{tool_names}}", tool_names)
                .replace("{{query}}", input)
                .replace("{{agent_scratchpad}}", agent_scratchpad_str),
            )]
        else:
            raise ValueError(f"mode {mode} is not supported")

    def _jsonify_tool_prompt_messages(self, tools: list[PromptMessageTool]) -> str:
        """
        Jsonify tool prompt messages
        """
        tools = jsonable_encoder(tools)
        try:
            return json.dumps(tools, ensure_ascii=False)
        except Exception:
            # fall back to ASCII-escaped output if serialization fails
            return json.dumps(tools)