# fc_agent_runner.py

import json
import logging
from collections.abc import Generator
from copy import deepcopy
from typing import Any, Optional, Union

from core.agent.base_agent_runner import BaseAgentRunner
from core.app.apps.base_app_queue_manager import PublishFrom
from core.app.entities.queue_entities import QueueAgentThoughtEvent, QueueMessageEndEvent, QueueMessageFileEvent
from core.file import file_manager
from core.model_runtime.entities import (
    AssistantPromptMessage,
    LLMResult,
    LLMResultChunk,
    LLMResultChunkDelta,
    LLMUsage,
    PromptMessage,
    PromptMessageContent,
    PromptMessageContentType,
    SystemPromptMessage,
    TextPromptMessageContent,
    ToolPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.entities.message_entities import ImagePromptMessageContent
from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform
from core.tools.entities.tool_entities import ToolInvokeMeta
from core.tools.tool_engine import ToolEngine
from models.model import Message

logger = logging.getLogger(__name__)


class FunctionCallAgentRunner(BaseAgentRunner):
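    """
    Function-calling agent runner.

    Drives an OpenAI-style tool-use loop: the model is invoked with the
    available tool schemas, any tool calls it returns are executed through
    ToolEngine, the observations are fed back into the prompt, and the loop
    repeats until the model answers without tool calls or the iteration
    budget is exhausted.
    """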

    def run(self, message: Message, query: str, **kwargs: Any) -> Generator[LLMResultChunk, None, None]:
        """
        Run FunctionCall agent application
        """
        self.query = query
        app_generate_entity = self.application_generate_entity

        app_config = self.app_config
        assert app_config is not None, "app_config is required"
        assert app_config.agent is not None, "app_config.agent is required"

        # convert tools into ModelRuntime Tool format
        tool_instances, prompt_messages_tools = self._init_prompt_tools()

        iteration_step = 1
        max_iteration_steps = min(app_config.agent.max_iteration, 5) + 1
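        # the extra step lets the loop make one last, tool-free pass so the
        # model can produce a closing answer after its final tool call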

        # loop until the model stops requesting tool calls
        function_call_state = True
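        # usage is kept in a one-key dict so the nested increase_usage closure
        # below can mutate the shared accumulator in place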
        llm_usage: dict[str, Optional[LLMUsage]] = {"usage": None}
        final_answer = ""

        # get tracing instance
        trace_manager = app_generate_entity.trace_manager

        def increase_usage(final_llm_usage_dict: dict[str, Optional[LLMUsage]], usage: LLMUsage):
            if not final_llm_usage_dict["usage"]:
                final_llm_usage_dict["usage"] = usage
            else:
                llm_usage = final_llm_usage_dict["usage"]
                llm_usage.prompt_tokens += usage.prompt_tokens
                llm_usage.completion_tokens += usage.completion_tokens
                llm_usage.prompt_price += usage.prompt_price
                llm_usage.completion_price += usage.completion_price
                llm_usage.total_price += usage.total_price

        model_instance = self.model_instance
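
        # main agent loop: one model invocation per iteration; each iteration
        # records its own agent thought and may trigger tool executions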
        while function_call_state and iteration_step <= max_iteration_steps:
            function_call_state = False

            if iteration_step == max_iteration_steps:
                # on the last iteration, remove all tools to force a plain answer
                prompt_messages_tools = []

            message_file_ids: list[str] = []
            agent_thought = self.create_agent_thought(
                message_id=message.id, message="", tool_name="", tool_input="", messages_ids=message_file_ids
            )

            # recalculate the LLM max tokens against the assembled prompt
            prompt_messages = self._organize_prompt_messages()
            self.recalc_llm_max_tokens(self.model_config, prompt_messages)

            # invoke model
            chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
                prompt_messages=prompt_messages,
                model_parameters=app_generate_entity.model_conf.parameters,
                tools=prompt_messages_tools,
                stop=app_generate_entity.model_conf.stop,
                stream=self.stream_tool_call,
                user=self.user_id,
                callbacks=[],
            )

            tool_calls: list[tuple[str, str, dict[str, Any]]] = []

            # save full response
            response = ""

            # save tool call names and inputs
            tool_call_names = ""
            tool_call_inputs = ""

            current_llm_usage = None
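
            # invoke_llm returns a generator when streaming and a single
            # LLMResult otherwise; both branches below collect tool calls,
            # response text, and token usage in the same shape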
            if isinstance(chunks, Generator):
                is_first_chunk = True
                for chunk in chunks:
                    if is_first_chunk:
                        self.queue_manager.publish(
                            QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER
                        )
                        is_first_chunk = False

                    # check if there is any tool call
                    if self.check_tool_calls(chunk):
                        function_call_state = True
                        tool_calls.extend(self.extract_tool_calls(chunk) or [])
                        tool_call_names = ";".join([tool_call[1] for tool_call in tool_calls])
                        try:
                            tool_call_inputs = json.dumps(
                                {tool_call[1]: tool_call[2] for tool_call in tool_calls}, ensure_ascii=False
                            )
                        except (TypeError, ValueError):
                            # json.dumps raises TypeError/ValueError, not JSONDecodeError;
                            # retry with ASCII escaping to avoid encoding errors
                            tool_call_inputs = json.dumps({tool_call[1]: tool_call[2] for tool_call in tool_calls})

                    if chunk.delta.message and chunk.delta.message.content:
                        if isinstance(chunk.delta.message.content, list):
                            for content in chunk.delta.message.content:
                                response += content.data
                        else:
                            response += str(chunk.delta.message.content)

                    if chunk.delta.usage:
                        increase_usage(llm_usage, chunk.delta.usage)
                        current_llm_usage = chunk.delta.usage

                    yield chunk
            else:
                result = chunks
                # check if there is any tool call
                if self.check_blocking_tool_calls(result):
                    function_call_state = True
                    tool_calls.extend(self.extract_blocking_tool_calls(result) or [])
                    tool_call_names = ";".join([tool_call[1] for tool_call in tool_calls])
                    try:
                        tool_call_inputs = json.dumps(
                            {tool_call[1]: tool_call[2] for tool_call in tool_calls}, ensure_ascii=False
                        )
                    except (TypeError, ValueError):
                        # json.dumps raises TypeError/ValueError, not JSONDecodeError;
                        # retry with ASCII escaping to avoid encoding errors
                        tool_call_inputs = json.dumps({tool_call[1]: tool_call[2] for tool_call in tool_calls})

                if result.usage:
                    increase_usage(llm_usage, result.usage)
                    current_llm_usage = result.usage

                if result.message and result.message.content:
                    if isinstance(result.message.content, list):
                        for content in result.message.content:
                            response += content.data
                    else:
                        response += str(result.message.content)

                if not result.message.content:
                    result.message.content = ""

                self.queue_manager.publish(
                    QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER
                )

                yield LLMResultChunk(
                    model=model_instance.model,
                    prompt_messages=result.prompt_messages,
                    system_fingerprint=result.system_fingerprint,
                    delta=LLMResultChunkDelta(
                        index=0,
                        message=result.message,
                        usage=result.usage,
                    ),
                )
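
            # record this assistant turn in the scratchpad: either the tool
            # calls it requested, or its plain-text answer if there were none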
            assistant_message = AssistantPromptMessage(content="", tool_calls=[])
            if tool_calls:
                assistant_message.tool_calls = [
                    AssistantPromptMessage.ToolCall(
                        id=tool_call[0],
                        type="function",
                        function=AssistantPromptMessage.ToolCall.ToolCallFunction(
                            name=tool_call[1], arguments=json.dumps(tool_call[2], ensure_ascii=False)
                        ),
                    )
                    for tool_call in tool_calls
                ]
            else:
                assistant_message.content = response

            self._current_thoughts.append(assistant_message)

            # save thought
            self.save_agent_thought(
                agent_thought=agent_thought,
                tool_name=tool_call_names,
                tool_input=tool_call_inputs,
                thought=response,
                tool_invoke_meta=None,
                observation=None,
                answer=response,
                messages_ids=[],
                llm_usage=current_llm_usage,
            )
            self.queue_manager.publish(
                QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER
            )

            final_answer += response + "\n"

            # call tools
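            # (an unknown tool name becomes an error observation instead of an
            # exception, so the model can recover on the next iteration)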
            tool_responses = []
            for tool_call_id, tool_call_name, tool_call_args in tool_calls:
                tool_instance = tool_instances.get(tool_call_name)
                if not tool_instance:
                    tool_response = {
                        "tool_call_id": tool_call_id,
                        "tool_call_name": tool_call_name,
                        "tool_response": f"there is no tool named {tool_call_name}",
                        "meta": ToolInvokeMeta.error_instance(f"there is no tool named {tool_call_name}").to_dict(),
                    }
                else:
                    # invoke tool
                    tool_invoke_response, message_files, tool_invoke_meta = ToolEngine.agent_invoke(
                        tool=tool_instance,
                        tool_parameters=tool_call_args,
                        user_id=self.user_id,
                        tenant_id=self.tenant_id,
                        message=self.message,
                        invoke_from=self.application_generate_entity.invoke_from,
                        agent_tool_callback=self.agent_callback,
                        trace_manager=trace_manager,
                        app_id=self.application_generate_entity.app_config.app_id,
                        message_id=self.message.id,
                        conversation_id=self.conversation.id,
                    )
                    # publish message files and remember their ids
                    for message_file_id in message_files:
                        self.queue_manager.publish(
                            QueueMessageFileEvent(message_file_id=message_file_id), PublishFrom.APPLICATION_MANAGER
                        )
                        message_file_ids.append(message_file_id)

                    tool_response = {
                        "tool_call_id": tool_call_id,
                        "tool_call_name": tool_call_name,
                        "tool_response": tool_invoke_response,
                        "meta": tool_invoke_meta.to_dict(),
                    }

                tool_responses.append(tool_response)
                if tool_response["tool_response"] is not None:
                    self._current_thoughts.append(
                        ToolPromptMessage(
                            content=str(tool_response["tool_response"]),
                            tool_call_id=tool_call_id,
                            name=tool_call_name,
                        )
                    )

            if len(tool_responses) > 0:
                # save agent thought
                self.save_agent_thought(
                    agent_thought=agent_thought,
                    tool_name="",
                    tool_input="",
                    thought="",
                    tool_invoke_meta={
                        tool_response["tool_call_name"]: tool_response["meta"] for tool_response in tool_responses
                    },
                    observation={
                        tool_response["tool_call_name"]: tool_response["tool_response"]
                        for tool_response in tool_responses
                    },
                    answer="",
                    messages_ids=message_file_ids,
                )
                self.queue_manager.publish(
                    QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER
                )

            # update prompt tool
            for prompt_tool in prompt_messages_tools:
                self.update_prompt_message_tool(tool_instances[prompt_tool.name], prompt_tool)

            iteration_step += 1

        # publish end event
        self.queue_manager.publish(
            QueueMessageEndEvent(
                llm_result=LLMResult(
                    model=model_instance.model,
                    prompt_messages=prompt_messages,
                    message=AssistantPromptMessage(content=final_answer),
                    usage=llm_usage["usage"] or LLMUsage.empty_usage(),
                    system_fingerprint="",
                )
            ),
            PublishFrom.APPLICATION_MANAGER,
        )

    def check_tool_calls(self, llm_result_chunk: LLMResultChunk) -> bool:
        """
        Check if there is any tool call in llm result chunk
        """
        return bool(llm_result_chunk.delta.message.tool_calls)

    def check_blocking_tool_calls(self, llm_result: LLMResult) -> bool:
        """
        Check if there is any blocking tool call in llm result
        """
        return bool(llm_result.message.tool_calls)

    def extract_tool_calls(self, llm_result_chunk: LLMResultChunk) -> list[tuple[str, str, dict[str, Any]]]:
        """
        Extract tool calls from llm result chunk

        Returns:
            List[Tuple[str, str, Dict[str, Any]]]: [(tool_call_id, tool_call_name, tool_call_args)]
        """
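        # e.g. a delta carrying ToolCall(id="call_1",
        # function=ToolCallFunction(name="get_weather", arguments='{"city": "Berlin"}'))
        # is extracted as [("call_1", "get_weather", {"city": "Berlin"})]
        # (tool name and arguments here are illustrative only)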
        tool_calls = []
        for prompt_message in llm_result_chunk.delta.message.tool_calls:
            args = {}
            if prompt_message.function.arguments != "":
                args = json.loads(prompt_message.function.arguments)

            tool_calls.append(
                (
                    prompt_message.id,
                    prompt_message.function.name,
                    args,
                )
            )

        return tool_calls

    def extract_blocking_tool_calls(self, llm_result: LLMResult) -> list[tuple[str, str, dict[str, Any]]]:
        """
        Extract blocking tool calls from llm result

        Returns:
            List[Tuple[str, str, Dict[str, Any]]]: [(tool_call_id, tool_call_name, tool_call_args)]
        """
        tool_calls = []
        for prompt_message in llm_result.message.tool_calls:
            args = {}
            if prompt_message.function.arguments != "":
                args = json.loads(prompt_message.function.arguments)

            tool_calls.append(
                (
                    prompt_message.id,
                    prompt_message.function.name,
                    args,
                )
            )

        return tool_calls

    def _init_system_message(self, prompt_template: str, prompt_messages: list[PromptMessage]) -> list[PromptMessage]:
        """
        Initialize system message
        """
        if not prompt_messages and prompt_template:
            return [
                SystemPromptMessage(content=prompt_template),
            ]

        if prompt_messages and not isinstance(prompt_messages[0], SystemPromptMessage) and prompt_template:
            prompt_messages.insert(0, SystemPromptMessage(content=prompt_template))

        return prompt_messages or []

    def _organize_user_query(self, query: str, prompt_messages: list[PromptMessage]) -> list[PromptMessage]:
        """
        Organize user query
        """
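        # with file attachments the query becomes a multi-part message: one
        # text part plus one content part per file; image detail falls back
        # to LOW when the upload config does not specify it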
        if self.files:
            prompt_message_contents: list[PromptMessageContent] = []
            prompt_message_contents.append(TextPromptMessageContent(data=query))

            # get image detail config
            image_detail_config = (
                self.application_generate_entity.file_upload_config.image_config.detail
                if (
                    self.application_generate_entity.file_upload_config
                    and self.application_generate_entity.file_upload_config.image_config
                )
                else None
            )
            image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW

            for file in self.files:
                prompt_message_contents.append(
                    file_manager.to_prompt_message_content(
                        file,
                        image_detail_config=image_detail_config,
                    )
                )
            prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
        else:
            prompt_messages.append(UserPromptMessage(content=query))

        return prompt_messages

    def _clear_user_prompt_image_messages(self, prompt_messages: list[PromptMessage]) -> list[PromptMessage]:
        """
        For now, GPT models can combine function calling with vision only on
        the first iteration, so image contents are replaced with text
        placeholders in later iterations.
        """
        prompt_messages = deepcopy(prompt_messages)

        for prompt_message in prompt_messages:
            if isinstance(prompt_message, UserPromptMessage):
                if isinstance(prompt_message.content, list):
                    prompt_message.content = "\n".join(
                        [
                            content.data
                            if content.type == PromptMessageContentType.TEXT
                            else "[image]"
                            if content.type == PromptMessageContentType.IMAGE
                            else "[file]"
                            for content in prompt_message.content
                        ]
                    )

        return prompt_messages

    def _organize_prompt_messages(self):
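        """
        Assemble the prompt for the next model call: the system template
        first, then the token-trimmed history, the current user query, and
        the tool-call scratchpad accumulated during this run.
        """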
        prompt_template = self.app_config.prompt_template.simple_prompt_template or ""
        self.history_prompt_messages = self._init_system_message(prompt_template, self.history_prompt_messages)
        query_prompt_messages = self._organize_user_query(self.query or "", [])

        self.history_prompt_messages = AgentHistoryPromptTransform(
            model_config=self.model_config,
            prompt_messages=[*query_prompt_messages, *self._current_thoughts],
            history_messages=self.history_prompt_messages,
            memory=self.memory,
        ).get_prompt()

        prompt_messages = [*self.history_prompt_messages, *query_prompt_messages, *self._current_thoughts]
        if len(self._current_thoughts) != 0:
            # clear image contents after the first iteration
            prompt_messages = self._clear_user_prompt_image_messages(prompt_messages)
        return prompt_messages