Prechádzať zdrojové kódy

feat: ops trace add llm model (#7306)

Joe 7 mesiacov pred
rodič
commit
fee4d3f6ca

+ 1 - 0
api/core/ops/langfuse_trace/langfuse_trace.py

@@ -204,6 +204,7 @@ class LangFuseDataTrace(BaseTraceInstance):
                 node_generation_data = LangfuseGeneration(
                     name="llm",
                     trace_id=trace_id,
+                    model=process_data.get("model_name"),
                     parent_observation_id=node_execution_id,
                     start_time=created_at,
                     end_time=finished_at,

+ 7 - 2
api/core/ops/langsmith_trace/langsmith_trace.py

@@ -139,8 +139,7 @@ class LangSmithDataTrace(BaseTraceInstance):
                 json.loads(node_execution.execution_metadata) if node_execution.execution_metadata else {}
             )
             node_total_tokens = execution_metadata.get("total_tokens", 0)
-
-            metadata = json.loads(node_execution.execution_metadata) if node_execution.execution_metadata else {}
+            metadata = execution_metadata.copy()
             metadata.update(
                 {
                     "workflow_run_id": trace_info.workflow_run_id,
@@ -156,6 +155,12 @@ class LangSmithDataTrace(BaseTraceInstance):
             process_data = json.loads(node_execution.process_data) if node_execution.process_data else {}
             if process_data and process_data.get("model_mode") == "chat":
                 run_type = LangSmithRunType.llm
+                metadata.update(
+                    {
+                        "ls_provider": process_data.get("model_provider", ""),
+                        "ls_model_name": process_data.get("model_name", ""),
+                    }
+                )
             elif node_type == "knowledge-retrieval":
                 run_type = LangSmithRunType.retriever
             else:

+ 3 - 1
api/core/workflow/nodes/llm/llm_node.py

@@ -109,7 +109,9 @@ class LLMNode(BaseNode):
                 'prompts': PromptMessageUtil.prompt_messages_to_prompt_for_saving(
                     model_mode=model_config.mode,
                     prompt_messages=prompt_messages
-                )
+                ),
+                "model_provider": model_config.provider,
+                "model_name": model_config.model,
             }
 
             # handle invoke result