From ed5596a8f45a5669fc7e63d7db443db7962cfead Mon Sep 17 00:00:00 2001
From: Jacky Wu
Date: Tue, 11 Mar 2025 12:43:24 +0800
Subject: [PATCH] fix: avoid llm node result var not init issue while do retry.
 (#14286)

---
 api/core/workflow/nodes/llm/node.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py
index b61b5b5cb5..fe0ed3e564 100644
--- a/api/core/workflow/nodes/llm/node.py
+++ b/api/core/workflow/nodes/llm/node.py
@@ -94,6 +94,9 @@ class LLMNode(BaseNode[LLMNodeData]):
     def _run(self) -> Generator[NodeEvent | InNodeEvent, None, None]:
         node_inputs: Optional[dict[str, Any]] = None
         process_data = None
+        result_text = ""
+        usage = LLMUsage.empty_usage()
+        finish_reason = None
 
         try:
             # init messages template
@@ -178,9 +181,6 @@ class LLMNode(BaseNode[LLMNodeData]):
                 stop=stop,
             )
 
-            result_text = ""
-            usage = LLMUsage.empty_usage()
-            finish_reason = None
             for event in generator:
                 if isinstance(event, RunStreamChunkEvent):
                     yield event