diff --git a/api/core/model_runtime/model_providers/tongyi/llm/llm.py b/api/core/model_runtime/model_providers/tongyi/llm/llm.py
index 1dce372bba..bb987d4998 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/llm.py
+++ b/api/core/model_runtime/model_providers/tongyi/llm/llm.py
@@ -197,7 +197,7 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
         else:
             # nothing different between chat model and completion model in tongyi
             params["messages"] = self._convert_prompt_messages_to_tongyi_messages(prompt_messages)
-            response = Generation.call(**params, result_format="message", stream=stream, incremental_output=True)
+            response = Generation.call(**params, result_format="message", stream=stream, incremental_output=stream)
 
         if stream:
             return self._handle_generate_stream_response(model, credentials, response, prompt_messages)
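
For context on the one-token change: `incremental_output` controls whether each streamed chunk carries only the newly generated tokens, so it is only meaningful when `stream=True`; hardcoding it to `True` also sends it on blocking (non-streaming) calls, which the dashscope backend does not accept. Tying the flag to `stream` keeps the two consistent. Below is a minimal standalone sketch of the corrected pattern against the dashscope SDK; the model name `qwen-plus` and the messages are placeholders for illustration, and a valid `DASHSCOPE_API_KEY` is assumed to be set in the environment.

```python
from dashscope import Generation


def call_tongyi(messages: list[dict], stream: bool):
    # Tie incremental_output to stream, mirroring the fix above:
    # a blocking call never sends incremental_output=True, which
    # only makes sense when chunks are delivered incrementally.
    return Generation.call(
        model="qwen-plus",  # placeholder model name
        messages=messages,
        result_format="message",
        stream=stream,
        incremental_output=stream,
    )


messages = [{"role": "user", "content": "Hello"}]

# Streaming: iterate over chunks; with incremental_output=True each
# chunk's message content contains only the newly generated tokens.
for chunk in call_tongyi(messages, stream=True):
    print(chunk)

# Blocking: a single response object holding the full completion,
# with incremental_output=False implied by stream=False.
print(call_tongyi(messages, stream=False))
```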