From f0c9bb7c91d7bdc1367aa66ef0b0fefa23cf4a3a Mon Sep 17 00:00:00 2001
From: Yeuoly <45712896+Yeuoly@users.noreply.github.com>
Date: Thu, 1 Feb 2024 13:08:31 +0800
Subject: [PATCH] fix: typo (#2318)

---
 api/core/model_runtime/model_providers/tongyi/llm/llm.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/api/core/model_runtime/model_providers/tongyi/llm/llm.py b/api/core/model_runtime/model_providers/tongyi/llm/llm.py
index 033fdd2cc2..1e03bc801e 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/llm.py
+++ b/api/core/model_runtime/model_providers/tongyi/llm/llm.py
@@ -168,7 +168,7 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
 
         return result
 
-    def _handle_generate_stream_response(self, model: str, credentials: dict, responses: list[Generator],
+    def _handle_generate_stream_response(self, model: str, credentials: dict, responses: Generator,
                                          prompt_messages: list[PromptMessage]) -> Generator:
         """
         Handle llm stream response
@@ -182,7 +182,7 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
         for index, response in enumerate(responses):
             resp_finish_reason = response.output.finish_reason
             resp_content = response.output.text
-            useage = response.usage
+            usage = response.usage
 
             if resp_finish_reason is None and (resp_content is None or resp_content == ''):
                 continue
@@ -194,7 +194,7 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
             if resp_finish_reason is not None:
                 # transform usage
-                usage = self._calc_response_usage(model, credentials, useage.input_tokens, useage.output_tokens)
+                usage = self._calc_response_usage(model, credentials, usage.input_tokens, usage.output_tokens)
 
                 yield LLMResultChunk(
                     model=model,