From c53a0db4e09da1aa54fd56ceb952f740004dfac9 Mon Sep 17 00:00:00 2001
From: -LAN-
Date: Tue, 18 Mar 2025 14:40:54 +0800
Subject: [PATCH] feat: remove pre-calculation of token counts in ChatAppRunner

Signed-off-by: -LAN-
---
 api/core/app/apps/chat/app_runner.py | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/api/core/app/apps/chat/app_runner.py b/api/core/app/apps/chat/app_runner.py
index 46c8031633..08b9fa1dd8 100644
--- a/api/core/app/apps/chat/app_runner.py
+++ b/api/core/app/apps/chat/app_runner.py
@@ -61,20 +61,6 @@ class ChatAppRunner(AppRunner):
         )
         image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW
 
-        # Pre-calculate the number of tokens of the prompt messages,
-        # and return the rest number of tokens by model context token size limit and max token size limit.
-        # If the rest number of tokens is not enough, raise exception.
-        # Include: prompt template, inputs, query(optional), files(optional)
-        # Not Include: memory, external data, dataset context
-        self.get_pre_calculate_rest_tokens(
-            app_record=app_record,
-            model_config=application_generate_entity.model_conf,
-            prompt_template_entity=app_config.prompt_template,
-            inputs=inputs,
-            files=files,
-            query=query,
-        )
-
         memory = None
         if application_generate_entity.conversation_id:
             # get memory of conversation (read-only)
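
For context, the removed call guarded against prompts that would not fit in the model's context window: it estimated the token count of the prompt template, inputs, query, and files, and raised if nothing was left after reserving room for the completion. A minimal sketch of that kind of pre-check is below, using a hypothetical helper and a naive whitespace tokenizer; it is not Dify's actual get_pre_calculate_rest_tokens implementation.

    # Hypothetical sketch of a prompt-token pre-check; the function name and the
    # naive whitespace tokenizer are illustrative, not Dify's real implementation.
    def pre_calculate_rest_tokens(prompt_text: str, context_size: int, max_tokens: int) -> int:
        """Return how many tokens remain for memory/context, or raise if the prompt is too large."""
        prompt_tokens = len(prompt_text.split())  # stand-in for the model's tokenizer
        rest_tokens = context_size - max_tokens - prompt_tokens
        if rest_tokens < 0:
            raise ValueError(
                f"Prompt is too long: {prompt_tokens} prompt tokens exceed the "
                f"{context_size - max_tokens} tokens available after reserving max_tokens."
            )
        return rest_tokens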