fix: max_tokens set to 1000 for "auto_generate_name"

yzz 2025-02-24 15:49:03 +08:00
parent 7790214620
commit d1be2e65b0


@@ -48,7 +48,7 @@ class LLMGenerator:
         response = cast(
             LLMResult,
             model_instance.invoke_llm(
-                prompt_messages=list(prompts), model_parameters={"max_tokens": 100, "temperature": 1}, stream=False
+                prompt_messages=list(prompts), model_parameters={"max_tokens": 1000, "temperature": 1}, stream=False
             ),
         )
         answer = cast(str, response.message.content)
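
For reference, a minimal, self-contained sketch of the call pattern this hunk touches. The LLMResult dataclass, the model_instance stub, and the prompt list below are assumed stand-ins (not the real classes from the surrounding file), included only so the snippet runs on its own and shows where the raised max_tokens value flows; the only identifiers taken from the source are those visible in the diff.

    from dataclasses import dataclass
    from typing import cast

    # Stand-ins for the real objects used in this file; only the shape
    # exercised by the hunk above is modeled here.
    @dataclass
    class _Message:
        content: str

    @dataclass
    class LLMResult:
        message: _Message

    class _StubModelInstance:
        def invoke_llm(self, prompt_messages, model_parameters, stream):
            # The real provider call generates the conversation name; this stub
            # simply echoes back the max_tokens value it received.
            return LLMResult(
                message=_Message(content=f"name (max_tokens={model_parameters['max_tokens']})")
            )

    model_instance = _StubModelInstance()
    prompts = ["<prompt messages built earlier in auto_generate_name>"]

    response = cast(
        LLMResult,
        model_instance.invoke_llm(
            prompt_messages=list(prompts), model_parameters={"max_tokens": 1000, "temperature": 1}, stream=False
        ),
    )
    answer = cast(str, response.message.content)
    print(answer)  # -> "name (max_tokens=1000)"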