fix: max_tokens set to 1000 for "auto_generate_name"
parent 7790214620
commit d1be2e65b0
@@ -48,7 +48,7 @@ class LLMGenerator:
         response = cast(
             LLMResult,
             model_instance.invoke_llm(
-                prompt_messages=list(prompts), model_parameters={"max_tokens": 100, "temperature": 1}, stream=False
+                prompt_messages=list(prompts), model_parameters={"max_tokens": 1000, "temperature": 1}, stream=False
             ),
         )
         answer = cast(str, response.message.content)
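A minimal sketch of the call path this commit touches, for context. It assumes only what the diff itself shows: a model_instance object exposing invoke_llm() with the keyword arguments above, and a prompts sequence built by the surrounding method. The function name and wiring below are illustrative, not the actual LLMGenerator code.

# Hypothetical sketch, not the real LLMGenerator method.
def auto_generate_name_sketch(model_instance, prompts):
    # The commit's only change: the max_tokens budget rises from 100 to 1000,
    # giving the auto-generated name more room before it is cut off.
    response = model_instance.invoke_llm(
        prompt_messages=list(prompts),
        model_parameters={"max_tokens": 1000, "temperature": 1},
        stream=False,
    )
    # Mirrors the diff: the answer is read from the response message content.
    return response.message.content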