Merge branch "main" into feat/plugins

Yi 2024-12-05 15:08:09 +08:00
commit b8f9747849
71 changed files with 1619 additions and 1068 deletions

View File

@ -259,7 +259,7 @@ def migrate_knowledge_vector_database():
    skipped_count = 0
    total_count = 0
    vector_type = dify_config.VECTOR_STORE
-   upper_colletion_vector_types = {
+   upper_collection_vector_types = {
        VectorType.MILVUS,
        VectorType.PGVECTOR,
        VectorType.RELYT,
@ -267,7 +267,7 @@ def migrate_knowledge_vector_database():
        VectorType.ORACLE,
        VectorType.ELASTICSEARCH,
    }
-   lower_colletion_vector_types = {
+   lower_collection_vector_types = {
        VectorType.ANALYTICDB,
        VectorType.CHROMA,
        VectorType.MYSCALE,
@ -307,7 +307,7 @@ def migrate_knowledge_vector_database():
                continue
            collection_name = ""
            dataset_id = dataset.id
-           if vector_type in upper_colletion_vector_types:
+           if vector_type in upper_collection_vector_types:
                collection_name = Dataset.gen_collection_name_by_id(dataset_id)
            elif vector_type == VectorType.QDRANT:
                if dataset.collection_binding_id:
@ -323,7 +323,7 @@ def migrate_knowledge_vector_database():
                else:
                    collection_name = Dataset.gen_collection_name_by_id(dataset_id)
-           elif vector_type in lower_colletion_vector_types:
+           elif vector_type in lower_collection_vector_types:
                collection_name = Dataset.gen_collection_name_by_id(dataset_id).lower()
            else:
                raise ValueError(f"Vector store {vector_type} is not supported.")

View File

@ -2,7 +2,7 @@
Due to the presence of tasks in App Runner that require long execution times, such as LLM generation and external requests, Flask-Sqlalchemy's strategy for database connection pooling is to allocate one connection (transaction) per request. This approach keeps a connection occupied even during non-DB tasks, leading to the inability to acquire new connections during high concurrency requests due to multiple long-running tasks.

-Therefore, the database operations in App Runner and Task Pipeline must ensure connections are closed immediately after use, and it's better to pass IDs rather than Model objects to avoid deattach errors.
+Therefore, the database operations in App Runner and Task Pipeline must ensure connections are closed immediately after use, and it's better to pass IDs rather than Model objects to avoid detach errors.

Examples:
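A minimal sketch of the pattern described above (not taken from the diffed file): close the session right after each query and pass the record ID, not the ORM object, into the slow task. The import paths, the `Message` fields, and `call_llm` are assumptions for illustration.

```python
import time

from extensions.ext_database import db  # Flask-SQLAlchemy session (assumed import path)
from models.model import Message        # ORM model (assumed import path)


def call_llm(prompt: str) -> str:
    # Stand-in for a long-running LLM/external call that must not hold a DB connection.
    time.sleep(30)
    return f"answer to: {prompt}"


def run_task(message_id: str) -> None:
    # Short transaction: read only what the task needs, then release the connection.
    message = db.session.query(Message).filter(Message.id == message_id).first()
    prompt = message.query
    db.session.close()

    answer = call_llm(prompt)  # no connection is held during the slow part

    # Re-load by ID in a new short transaction to persist the result.
    message = db.session.query(Message).filter(Message.id == message_id).first()
    message.answer = answer
    db.session.commit()
    db.session.close()
```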

View File

@ -91,7 +91,7 @@ class XinferenceProvider(Provider):
    """
```
-也可以直接抛出对应Erros并做如下定义这样在之后的调用中可以直接抛出`InvokeConnectionError`等异常。
+也可以直接抛出对应 Errors并做如下定义这样在之后的调用中可以直接抛出`InvokeConnectionError`等异常。
```python
@property

View File

@ -16,6 +16,7 @@ help:
supported_model_types:
  - llm
  - text-embedding
+ - rerank
configurate_methods:
  - predefined-model
provider_credential_schema:

View File

@ -0,0 +1,52 @@
model: amazon.nova-lite-v1:0
label:
en_US: Nova Lite V1
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 300000
parameter_rules:
- name: max_new_tokens
use_template: max_tokens
required: true
default: 2048
min: 1
max: 5000
- name: temperature
use_template: temperature
required: false
type: float
default: 1
min: 0.0
max: 1.0
help:
zh_Hans: 生成内容的随机性。
en_US: The amount of randomness injected into the response.
- name: top_p
required: false
type: float
default: 0.999
min: 0.000
max: 1.000
help:
zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
- name: top_k
required: false
type: int
default: 0
min: 0
# tip docs from aws has error, max value is 500
max: 500
help:
zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
input: '0.00006'
output: '0.00024'
unit: '0.001'
currency: USD

View File

@ -0,0 +1,52 @@
model: amazon.nova-micro-v1:0
label:
en_US: Nova Micro V1
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 128000
parameter_rules:
- name: max_new_tokens
use_template: max_tokens
required: true
default: 2048
min: 1
max: 5000
- name: temperature
use_template: temperature
required: false
type: float
default: 1
min: 0.0
max: 1.0
help:
zh_Hans: 生成内容的随机性。
en_US: The amount of randomness injected into the response.
- name: top_p
required: false
type: float
default: 0.999
min: 0.000
max: 1.000
help:
zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
- name: top_k
required: false
type: int
default: 0
min: 0
# tip docs from aws has error, max value is 500
max: 500
help:
zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
input: '0.000035'
output: '0.00014'
unit: '0.001'
currency: USD

View File

@ -0,0 +1,52 @@
model: amazon.nova-pro-v1:0
label:
en_US: Nova Pro V1
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 300000
parameter_rules:
- name: max_new_tokens
use_template: max_tokens
required: true
default: 2048
min: 1
max: 5000
- name: temperature
use_template: temperature
required: false
type: float
default: 1
min: 0.0
max: 1.0
help:
zh_Hans: 生成内容的随机性。
en_US: The amount of randomness injected into the response.
- name: top_p
required: false
type: float
default: 0.999
min: 0.000
max: 1.000
help:
zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
- name: top_k
required: false
type: int
default: 0
min: 0
# tip docs from aws has error, max value is 500
max: 500
help:
zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
input: '0.0008'
output: '0.0032'
unit: '0.001'
currency: USD

View File

@ -70,6 +70,8 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
        {"prefix": "cohere.command-r", "support_system_prompts": True, "support_tool_use": True},
        {"prefix": "amazon.titan", "support_system_prompts": False, "support_tool_use": False},
        {"prefix": "ai21.jamba-1-5", "support_system_prompts": True, "support_tool_use": False},
+       {"prefix": "amazon.nova", "support_system_prompts": True, "support_tool_use": False},
+       {"prefix": "us.amazon.nova", "support_system_prompts": True, "support_tool_use": False},
    ]

    @staticmethod
@ -194,6 +196,13 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
        if model_info["support_tool_use"] and tools:
            parameters["toolConfig"] = self._convert_converse_tool_config(tools=tools)
        try:
+           # for issue #10976
+           conversations_list = parameters["messages"]
+           # if two consecutive user messages found, combine them into one message
+           for i in range(len(conversations_list) - 2, -1, -1):
+               if conversations_list[i]["role"] == conversations_list[i + 1]["role"]:
+                   conversations_list[i]["content"].extend(conversations_list.pop(i + 1)["content"])
            if stream:
                response = bedrock_client.converse_stream(**parameters)
                return self._handle_converse_stream_response(
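For illustration, a standalone sketch of the merge loop added above, applied to a hypothetical Converse-style message list (the sample data is not from the diff). Walking backwards means popping an element never disturbs the indices still to be visited.

```python
# Hypothetical input: two consecutive "user" messages, the situation from issue #10976.
messages = [
    {"role": "user", "content": [{"text": "first question"}]},
    {"role": "user", "content": [{"text": "follow-up detail"}]},
    {"role": "assistant", "content": [{"text": "answer"}]},
]

for i in range(len(messages) - 2, -1, -1):
    if messages[i]["role"] == messages[i + 1]["role"]:
        messages[i]["content"].extend(messages.pop(i + 1)["content"])

# messages is now:
# [{'role': 'user', 'content': [{'text': 'first question'}, {'text': 'follow-up detail'}]},
#  {'role': 'assistant', 'content': [{'text': 'answer'}]}]
```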

View File

@ -0,0 +1,52 @@
model: us.amazon.nova-lite-v1:0
label:
en_US: Nova Lite V1 (US.Cross Region Inference)
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 300000
parameter_rules:
- name: max_new_tokens
use_template: max_tokens
required: true
default: 2048
min: 1
max: 5000
- name: temperature
use_template: temperature
required: false
type: float
default: 1
min: 0.0
max: 1.0
help:
zh_Hans: 生成内容的随机性。
en_US: The amount of randomness injected into the response.
- name: top_p
required: false
type: float
default: 0.999
min: 0.000
max: 1.000
help:
zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
- name: top_k
required: false
type: int
default: 0
min: 0
# tip docs from aws has error, max value is 500
max: 500
help:
zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
input: '0.00006'
output: '0.00024'
unit: '0.001'
currency: USD

View File

@ -0,0 +1,52 @@
model: us.amazon.nova-micro-v1:0
label:
en_US: Nova Micro V1 (US.Cross Region Inference)
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 128000
parameter_rules:
- name: max_new_tokens
use_template: max_tokens
required: true
default: 2048
min: 1
max: 5000
- name: temperature
use_template: temperature
required: false
type: float
default: 1
min: 0.0
max: 1.0
help:
zh_Hans: 生成内容的随机性。
en_US: The amount of randomness injected into the response.
- name: top_p
required: false
type: float
default: 0.999
min: 0.000
max: 1.000
help:
zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
- name: top_k
required: false
type: int
default: 0
min: 0
# tip docs from aws has error, max value is 500
max: 500
help:
zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
input: '0.000035'
output: '0.00014'
unit: '0.001'
currency: USD

View File

@ -0,0 +1,52 @@
model: us.amazon.nova-pro-v1:0
label:
en_US: Nova Pro V1 (US.Cross Region Inference)
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 300000
parameter_rules:
- name: max_new_tokens
use_template: max_tokens
required: true
default: 2048
min: 1
max: 5000
- name: temperature
use_template: temperature
required: false
type: float
default: 1
min: 0.0
max: 1.0
help:
zh_Hans: 生成内容的随机性。
en_US: The amount of randomness injected into the response.
- name: top_p
required: false
type: float
default: 0.999
min: 0.000
max: 1.000
help:
zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
- name: top_k
required: false
type: int
default: 0
min: 0
# tip docs from aws has error, max value is 500
max: 500
help:
zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
input: '0.0008'
output: '0.0032'
unit: '0.001'
currency: USD

View File

@ -0,0 +1,2 @@
- amazon.rerank-v1
- cohere.rerank-v3-5

View File

@ -0,0 +1,4 @@
model: amazon.rerank-v1:0
model_type: rerank
model_properties:
context_size: 5120

View File

@ -0,0 +1,4 @@
model: cohere.rerank-v3-5:0
model_type: rerank
model_properties:
context_size: 5120

View File

@ -0,0 +1,147 @@
from typing import Optional

import boto3
from botocore.config import Config

from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult
from core.model_runtime.errors.invoke import (
    InvokeAuthorizationError,
    InvokeBadRequestError,
    InvokeConnectionError,
    InvokeError,
    InvokeRateLimitError,
    InvokeServerUnavailableError,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.rerank_model import RerankModel


class BedrockRerankModel(RerankModel):
    """
    Model class for Cohere rerank model.
    """

    def _invoke(
        self,
        model: str,
        credentials: dict,
        query: str,
        docs: list[str],
        score_threshold: Optional[float] = None,
        top_n: Optional[int] = None,
        user: Optional[str] = None,
    ) -> RerankResult:
        """
        Invoke rerank model

        :param model: model name
        :param credentials: model credentials
        :param query: search query
        :param docs: docs for reranking
        :param score_threshold: score threshold
        :param top_n: top n
        :param user: unique user id
        :return: rerank result
        """
        if len(docs) == 0:
            return RerankResult(model=model, docs=docs)

        # initialize client
        client_config = Config(region_name=credentials["aws_region"])
        bedrock_runtime = boto3.client(
            service_name="bedrock-agent-runtime",
            config=client_config,
            aws_access_key_id=credentials.get("aws_access_key_id", ""),
            aws_secret_access_key=credentials.get("aws_secret_access_key"),
        )
        queries = [{"type": "TEXT", "textQuery": {"text": query}}]
        text_sources = []
        for text in docs:
            text_sources.append(
                {
                    "type": "INLINE",
                    "inlineDocumentSource": {
                        "type": "TEXT",
                        "textDocument": {
                            "text": text,
                        },
                    },
                }
            )
        modelId = model
        region = credentials["aws_region"]
        model_package_arn = f"arn:aws:bedrock:{region}::foundation-model/{modelId}"
        rerankingConfiguration = {
            "type": "BEDROCK_RERANKING_MODEL",
            "bedrockRerankingConfiguration": {
                "numberOfResults": top_n,
                "modelConfiguration": {
                    "modelArn": model_package_arn,
                },
            },
        }
        response = bedrock_runtime.rerank(
            queries=queries, sources=text_sources, rerankingConfiguration=rerankingConfiguration
        )

        rerank_documents = []
        for idx, result in enumerate(response["results"]):
            # format document
            index = result["index"]
            rerank_document = RerankDocument(
                index=index,
                text=docs[index],
                score=result["relevanceScore"],
            )
            # score threshold check
            if score_threshold is not None:
                if rerank_document.score >= score_threshold:
                    rerank_documents.append(rerank_document)
            else:
                rerank_documents.append(rerank_document)
        return RerankResult(model=model, docs=rerank_documents)

    def validate_credentials(self, model: str, credentials: dict) -> None:
        """
        Validate model credentials

        :param model: model name
        :param credentials: model credentials
        :return:
        """
        try:
            self.invoke(
                model=model,
                credentials=credentials,
                query="What is the capital of the United States?",
                docs=[
                    "Carson City is the capital city of the American state of Nevada. At the 2010 United States "
                    "Census, Carson City had a population of 55,274.",
                    "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that "
                    "are a political division controlled by the United States. Its capital is Saipan.",
                ],
                score_threshold=0.8,
            )
        except Exception as ex:
            raise CredentialsValidateFailedError(str(ex))

    @property
    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
        """
        Map model invoke error to unified error
        The key is the ermd = genai.GenerativeModel(model) error type thrown to the caller
        The value is the md = genai.GenerativeModel(model) error type thrown by the model,
        which needs to be converted into a unified error type for the caller.

        :return: Invoke emd = genai.GenerativeModel(model) error mapping
        """
        return {
            InvokeConnectionError: [],
            InvokeServerUnavailableError: [],
            InvokeRateLimitError: [],
            InvokeAuthorizationError: [],
            InvokeBadRequestError: [],
        }

View File

@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
+ context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature

View File

@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
+ context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature

View File

@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
+ context_size: 8192
parameter_rules:
  - name: temperature
    use_template: temperature

View File

@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
+ context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature

View File

@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
+ context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature

View File

@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
+ context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature

View File

@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
+ context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature

View File

@ -8,7 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
- context_size: 10240
+ context_size: 1048576
parameter_rules:
  - name: temperature
    use_template: temperature

View File

@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
+ context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature

View File

@ -4,6 +4,7 @@ label:
model_type: llm
model_properties:
  mode: chat
+ context_size: 2048
features:
  - vision
parameter_rules:

View File

@ -4,6 +4,7 @@ label:
model_type: llm
model_properties:
  mode: chat
+ context_size: 8192
features:
  - vision
  - video

View File

@ -22,18 +22,6 @@ from core.model_runtime.model_providers.__base.large_language_model import Large
from core.model_runtime.model_providers.zhipuai._common import _CommonZhipuaiAI
from core.model_runtime.utils import helper

-GLM_JSON_MODE_PROMPT = """You should always follow the instructions and output a valid JSON object.
-The structure of the JSON object you can found in the instructions, use {"answer": "$your_answer"} as the default structure
-if you are not sure about the structure.
-And you should always end the block with a "```" to indicate the end of the JSON object.
-<instructions>
-{{instructions}}
-</instructions>
-```JSON"""  # noqa: E501

class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
    def _invoke(
@ -64,42 +52,8 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
        credentials_kwargs = self._to_credential_kwargs(credentials)

        # invoke model
-       # stop = stop or []
-       # self._transform_json_prompts(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)
        return self._generate(model, credentials_kwargs, prompt_messages, model_parameters, tools, stop, stream, user)

-   # def _transform_json_prompts(self, model: str, credentials: dict,
-   #                             prompt_messages: list[PromptMessage], model_parameters: dict,
-   #                             tools: list[PromptMessageTool] | None = None, stop: list[str] | None = None,
-   #                             stream: bool = True, user: str | None = None) \
-   #         -> None:
-   #     """
-   #     Transform json prompts to model prompts
-   #     """
-   #     if "}\n\n" not in stop:
-   #         stop.append("}\n\n")
-   #     # check if there is a system message
-   #     if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage):
-   #         # override the system message
-   #         prompt_messages[0] = SystemPromptMessage(
-   #             content=GLM_JSON_MODE_PROMPT.replace("{{instructions}}", prompt_messages[0].content)
-   #         )
-   #     else:
-   #         # insert the system message
-   #         prompt_messages.insert(0, SystemPromptMessage(
-   #             content=GLM_JSON_MODE_PROMPT.replace("{{instructions}}", "Please output a valid JSON object.")
-   #         ))
-   #     # check if the last message is a user message
-   #     if len(prompt_messages) > 0 and isinstance(prompt_messages[-1], UserPromptMessage):
-   #         # add ```JSON\n to the last message
-   #         prompt_messages[-1].content += "\n```JSON\n"
-   #     else:
-   #         # append a user message
-   #         prompt_messages.append(UserPromptMessage(
-   #             content="```JSON\n"
-   #         ))

    def get_num_tokens(
        self,
        model: str,
@ -170,7 +124,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
        :return: full response or stream response chunk generator result
        """
        extra_model_kwargs = {}
-       # request to glm-4v-plus with stop words will always response "finish_reason":"network_error"
+       # request to glm-4v-plus with stop words will always respond "finish_reason":"network_error"
        if stop and model != "glm-4v-plus":
            extra_model_kwargs["stop"] = stop
@ -186,7 +140,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
        # resolve zhipuai model not support system message and user message, assistant message must be in sequence
        new_prompt_messages: list[PromptMessage] = []
        for prompt_message in prompt_messages:
-           copy_prompt_message = prompt_message.copy()
+           copy_prompt_message = prompt_message.model_copy()
            if copy_prompt_message.role in {PromptMessageRole.USER, PromptMessageRole.SYSTEM, PromptMessageRole.TOOL}:
                if isinstance(copy_prompt_message.content, list):
                    # check if model is 'glm-4v'
@ -238,8 +192,6 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
            params = self._construct_glm_4v_parameter(model, new_prompt_messages, model_parameters)
        else:
            params = {"model": model, "messages": [], **model_parameters}
-           # glm model
-           if not model.startswith("chatglm"):
            for prompt_message in new_prompt_messages:
                if prompt_message.role == PromptMessageRole.TOOL:
                    params["messages"].append(
@ -271,26 +223,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
                    else:
                        params["messages"].append({"role": "assistant", "content": prompt_message.content})
                else:
-                   params["messages"].append(
-                       {"role": prompt_message.role.value, "content": prompt_message.content}
-                   )
-           else:
-               # chatglm model
-               for prompt_message in new_prompt_messages:
-                   # merge system message to user message
-                   if prompt_message.role in {
-                       PromptMessageRole.SYSTEM,
-                       PromptMessageRole.TOOL,
-                       PromptMessageRole.USER,
-                   }:
-                       if len(params["messages"]) > 0 and params["messages"][-1]["role"] == "user":
-                           params["messages"][-1]["content"] += "\n\n" + prompt_message.content
-                       else:
-                           params["messages"].append({"role": "user", "content": prompt_message.content})
-                   else:
-                       params["messages"].append(
-                           {"role": prompt_message.role.value, "content": prompt_message.content}
-                       )
+                   params["messages"].append({"role": prompt_message.role.value, "content": prompt_message.content})

        if tools and len(tools) > 0:
            params["tools"] = [{"type": "function", "function": helper.dump_model(tool)} for tool in tools]
@ -406,7 +339,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
        Handle llm stream response

        :param model: model name
-       :param response: response
+       :param responses: response
        :param prompt_messages: prompt messages
        :return: llm response chunk generator result
        """
@ -505,7 +438,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
        if tools and len(tools) > 0:
            text += "\n\nTools:"
            for tool in tools:
-               text += f"\n{tool.json()}"
+               text += f"\n{tool.model_dump_json()}"

        # trim off the trailing ' ' that might come from the "Assistant: "
        return text.rstrip()
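The `copy()` → `model_copy()` and `json()` → `model_dump_json()` renames above follow the Pydantic v2 API. A small standalone sketch of the equivalence, using a hypothetical stand-in model (not the runtime's actual `PromptMessage` class):

```python
from pydantic import BaseModel


class PromptMessage(BaseModel):  # hypothetical stand-in for illustration
    role: str
    content: str


msg = PromptMessage(role="user", content="hi")

# Pydantic v1 spellings (deprecated in v2): msg.copy(), msg.json()
# Pydantic v2 spellings used in the change above:
clone = msg.model_copy()
serialized = msg.model_dump_json()
print(clone.role, serialized)
```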

View File

@ -5,7 +5,7 @@ BAICHUAN_CONTEXT = "用户在与一个客观的助手对话。助手会尊重找
CHAT_APP_COMPLETION_PROMPT_CONFIG = {
    "completion_prompt_config": {
        "prompt": {
-           "text": "{{#pre_prompt#}}\nHere is the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>\n{{#histories#}}\n</histories>\n\n\nHuman: {{#query#}}\n\nAssistant: "  # noqa: E501
+           "text": "{{#pre_prompt#}}\nHere are the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>\n{{#histories#}}\n</histories>\n\n\nHuman: {{#query#}}\n\nAssistant: "  # noqa: E501
        },
        "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
    },

View File

@ -375,7 +375,6 @@ class TidbOnQdrantVector(BaseVector):
        for result in results:
            if result:
                document = self._document_from_scored_point(result, Field.CONTENT_KEY.value, Field.METADATA_KEY.value)
-               document.metadata["vector"] = result.vector
                documents.append(document)

        return documents
@ -394,6 +393,7 @@ class TidbOnQdrantVector(BaseVector):
    ) -> Document:
        return Document(
            page_content=scored_point.payload.get(content_payload_key),
+           vector=scored_point.vector,
            metadata=scored_point.payload.get(metadata_payload_key) or {},
        )

View File

@ -50,7 +50,7 @@ class WordExtractor(BaseExtractor):
                self.web_path = self.file_path
                # TODO: use a better way to handle the file
-               self.temp_file = tempfile.NamedTemporaryFile()  # noqa: SIM115
+               self.temp_file = tempfile.NamedTemporaryFile()
                self.temp_file.write(r.content)
                self.file_path = self.temp_file.name
            elif not os.path.isfile(self.file_path):

View File

@ -6,9 +6,9 @@ identity:
    zh_Hans: GitLab 合并请求查询
description:
  human:
-    en_US: A tool for query GitLab merge requests, Input should be a exists reposity or branch.
+    en_US: A tool for query GitLab merge requests, Input should be a exists repository or branch.
    zh_Hans: 一个用于查询 GitLab 代码合并请求的工具,输入的内容应该是一个已存在的仓库名或者分支。
-  llm: A tool for query GitLab merge requests, Input should be a exists reposity or branch.
+  llm: A tool for query GitLab merge requests, Input should be a exists repository or branch.
parameters:
  - name: repository
    type: string

View File

@ -61,7 +61,7 @@ class WolframAlphaTool(BuiltinTool):
                params["input"] = query
            else:
                finished = True
-               if "souces" in response_data["queryresult"]:
+               if "sources" in response_data["queryresult"]:
                    return self.create_link_message(response_data["queryresult"]["sources"]["url"])
                elif "pods" in response_data["queryresult"]:
                    result = response_data["queryresult"]["pods"][0]["subpods"][0]["plaintext"]

View File

@ -1,11 +1,9 @@
import logging
from collections.abc import Mapping, Sequence
-from mimetypes import guess_extension
-from os import path
from typing import Any

from configs import dify_config
-from core.file import File, FileTransferMethod, FileType
+from core.file import File, FileTransferMethod
from core.tools.tool_file_manager import ToolFileManager
from core.workflow.entities.node_entities import NodeRunResult
from core.workflow.entities.variable_entities import VariableSelector
@ -150,11 +148,6 @@ class HttpRequestNode(BaseNode[HttpRequestNodeData]):
        content = response.content

        if is_file and content_type:
-           # extract filename from url
-           filename = path.basename(url)
-           # extract extension if possible
-           extension = guess_extension(content_type) or ".bin"
            tool_file = ToolFileManager.create_file_by_raw(
                user_id=self.user_id,
                tenant_id=self.tenant_id,
@ -165,7 +158,6 @@ class HttpRequestNode(BaseNode[HttpRequestNodeData]):
            mapping = {
                "tool_file_id": tool_file.id,
-               "type": FileType.IMAGE.value,
                "transfer_method": FileTransferMethod.TOOL_FILE.value,
            }
            file = file_factory.build_from_mapping(

View File

@ -24,7 +24,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
        """
        node_inputs: dict[str, list] = {"conditions": []}

-       process_datas: dict[str, list] = {"condition_results": []}
+       process_data: dict[str, list] = {"condition_results": []}

        input_conditions = []
        final_result = False
@ -40,7 +40,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
                    operator=case.logical_operator,
                )

-               process_datas["condition_results"].append(
+               process_data["condition_results"].append(
                    {
                        "group": case.model_dump(),
                        "results": group_result,
@ -65,7 +65,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
                selected_case_id = "true" if final_result else "false"

-               process_datas["condition_results"].append(
+               process_data["condition_results"].append(
                    {"group": "default", "results": group_result, "final_result": final_result}
                )
@ -73,7 +73,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
        except Exception as e:
            return NodeRunResult(
-               status=WorkflowNodeExecutionStatus.FAILED, inputs=node_inputs, process_data=process_datas, error=str(e)
+               status=WorkflowNodeExecutionStatus.FAILED, inputs=node_inputs, process_data=process_data, error=str(e)
            )

        outputs = {"result": final_result, "selected_case_id": selected_case_id}
@ -81,7 +81,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
        data = NodeRunResult(
            status=WorkflowNodeExecutionStatus.SUCCEEDED,
            inputs=node_inputs,
-           process_data=process_datas,
+           process_data=process_data,
            edge_source_handle=selected_case_id or "false",  # Use case ID or 'default'
            outputs=outputs,
        )

View File

@ -116,7 +116,7 @@ class IterationNode(BaseNode[IterationNodeData]):
        variable_pool.add([self.node_id, "item"], iterator_list_value[0])

        # init graph engine
-       from core.workflow.graph_engine.graph_engine import GraphEngine
+       from core.workflow.graph_engine.graph_engine import GraphEngine, GraphEngineThreadPool

        graph_engine = GraphEngine(
            tenant_id=self.tenant_id,
@ -162,8 +162,7 @@ class IterationNode(BaseNode[IterationNodeData]):
            if self.node_data.is_parallel:
                futures: list[Future] = []
                q = Queue()
-               thread_pool = graph_engine.workflow_thread_pool_mapping[graph_engine.thread_pool_id]
-               thread_pool._max_workers = self.node_data.parallel_nums
+               thread_pool = GraphEngineThreadPool(max_workers=self.node_data.parallel_nums, max_submit_count=100)
                for index, item in enumerate(iterator_list_value):
                    future: Future = thread_pool.submit(
                        self._run_single_iter_parallel,
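The change above gives each parallel iteration run its own bounded pool instead of reusing (and mutating) the shared workflow pool. `GraphEngineThreadPool` is a Dify-internal class; the sketch below only illustrates the bounded-workers idea with the standard library, using made-up sample data.

```python
from concurrent.futures import Future, ThreadPoolExecutor
from queue import Queue

items = ["a", "b", "c", "d"]
parallel_nums = 2  # counterpart of node_data.parallel_nums

# A dedicated, bounded pool per parallel iteration run (standard-library analogue).
outputs: Queue = Queue()
with ThreadPoolExecutor(max_workers=parallel_nums) as thread_pool:
    futures: list[Future] = [
        thread_pool.submit(lambda item=item: outputs.put(item.upper()))
        for item in items
    ]
    for future in futures:
        future.result()  # wait for each iteration to finish

print(sorted(outputs.get() for _ in range(len(items))))  # ['A', 'B', 'C', 'D']
```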

View File

@ -815,7 +815,7 @@ class LLMNode(BaseNode[LLMNodeData]):
                "completion_model": {
                    "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
                    "prompt": {
-                       "text": "Here is the chat histories between human and assistant, inside "
+                       "text": "Here are the chat histories between human and assistant, inside "
                        "<histories></histories> XML tags.\n\n<histories>\n{{"
                        "#histories#}}\n</histories>\n\n\nHuman: {{#sys.query#}}\n\nAssistant:",
                        "edition_type": "basic",

View File

@ -98,7 +98,7 @@ Step 3: Structure the extracted parameters to JSON object as specified in <struc
Step 4: Ensure that the JSON object is properly formatted and valid. The output should not contain any XML tags. Only the JSON object should be outputted.

### Memory
-Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
+Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
<histories>
{histories}
</histories>
@ -125,7 +125,7 @@ CHAT_GENERATE_JSON_PROMPT = """You should always follow the instructions and out
The structure of the JSON object you can found in the instructions.

### Memory
-Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
+Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
<histories>
{histories}
</histories>

View File

@ -8,7 +8,7 @@ QUESTION_CLASSIFIER_SYSTEM_PROMPT = """
### Constraint
DO NOT include anything other than the JSON array in your response.
### Memory
-Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
+Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
<histories>
{histories}
</histories>
@ -66,7 +66,7 @@ User:{{"input_text": ["bad service, slow to bring the food"], "categories": [{{"
Assistant:{{"keywords": ["bad service", "slow", "food", "tip", "terrible", "waitresses"],"category_id": "f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name": "Experience"}}
</example>
### Memory
-Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
+Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
<histories>
{histories}
</histories>

View File

@ -7,8 +7,8 @@ from .enums import InputType, Operation
class OperationNotSupportedError(VariableOperatorNodeError):
-   def __init__(self, *, operation: Operation, varialbe_type: str):
-       super().__init__(f"Operation {operation} is not supported for type {varialbe_type}")
+   def __init__(self, *, operation: Operation, variable_type: str):
+       super().__init__(f"Operation {operation} is not supported for type {variable_type}")


class InputTypeNotSupportedError(VariableOperatorNodeError):

View File

@ -45,7 +45,7 @@ class VariableAssignerNode(BaseNode[VariableAssignerNodeData]):
            # Check if operation is supported
            if not helpers.is_operation_supported(variable_type=variable.value_type, operation=item.operation):
-               raise OperationNotSupportedError(operation=item.operation, varialbe_type=variable.value_type)
+               raise OperationNotSupportedError(operation=item.operation, variable_type=variable.value_type)

            # Check if variable input is supported
            if item.input_type == InputType.VARIABLE and not helpers.is_variable_input_supported(
@ -156,4 +156,4 @@ class VariableAssignerNode(BaseNode[VariableAssignerNodeData]):
            case Operation.DIVIDE:
                return variable.value / value
            case _:
-               raise OperationNotSupportedError(operation=operation, varialbe_type=variable.value_type)
+               raise OperationNotSupportedError(operation=operation, variable_type=variable.value_type)

api/poetry.lock generated

File diff suppressed because it is too large.

View File

@ -20,7 +20,7 @@ azure-ai-inference = "~1.0.0b3"
azure-ai-ml = "~1.20.0"
azure-identity = "1.16.1"
beautifulsoup4 = "4.12.2"
-boto3 = "1.35.17"
+boto3 = "1.35.74"
bs4 = "~0.0.1"
cachetools = "~5.3.0"
celery = "~5.4.0"

View File

@ -29,6 +29,7 @@ import { useAppContext } from '@/context/app-context'
import { ModelFeatureEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { useFeatures } from '@/app/components/base/features/hooks'
import type { InputForm } from '@/app/components/base/chat/chat/type'
+import { getLastAnswer } from '@/app/components/base/chat/utils'

interface ChatItemProps {
  modelAndParameter: ModelAndParameter
@ -101,7 +102,7 @@ const ChatItem: FC<ChatItemProps> = ({
      query: message,
      inputs,
      model_config: configData,
-     parent_message_id: chatListRef.current.at(-1)?.id || null,
+     parent_message_id: getLastAnswer(chatListRef.current)?.id || null,
    }

    if ((config.file_upload as any).enabled && files?.length && supportVision)

View File

@ -84,7 +84,7 @@ const FileImageItem = ({
          className='absolute bottom-0.5 right-0.5 flex items-center justify-center w-6 h-6 rounded-lg bg-components-actionbar-bg shadow-md'
          onClick={(e) => {
            e.stopPropagation()
-           downloadFile(url || '', name)
+           downloadFile(url || base64Url || '', name)
          }}
        >
          <RiDownloadLine className='w-4 h-4 text-text-tertiary' />

View File

@ -80,7 +80,7 @@ const FileItem = ({
        }
      </div>
      {
-       showDownloadAction && (
+       showDownloadAction && url && (
          <ActionButton
            size='m'
            className='hidden group-hover/file-item:flex absolute -right-1 -top-1'

View File

@ -53,8 +53,7 @@ const ImageGallery: FC<Props> = ({
        imagePreviewUrl && (
          <ImagePreview
            url={imagePreviewUrl}
-           onCancel={() => setImagePreviewUrl('')}
-         />
+           onCancel={() => setImagePreviewUrl('')} title={''} />
        )
      }
    </div>

View File

@ -9,7 +9,6 @@ import RemarkGfm from 'remark-gfm'
import RehypeRaw from 'rehype-raw'
import SyntaxHighlighter from 'react-syntax-highlighter'
import { atelierHeathLight } from 'react-syntax-highlighter/dist/esm/styles/hljs'
-import type { RefObject } from 'react'
import { Component, createContext, memo, useContext, useEffect, useMemo, useRef, useState } from 'react'
import cn from '@/utils/classnames'
import CopyBtn from '@/app/components/base/copy-btn'
@ -78,28 +77,6 @@ export function PreCode(props: { children: any }) {
  )
}

-const useLazyLoad = (ref: RefObject<Element>): boolean => {
-  const [isIntersecting, setIntersecting] = useState<boolean>(false)
-
-  useEffect(() => {
-    const observer = new IntersectionObserver(([entry]) => {
-      if (entry.isIntersecting) {
-        setIntersecting(true)
-        observer.disconnect()
-      }
-    })
-
-    if (ref.current)
-      observer.observe(ref.current)
-
-    return () => {
-      observer.disconnect()
-    }
-  }, [ref])
-
-  return isIntersecting
-}

const PreContext = createContext({
  // if children not in PreContext, just leave inline true
  inline: true,
@ -138,7 +115,7 @@ const CodeBlock: Components['code'] = memo(({ className, children, ...props }) =
      try {
        return JSON.parse(String(children).replace(/\n$/, ''))
      }
-     catch { }
+     catch (error) { }
    }
    return JSON.parse('{"title":{"text":"ECharts error - Wrong JSON format."}}')
  }, [language, children])
@ -167,7 +144,7 @@ const CodeBlock: Components['code'] = memo(({ className, children, ...props }) =
    else {
      return (
        <SyntaxHighlighter
-         {...props}
+         {...props as any}
          style={atelierHeathLight}
          customStyle={{
            paddingLeft: 12,
@ -274,7 +251,7 @@ export function Markdown(props: { content: string; className?: string }) {
    () => {
      return (tree) => {
        const iterate = (node: any) => {
-         if (node.type === 'element' && !node.properties?.src && node.properties?.ref && node.properties.ref.startsWith('{') && node.properties.ref.endsWith('}'))
+         if (node.type === 'element' && node.properties?.ref)
            delete node.properties.ref

          if (node.children)

View File

@ -0,0 +1,47 @@
import type { ComponentProps, FC } from 'react'
import classNames from '@/utils/classnames'
type SkeletonProps = ComponentProps<'div'>
export const SkeletonContanier: FC<SkeletonProps> = (props) => {
const { className, children, ...rest } = props
return (
<div className={classNames('flex flex-col gap-1', className)} {...rest}>
{children}
</div>
)
}
export const SkeletonRow: FC<SkeletonProps> = (props) => {
const { className, children, ...rest } = props
return (
<div className={classNames('flex items-center gap-2', className)} {...rest}>
{children}
</div>
)
}
export const SkeletonRectangle: FC<SkeletonProps> = (props) => {
const { className, children, ...rest } = props
return (
<div className={classNames('h-2 rounded-sm opacity-20 bg-text-tertiary my-1', className)} {...rest}>
{children}
</div>
)
}
export const SkeletonPoint: FC = () =>
<div className='text-text-quaternary text-xs font-medium'>·</div>
/** Usage
* <SkeletonContanier>
* <SkeletonRow>
* <SkeletonRectangle className="w-96" />
* <SkeletonPoint />
* <SkeletonRectangle className="w-96" />
* </SkeletonRow>
* <SkeletonRow>
* <SkeletonRectangle className="w-96" />
* </SkeletonRow>
* <SkeletonRow>
*/

View File

@ -30,7 +30,9 @@ const nodeDefault: NodeDefault<AssignerNodeType> = {
      errorMessages = t(`${i18nPrefix}.fieldRequired`, { field: t('workflow.nodes.assigner.assignedVariable') })
    if (!errorMessages && value.operation !== WriteMode.clear) {
-     if (value.operation === WriteMode.set) {
+     if (value.operation === WriteMode.set || value.operation === WriteMode.increment
+       || value.operation === WriteMode.decrement || value.operation === WriteMode.multiply
+       || value.operation === WriteMode.divide) {
        if (!value.value && typeof value.value !== 'number')
          errorMessages = t(`${i18nPrefix}.fieldRequired`, { field: t('workflow.nodes.assigner.variable') })
      }

View File

@ -33,7 +33,7 @@ const NodeVariableItem = ({
  const { t } = useTranslation()
  return (
    <div className={cn(
-     'relative flex items-center p-[3px] pl-[5px] gap-1 self-stretch rounded-md bg-workflow-block-param-bg',
+     'relative flex items-center p-[3px] pl-[5px] gap-1 self-stretch rounded-md bg-workflow-block-parma-bg',
      showBorder && '!bg-black/[0.02]',
      className,
    )}>

View File

@ -97,8 +97,9 @@ const ChatVariableModal = ({
    return objectPlaceholder
  }, [type])

  const getObjectValue = useCallback(() => {
-   if (!chatVar)
+   if (!chatVar || Object.keys(chatVar.value).length === 0)
      return [DEFAULT_OBJECT_VALUE]
    return Object.keys(chatVar.value).map((key) => {
      return {
        key,

View File

@ -35,6 +35,8 @@ const translation = {
    Translate: 'Übersetzen',
    Programming: 'Programmieren',
    HR: 'Personalwesen',
+   Agent: 'Agent',
+   Workflow: 'Arbeitsablauf',
  },
}

View File

@ -30,11 +30,13 @@ const translation = {
    nameRequired: 'App name is required',
  },
  category: {
+   Agent: 'Agent',
    Assistant: 'Assistant',
    Writing: 'Writing',
    Translate: 'Translate',
    Programming: 'Programming',
    HR: 'HR',
+   Workflow: 'Workflow',
  },
}

View File

@ -35,6 +35,8 @@ const translation = {
    Translate: 'Traducción',
    Programming: 'Programación',
    HR: 'Recursos Humanos',
+   Agent: 'Agente',
+   Workflow: 'Flujo de trabajo',
  },
}

View File

@ -35,6 +35,8 @@ const translation = {
    Translate: 'ترجمه',
    Programming: 'برنامه‌نویسی',
    HR: 'منابع انسانی',
+   Agent: 'عامل',
+   Workflow: 'گردش',
  },
}

View File

@ -35,6 +35,8 @@ const translation = {
    Translate: 'Traduire',
    Programming: 'Programmation',
    HR: 'RH',
+   Agent: 'Agent',
+   Workflow: 'Flux de travail',
  },
}

View File

@ -36,6 +36,8 @@ const translation = {
    Translate: 'अनुवाद',
    Programming: 'प्रोग्रामिंग',
    HR: 'मानव संसाधन',
+   Workflow: 'कार्यप्रवाह',
+   Agent: 'आढ़तिया',
  },
}

View File

@ -36,6 +36,8 @@ const translation = {
    Translate: 'Traduzione',
    Programming: 'Programmazione',
    HR: 'Risorse Umane',
+   Workflow: 'Flusso di lavoro',
+   Agent: 'Agente',
  },
}

View File

@ -35,6 +35,8 @@ const translation = {
    Translate: '翻訳',
    Programming: 'プログラミング',
    HR: '人事',
+   Workflow: 'ワークフロー',
+   Agent: 'エージェント',
  },
}

View File

@ -36,6 +36,8 @@ const translation = {
    Translate: 'Tłumaczenie',
    Programming: 'Programowanie',
    HR: 'HR',
+   Agent: 'Agent',
+   Workflow: 'Przepływ pracy',
  },
}

View File

@ -35,6 +35,8 @@ const translation = {
    Translate: 'Traduzir',
    Programming: 'Programação',
    HR: 'RH',
+   Workflow: 'Fluxo de trabalho',
+   Agent: 'Agente',
  },
}

View File

@ -35,6 +35,8 @@ const translation = {
    Translate: 'Traducere',
    Programming: 'Programare',
    HR: 'Resurse Umane',
+   Agent: 'Agent',
+   Workflow: 'Flux de lucru',
  },
}

View File

@ -35,6 +35,8 @@ const translation = {
    Translate: 'Перевод',
    Programming: 'Программирование',
    HR: 'HR',
+   Agent: 'Агент',
+   Workflow: 'Рабочий процесс',
  },
}

View File

@ -35,6 +35,8 @@ const translation = {
    Translate: 'Prevajanje',
    Programming: 'Programiranje',
    HR: 'Kadri',
+   Workflow: 'Potek dela',
+   Agent: 'Agent',
  },
}

View File

@ -35,6 +35,8 @@ const translation = {
    Translate: 'แปล',
    Programming: 'โปรแกรม',
    HR: 'ชั่วโมง',
+   Workflow: 'เวิร์กโฟลว์',
+   Agent: 'ตัวแทน',
  },
}

View File

@ -35,6 +35,8 @@ const translation = {
    Translate: 'Çeviri',
    Programming: 'Programlama',
    HR: 'İK',
+   Agent: 'Aracı',
+   Workflow: 'İş Akışı',
  },
}

View File

@ -35,6 +35,8 @@ const translation = {
    Translate: 'Переклад',
    Programming: 'Програмування',
    HR: 'HR',
+   Workflow: 'Робочий процес',
+   Agent: 'Агент',
  },
}

View File

@ -35,6 +35,8 @@ const translation = {
    Translate: 'Dịch thuật',
    Programming: 'Lập trình',
    HR: 'Nhân sự',
+   Agent: 'Người đại lý',
+   Workflow: 'Quy trình làm việc',
  },
}

View File

@ -30,11 +30,13 @@ const translation = {
    nameRequired: '应用程序名称不能为空',
  },
  category: {
+   Agent: 'Agent',
    Assistant: '助手',
    Writing: '写作',
    Translate: '翻译',
    Programming: '编程',
    HR: '人力资源',
+   Workflow: '工作流',
  },
}

View File

@ -35,6 +35,8 @@ const translation = {
    Translate: '翻譯',
    Programming: '程式設計',
    HR: '人力資源',
+   Agent: '代理',
+   Workflow: '工作流',
  },
}