Merge branch "main" into feat/plugins
commit b8f9747849
@@ -259,7 +259,7 @@ def migrate_knowledge_vector_database():
     skipped_count = 0
     total_count = 0
     vector_type = dify_config.VECTOR_STORE
-    upper_colletion_vector_types = {
+    upper_collection_vector_types = {
         VectorType.MILVUS,
         VectorType.PGVECTOR,
         VectorType.RELYT,
@@ -267,7 +267,7 @@ def migrate_knowledge_vector_database():
         VectorType.ORACLE,
         VectorType.ELASTICSEARCH,
     }
-    lower_colletion_vector_types = {
+    lower_collection_vector_types = {
         VectorType.ANALYTICDB,
         VectorType.CHROMA,
         VectorType.MYSCALE,
@@ -307,7 +307,7 @@ def migrate_knowledge_vector_database():
                 continue
             collection_name = ""
             dataset_id = dataset.id
-            if vector_type in upper_colletion_vector_types:
+            if vector_type in upper_collection_vector_types:
                 collection_name = Dataset.gen_collection_name_by_id(dataset_id)
             elif vector_type == VectorType.QDRANT:
                 if dataset.collection_binding_id:
@@ -323,7 +323,7 @@ def migrate_knowledge_vector_database():
                 else:
                     collection_name = Dataset.gen_collection_name_by_id(dataset_id)

-            elif vector_type in lower_colletion_vector_types:
+            elif vector_type in lower_collection_vector_types:
                collection_name = Dataset.gen_collection_name_by_id(dataset_id).lower()
            else:
                raise ValueError(f"Vector store {vector_type} is not supported.")
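Beyond the spelling fix, the renamed sets encode a simple casing rule: stores in the upper set keep the generated collection name as-is, stores in the lower set take it lower-cased. A minimal standalone sketch of that dispatch (the set contents and `gen_collection_name_by_id` below are stand-ins, not the real Dify helpers):

```python
# Sketch of the casing dispatch shown in the hunks above; the sets and
# the name generator are assumptions standing in for the real code.
UPPER_COLLECTION_VECTOR_TYPES = {"milvus", "pgvector", "relyt", "oracle", "elasticsearch"}
LOWER_COLLECTION_VECTOR_TYPES = {"analyticdb", "chroma", "myscale"}

def gen_collection_name_by_id(dataset_id: str) -> str:
    # Mimics the "Vector_index_<id>_Node" shape of the generated name.
    return f"Vector_index_{dataset_id.replace('-', '_')}_Node"

def collection_name_for(vector_type: str, dataset_id: str) -> str:
    if vector_type in UPPER_COLLECTION_VECTOR_TYPES:
        return gen_collection_name_by_id(dataset_id)
    if vector_type in LOWER_COLLECTION_VECTOR_TYPES:
        return gen_collection_name_by_id(dataset_id).lower()
    raise ValueError(f"Vector store {vector_type} is not supported.")

print(collection_name_for("chroma", "1f2e3d4c-aaaa-bbbb-cccc-000000000000"))
```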
@@ -2,7 +2,7 @@

 Due to the presence of tasks in App Runner that require long execution times, such as LLM generation and external requests, Flask-Sqlalchemy's strategy for database connection pooling is to allocate one connection (transaction) per request. This approach keeps a connection occupied even during non-DB tasks, leading to the inability to acquire new connections during high concurrency requests due to multiple long-running tasks.

-Therefore, the database operations in App Runner and Task Pipeline must ensure connections are closed immediately after use, and it's better to pass IDs rather than Model objects to avoid deattach errors.
+Therefore, the database operations in App Runner and Task Pipeline must ensure connections are closed immediately after use, and it's better to pass IDs rather than Model objects to avoid detach errors.

 Examples:

@@ -91,7 +91,7 @@ class XinferenceProvider(Provider):
     """
 ```

-也可以直接抛出对应Erros,并做如下定义,这样在之后的调用中可以直接抛出`InvokeConnectionError`等异常。
+也可以直接抛出对应 Errors,并做如下定义,这样在之后的调用中可以直接抛出`InvokeConnectionError`等异常。

 ```python
 @property
@@ -16,6 +16,7 @@ help:
 supported_model_types:
   - llm
   - text-embedding
+  - rerank
 configurate_methods:
   - predefined-model
 provider_credential_schema:
@@ -0,0 +1,52 @@
+model: amazon.nova-lite-v1:0
+label:
+  en_US: Nova Lite V1
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 300000
+parameter_rules:
+  - name: max_new_tokens
+    use_template: max_tokens
+    required: true
+    default: 2048
+    min: 1
+    max: 5000
+  - name: temperature
+    use_template: temperature
+    required: false
+    type: float
+    default: 1
+    min: 0.0
+    max: 1.0
+    help:
+      zh_Hans: 生成内容的随机性。
+      en_US: The amount of randomness injected into the response.
+  - name: top_p
+    required: false
+    type: float
+    default: 0.999
+    min: 0.000
+    max: 1.000
+    help:
+      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
+      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
+  - name: top_k
+    required: false
+    type: int
+    default: 0
+    min: 0
+    # tip docs from aws has error, max value is 500
+    max: 500
+    help:
+      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
+      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
+pricing:
+  input: '0.00006'
+  output: '0.00024'
+  unit: '0.001'
+  currency: USD
@@ -0,0 +1,52 @@
+model: amazon.nova-micro-v1:0
+label:
+  en_US: Nova Micro V1
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 128000
+parameter_rules:
+  - name: max_new_tokens
+    use_template: max_tokens
+    required: true
+    default: 2048
+    min: 1
+    max: 5000
+  - name: temperature
+    use_template: temperature
+    required: false
+    type: float
+    default: 1
+    min: 0.0
+    max: 1.0
+    help:
+      zh_Hans: 生成内容的随机性。
+      en_US: The amount of randomness injected into the response.
+  - name: top_p
+    required: false
+    type: float
+    default: 0.999
+    min: 0.000
+    max: 1.000
+    help:
+      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
+      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
+  - name: top_k
+    required: false
+    type: int
+    default: 0
+    min: 0
+    # tip docs from aws has error, max value is 500
+    max: 500
+    help:
+      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
+      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
+pricing:
+  input: '0.000035'
+  output: '0.00014'
+  unit: '0.001'
+  currency: USD
@@ -0,0 +1,52 @@
+model: amazon.nova-pro-v1:0
+label:
+  en_US: Nova Pro V1
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 300000
+parameter_rules:
+  - name: max_new_tokens
+    use_template: max_tokens
+    required: true
+    default: 2048
+    min: 1
+    max: 5000
+  - name: temperature
+    use_template: temperature
+    required: false
+    type: float
+    default: 1
+    min: 0.0
+    max: 1.0
+    help:
+      zh_Hans: 生成内容的随机性。
+      en_US: The amount of randomness injected into the response.
+  - name: top_p
+    required: false
+    type: float
+    default: 0.999
+    min: 0.000
+    max: 1.000
+    help:
+      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
+      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
+  - name: top_k
+    required: false
+    type: int
+    default: 0
+    min: 0
+    # tip docs from aws has error, max value is 500
+    max: 500
+    help:
+      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
+      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
+pricing:
+  input: '0.0008'
+  output: '0.0032'
+  unit: '0.001'
+  currency: USD
@@ -70,6 +70,8 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
         {"prefix": "cohere.command-r", "support_system_prompts": True, "support_tool_use": True},
         {"prefix": "amazon.titan", "support_system_prompts": False, "support_tool_use": False},
         {"prefix": "ai21.jamba-1-5", "support_system_prompts": True, "support_tool_use": False},
+        {"prefix": "amazon.nova", "support_system_prompts": True, "support_tool_use": False},
+        {"prefix": "us.amazon.nova", "support_system_prompts": True, "support_tool_use": False},
     ]

     @staticmethod
@@ -194,6 +196,13 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
         if model_info["support_tool_use"] and tools:
             parameters["toolConfig"] = self._convert_converse_tool_config(tools=tools)
+        try:
+            # for issue #10976
+            conversations_list = parameters["messages"]
+            # if two consecutive user messages found, combine them into one message
+            for i in range(len(conversations_list) - 2, -1, -1):
+                if conversations_list[i]["role"] == conversations_list[i + 1]["role"]:
+                    conversations_list[i]["content"].extend(conversations_list.pop(i + 1)["content"])

         if stream:
             response = bedrock_client.converse_stream(**parameters)
             return self._handle_converse_stream_response(
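The backwards loop added here is easy to check in isolation: iterating from the second-to-last index down keeps indices valid while `pop` shrinks the list, and consecutive same-role messages end up with their content blocks concatenated. A quick standalone check with toy data (not the real Bedrock payload):

```python
# Standalone check of the merge loop above: consecutive messages with
# the same role have their content blocks concatenated into one message.
messages = [
    {"role": "user", "content": [{"text": "hello"}]},
    {"role": "user", "content": [{"text": "are you there?"}]},
    {"role": "assistant", "content": [{"text": "yes"}]},
]
for i in range(len(messages) - 2, -1, -1):
    if messages[i]["role"] == messages[i + 1]["role"]:
        messages[i]["content"].extend(messages.pop(i + 1)["content"])

assert len(messages) == 2
assert messages[0]["content"] == [{"text": "hello"}, {"text": "are you there?"}]
```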
@@ -0,0 +1,52 @@
+model: us.amazon.nova-lite-v1:0
+label:
+  en_US: Nova Lite V1 (US.Cross Region Inference)
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 300000
+parameter_rules:
+  - name: max_new_tokens
+    use_template: max_tokens
+    required: true
+    default: 2048
+    min: 1
+    max: 5000
+  - name: temperature
+    use_template: temperature
+    required: false
+    type: float
+    default: 1
+    min: 0.0
+    max: 1.0
+    help:
+      zh_Hans: 生成内容的随机性。
+      en_US: The amount of randomness injected into the response.
+  - name: top_p
+    required: false
+    type: float
+    default: 0.999
+    min: 0.000
+    max: 1.000
+    help:
+      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
+      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
+  - name: top_k
+    required: false
+    type: int
+    default: 0
+    min: 0
+    # tip docs from aws has error, max value is 500
+    max: 500
+    help:
+      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
+      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
+pricing:
+  input: '0.00006'
+  output: '0.00024'
+  unit: '0.001'
+  currency: USD
@@ -0,0 +1,52 @@
+model: us.amazon.nova-micro-v1:0
+label:
+  en_US: Nova Micro V1 (US.Cross Region Inference)
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 128000
+parameter_rules:
+  - name: max_new_tokens
+    use_template: max_tokens
+    required: true
+    default: 2048
+    min: 1
+    max: 5000
+  - name: temperature
+    use_template: temperature
+    required: false
+    type: float
+    default: 1
+    min: 0.0
+    max: 1.0
+    help:
+      zh_Hans: 生成内容的随机性。
+      en_US: The amount of randomness injected into the response.
+  - name: top_p
+    required: false
+    type: float
+    default: 0.999
+    min: 0.000
+    max: 1.000
+    help:
+      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
+      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
+  - name: top_k
+    required: false
+    type: int
+    default: 0
+    min: 0
+    # tip docs from aws has error, max value is 500
+    max: 500
+    help:
+      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
+      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
+pricing:
+  input: '0.000035'
+  output: '0.00014'
+  unit: '0.001'
+  currency: USD
@@ -0,0 +1,52 @@
+model: us.amazon.nova-pro-v1:0
+label:
+  en_US: Nova Pro V1 (US.Cross Region Inference)
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 300000
+parameter_rules:
+  - name: max_new_tokens
+    use_template: max_tokens
+    required: true
+    default: 2048
+    min: 1
+    max: 5000
+  - name: temperature
+    use_template: temperature
+    required: false
+    type: float
+    default: 1
+    min: 0.0
+    max: 1.0
+    help:
+      zh_Hans: 生成内容的随机性。
+      en_US: The amount of randomness injected into the response.
+  - name: top_p
+    required: false
+    type: float
+    default: 0.999
+    min: 0.000
+    max: 1.000
+    help:
+      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
+      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
+  - name: top_k
+    required: false
+    type: int
+    default: 0
+    min: 0
+    # tip docs from aws has error, max value is 500
+    max: 500
+    help:
+      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
+      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
+pricing:
+  input: '0.0008'
+  output: '0.0032'
+  unit: '0.001'
+  currency: USD
@@ -0,0 +1,2 @@
+- amazon.rerank-v1
+- cohere.rerank-v3-5
@@ -0,0 +1,4 @@
+model: amazon.rerank-v1:0
+model_type: rerank
+model_properties:
+  context_size: 5120
@@ -0,0 +1,4 @@
+model: cohere.rerank-v3-5:0
+model_type: rerank
+model_properties:
+  context_size: 5120
api/core/model_runtime/model_providers/bedrock/rerank/rerank.py (new file, 147 lines)
@@ -0,0 +1,147 @@
+from typing import Optional
+
+import boto3
+from botocore.config import Config
+
+from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult
+from core.model_runtime.errors.invoke import (
+    InvokeAuthorizationError,
+    InvokeBadRequestError,
+    InvokeConnectionError,
+    InvokeError,
+    InvokeRateLimitError,
+    InvokeServerUnavailableError,
+)
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.__base.rerank_model import RerankModel
+
+
+class BedrockRerankModel(RerankModel):
+    """
+    Model class for Cohere rerank model.
+    """
+
+    def _invoke(
+        self,
+        model: str,
+        credentials: dict,
+        query: str,
+        docs: list[str],
+        score_threshold: Optional[float] = None,
+        top_n: Optional[int] = None,
+        user: Optional[str] = None,
+    ) -> RerankResult:
+        """
+        Invoke rerank model
+
+        :param model: model name
+        :param credentials: model credentials
+        :param query: search query
+        :param docs: docs for reranking
+        :param score_threshold: score threshold
+        :param top_n: top n
+        :param user: unique user id
+        :return: rerank result
+        """
+
+        if len(docs) == 0:
+            return RerankResult(model=model, docs=docs)
+
+        # initialize client
+        client_config = Config(region_name=credentials["aws_region"])
+        bedrock_runtime = boto3.client(
+            service_name="bedrock-agent-runtime",
+            config=client_config,
+            aws_access_key_id=credentials.get("aws_access_key_id", ""),
+            aws_secret_access_key=credentials.get("aws_secret_access_key"),
+        )
+        queries = [{"type": "TEXT", "textQuery": {"text": query}}]
+        text_sources = []
+        for text in docs:
+            text_sources.append(
+                {
+                    "type": "INLINE",
+                    "inlineDocumentSource": {
+                        "type": "TEXT",
+                        "textDocument": {
+                            "text": text,
+                        },
+                    },
+                }
+            )
+        modelId = model
+        region = credentials["aws_region"]
+        model_package_arn = f"arn:aws:bedrock:{region}::foundation-model/{modelId}"
+        rerankingConfiguration = {
+            "type": "BEDROCK_RERANKING_MODEL",
+            "bedrockRerankingConfiguration": {
+                "numberOfResults": top_n,
+                "modelConfiguration": {
+                    "modelArn": model_package_arn,
+                },
+            },
+        }
+        response = bedrock_runtime.rerank(
+            queries=queries, sources=text_sources, rerankingConfiguration=rerankingConfiguration
+        )
+
+        rerank_documents = []
+        for idx, result in enumerate(response["results"]):
+            # format document
+            index = result["index"]
+            rerank_document = RerankDocument(
+                index=index,
+                text=docs[index],
+                score=result["relevanceScore"],
+            )
+
+            # score threshold check
+            if score_threshold is not None:
+                if rerank_document.score >= score_threshold:
+                    rerank_documents.append(rerank_document)
+            else:
+                rerank_documents.append(rerank_document)
+
+        return RerankResult(model=model, docs=rerank_documents)
+
+    def validate_credentials(self, model: str, credentials: dict) -> None:
+        """
+        Validate model credentials
+
+        :param model: model name
+        :param credentials: model credentials
+        :return:
+        """
+        try:
+            self.invoke(
+                model=model,
+                credentials=credentials,
+                query="What is the capital of the United States?",
+                docs=[
+                    "Carson City is the capital city of the American state of Nevada. At the 2010 United States "
+                    "Census, Carson City had a population of 55,274.",
+                    "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that "
+                    "are a political division controlled by the United States. Its capital is Saipan.",
+                ],
+                score_threshold=0.8,
+            )
+        except Exception as ex:
+            raise CredentialsValidateFailedError(str(ex))
+
+    @property
+    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
+        """
+        Map model invoke error to unified error
+        The key is the ermd = genai.GenerativeModel(model) error type thrown to the caller
+        The value is the md = genai.GenerativeModel(model) error type thrown by the model,
+        which needs to be converted into a unified error type for the caller.
+
+        :return: Invoke emd = genai.GenerativeModel(model) error mapping
+        """
+        return {
+            InvokeConnectionError: [],
+            InvokeServerUnavailableError: [],
+            InvokeRateLimitError: [],
+            InvokeAuthorizationError: [],
+            InvokeBadRequestError: [],
+        }
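For context, driving this new class end to end looks roughly like the sketch below. The credential keys match the diff, but the region, key values, and model ID are placeholders, and it assumes AWS credentials with Bedrock rerank access:

```python
# Hypothetical driver for the new BedrockRerankModel; all values are
# placeholders and real AWS credentials are required for this to run.
model = BedrockRerankModel()
result = model._invoke(
    model="cohere.rerank-v3-5:0",
    credentials={
        "aws_region": "us-west-2",
        "aws_access_key_id": "AKIA...",
        "aws_secret_access_key": "...",
    },
    query="What is the capital of the United States?",
    docs=[
        "Carson City is the capital of Nevada.",
        "Washington, D.C. is the capital of the United States.",
    ],
    score_threshold=0.5,
    top_n=2,
)
for doc in result.docs:
    print(doc.index, round(doc.score, 3), doc.text)
```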
@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature
@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature
@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 8192
 parameter_rules:
   - name: temperature
     use_template: temperature
@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature
@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature
@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature
@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature
@@ -8,7 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
-  context_size: 10240
+  context_size: 1048576
 parameter_rules:
   - name: temperature
     use_template: temperature
@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature
@@ -4,6 +4,7 @@ label:
 model_type: llm
 model_properties:
   mode: chat
+  context_size: 2048
 features:
   - vision
 parameter_rules:
@@ -4,6 +4,7 @@ label:
 model_type: llm
 model_properties:
   mode: chat
+  context_size: 8192
 features:
   - vision
   - video
@@ -22,18 +22,6 @@ from core.model_runtime.model_providers.__base.large_language_model import Large
 from core.model_runtime.model_providers.zhipuai._common import _CommonZhipuaiAI
 from core.model_runtime.utils import helper

-GLM_JSON_MODE_PROMPT = """You should always follow the instructions and output a valid JSON object.
-The structure of the JSON object you can found in the instructions, use {"answer": "$your_answer"} as the default structure
-if you are not sure about the structure.
-
-And you should always end the block with a "```" to indicate the end of the JSON object.
-
-<instructions>
-{{instructions}}
-</instructions>
-
-```JSON"""  # noqa: E501
-

 class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
     def _invoke(
@@ -64,42 +52,8 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
         credentials_kwargs = self._to_credential_kwargs(credentials)

         # invoke model
-        # stop = stop or []
-        # self._transform_json_prompts(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)
         return self._generate(model, credentials_kwargs, prompt_messages, model_parameters, tools, stop, stream, user)

-    # def _transform_json_prompts(self, model: str, credentials: dict,
-    #                             prompt_messages: list[PromptMessage], model_parameters: dict,
-    #                             tools: list[PromptMessageTool] | None = None, stop: list[str] | None = None,
-    #                             stream: bool = True, user: str | None = None) \
-    #     -> None:
-    #     """
-    #     Transform json prompts to model prompts
-    #     """
-    #     if "}\n\n" not in stop:
-    #         stop.append("}\n\n")
-
-    #     # check if there is a system message
-    #     if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage):
-    #         # override the system message
-    #         prompt_messages[0] = SystemPromptMessage(
-    #             content=GLM_JSON_MODE_PROMPT.replace("{{instructions}}", prompt_messages[0].content)
-    #         )
-    #     else:
-    #         # insert the system message
-    #         prompt_messages.insert(0, SystemPromptMessage(
-    #             content=GLM_JSON_MODE_PROMPT.replace("{{instructions}}", "Please output a valid JSON object.")
-    #         ))
-    #     # check if the last message is a user message
-    #     if len(prompt_messages) > 0 and isinstance(prompt_messages[-1], UserPromptMessage):
-    #         # add ```JSON\n to the last message
-    #         prompt_messages[-1].content += "\n```JSON\n"
-    #     else:
-    #         # append a user message
-    #         prompt_messages.append(UserPromptMessage(
-    #             content="```JSON\n"
-    #         ))

     def get_num_tokens(
         self,
         model: str,
@@ -170,7 +124,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
         :return: full response or stream response chunk generator result
         """
         extra_model_kwargs = {}
-        # request to glm-4v-plus with stop words will always response "finish_reason":"network_error"
+        # request to glm-4v-plus with stop words will always respond "finish_reason":"network_error"
         if stop and model != "glm-4v-plus":
             extra_model_kwargs["stop"] = stop

@@ -186,7 +140,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
         # resolve zhipuai model not support system message and user message, assistant message must be in sequence
         new_prompt_messages: list[PromptMessage] = []
         for prompt_message in prompt_messages:
-            copy_prompt_message = prompt_message.copy()
+            copy_prompt_message = prompt_message.model_copy()
             if copy_prompt_message.role in {PromptMessageRole.USER, PromptMessageRole.SYSTEM, PromptMessageRole.TOOL}:
                 if isinstance(copy_prompt_message.content, list):
                     # check if model is 'glm-4v'
@@ -238,59 +192,38 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
             params = self._construct_glm_4v_parameter(model, new_prompt_messages, model_parameters)
         else:
             params = {"model": model, "messages": [], **model_parameters}
-            # glm model
-            if not model.startswith("chatglm"):
-                for prompt_message in new_prompt_messages:
-                    if prompt_message.role == PromptMessageRole.TOOL:
-                        params["messages"].append(
-                            {
-                                "role": "tool",
-                                "content": prompt_message.content,
-                                "tool_call_id": prompt_message.tool_call_id,
-                            }
-                        )
-                    elif isinstance(prompt_message, AssistantPromptMessage):
-                        if prompt_message.tool_calls:
-                            params["messages"].append(
-                                {
-                                    "role": "assistant",
-                                    "content": prompt_message.content,
-                                    "tool_calls": [
-                                        {
-                                            "id": tool_call.id,
-                                            "type": tool_call.type,
-                                            "function": {
-                                                "name": tool_call.function.name,
-                                                "arguments": tool_call.function.arguments,
-                                            },
-                                        }
-                                        for tool_call in prompt_message.tool_calls
-                                    ],
-                                }
-                            )
-                        else:
-                            params["messages"].append({"role": "assistant", "content": prompt_message.content})
-                    else:
-                        params["messages"].append(
-                            {"role": prompt_message.role.value, "content": prompt_message.content}
-                        )
-            else:
-                # chatglm model
-                for prompt_message in new_prompt_messages:
-                    # merge system message to user message
-                    if prompt_message.role in {
-                        PromptMessageRole.SYSTEM,
-                        PromptMessageRole.TOOL,
-                        PromptMessageRole.USER,
-                    }:
-                        if len(params["messages"]) > 0 and params["messages"][-1]["role"] == "user":
-                            params["messages"][-1]["content"] += "\n\n" + prompt_message.content
-                        else:
-                            params["messages"].append({"role": "user", "content": prompt_message.content})
-                    else:
-                        params["messages"].append(
-                            {"role": prompt_message.role.value, "content": prompt_message.content}
-                        )
+            for prompt_message in new_prompt_messages:
+                if prompt_message.role == PromptMessageRole.TOOL:
+                    params["messages"].append(
+                        {
+                            "role": "tool",
+                            "content": prompt_message.content,
+                            "tool_call_id": prompt_message.tool_call_id,
+                        }
+                    )
+                elif isinstance(prompt_message, AssistantPromptMessage):
+                    if prompt_message.tool_calls:
+                        params["messages"].append(
+                            {
+                                "role": "assistant",
+                                "content": prompt_message.content,
+                                "tool_calls": [
+                                    {
+                                        "id": tool_call.id,
+                                        "type": tool_call.type,
+                                        "function": {
+                                            "name": tool_call.function.name,
+                                            "arguments": tool_call.function.arguments,
+                                        },
+                                    }
+                                    for tool_call in prompt_message.tool_calls
+                                ],
+                            }
+                        )
+                    else:
+                        params["messages"].append({"role": "assistant", "content": prompt_message.content})
+                else:
+                    params["messages"].append({"role": prompt_message.role.value, "content": prompt_message.content})

         if tools and len(tools) > 0:
             params["tools"] = [{"type": "function", "function": helper.dump_model(tool)} for tool in tools]
@@ -406,7 +339,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
         Handle llm stream response

         :param model: model name
-        :param response: response
+        :param responses: response
         :param prompt_messages: prompt messages
         :return: llm response chunk generator result
         """
@@ -505,7 +438,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
         if tools and len(tools) > 0:
             text += "\n\nTools:"
             for tool in tools:
-                text += f"\n{tool.json()}"
+                text += f"\n{tool.model_dump_json()}"

         # trim off the trailing ' ' that might come from the "Assistant: "
         return text.rstrip()
@@ -5,7 +5,7 @@ BAICHUAN_CONTEXT = "用户在与一个客观的助手对话。助手会尊重找
 CHAT_APP_COMPLETION_PROMPT_CONFIG = {
     "completion_prompt_config": {
         "prompt": {
-            "text": "{{#pre_prompt#}}\nHere is the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>\n{{#histories#}}\n</histories>\n\n\nHuman: {{#query#}}\n\nAssistant: "  # noqa: E501
+            "text": "{{#pre_prompt#}}\nHere are the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>\n{{#histories#}}\n</histories>\n\n\nHuman: {{#query#}}\n\nAssistant: "  # noqa: E501
         },
         "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
     },
@@ -375,7 +375,6 @@ class TidbOnQdrantVector(BaseVector):
         for result in results:
             if result:
                 document = self._document_from_scored_point(result, Field.CONTENT_KEY.value, Field.METADATA_KEY.value)
-                document.metadata["vector"] = result.vector
                 documents.append(document)

         return documents
@@ -394,6 +393,7 @@ class TidbOnQdrantVector(BaseVector):
     ) -> Document:
         return Document(
             page_content=scored_point.payload.get(content_payload_key),
+            vector=scored_point.vector,
             metadata=scored_point.payload.get(metadata_payload_key) or {},
         )

@@ -50,7 +50,7 @@ class WordExtractor(BaseExtractor):

             self.web_path = self.file_path
             # TODO: use a better way to handle the file
-            self.temp_file = tempfile.NamedTemporaryFile()  # noqa: SIM115
+            self.temp_file = tempfile.NamedTemporaryFile()
             self.temp_file.write(r.content)
             self.file_path = self.temp_file.name
         elif not os.path.isfile(self.file_path):
@@ -6,9 +6,9 @@ identity:
     zh_Hans: GitLab 合并请求查询
 description:
   human:
-    en_US: A tool for query GitLab merge requests, Input should be a exists reposity or branch.
+    en_US: A tool for query GitLab merge requests, Input should be a exists repository or branch.
     zh_Hans: 一个用于查询 GitLab 代码合并请求的工具,输入的内容应该是一个已存在的仓库名或者分支。
-  llm: A tool for query GitLab merge requests, Input should be a exists reposity or branch.
+  llm: A tool for query GitLab merge requests, Input should be a exists repository or branch.
 parameters:
   - name: repository
     type: string
@@ -61,7 +61,7 @@ class WolframAlphaTool(BuiltinTool):
             params["input"] = query
         else:
             finished = True
-        if "souces" in response_data["queryresult"]:
+        if "sources" in response_data["queryresult"]:
             return self.create_link_message(response_data["queryresult"]["sources"]["url"])
         elif "pods" in response_data["queryresult"]:
             result = response_data["queryresult"]["pods"][0]["subpods"][0]["plaintext"]
@@ -1,11 +1,9 @@
 import logging
 from collections.abc import Mapping, Sequence
-from mimetypes import guess_extension
-from os import path
 from typing import Any

 from configs import dify_config
-from core.file import File, FileTransferMethod, FileType
+from core.file import File, FileTransferMethod
 from core.tools.tool_file_manager import ToolFileManager
 from core.workflow.entities.node_entities import NodeRunResult
 from core.workflow.entities.variable_entities import VariableSelector
@@ -150,11 +148,6 @@ class HttpRequestNode(BaseNode[HttpRequestNodeData]):
         content = response.content

-        if is_file and content_type:
-            # extract filename from url
-            filename = path.basename(url)
-            # extract extension if possible
-            extension = guess_extension(content_type) or ".bin"

         tool_file = ToolFileManager.create_file_by_raw(
             user_id=self.user_id,
             tenant_id=self.tenant_id,
@@ -165,7 +158,6 @@ class HttpRequestNode(BaseNode[HttpRequestNodeData]):

         mapping = {
             "tool_file_id": tool_file.id,
-            "type": FileType.IMAGE.value,
             "transfer_method": FileTransferMethod.TOOL_FILE.value,
         }
         file = file_factory.build_from_mapping(
@@ -24,7 +24,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
         """
         node_inputs: dict[str, list] = {"conditions": []}

-        process_datas: dict[str, list] = {"condition_results": []}
+        process_data: dict[str, list] = {"condition_results": []}

         input_conditions = []
         final_result = False
@@ -40,7 +40,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
                     operator=case.logical_operator,
                 )

-                process_datas["condition_results"].append(
+                process_data["condition_results"].append(
                     {
                         "group": case.model_dump(),
                         "results": group_result,
@@ -65,7 +65,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):

                 selected_case_id = "true" if final_result else "false"

-                process_datas["condition_results"].append(
+                process_data["condition_results"].append(
                     {"group": "default", "results": group_result, "final_result": final_result}
                 )

@@ -73,7 +73,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):

         except Exception as e:
             return NodeRunResult(
-                status=WorkflowNodeExecutionStatus.FAILED, inputs=node_inputs, process_data=process_datas, error=str(e)
+                status=WorkflowNodeExecutionStatus.FAILED, inputs=node_inputs, process_data=process_data, error=str(e)
             )

         outputs = {"result": final_result, "selected_case_id": selected_case_id}
@@ -81,7 +81,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
         data = NodeRunResult(
             status=WorkflowNodeExecutionStatus.SUCCEEDED,
             inputs=node_inputs,
-            process_data=process_datas,
+            process_data=process_data,
             edge_source_handle=selected_case_id or "false",  # Use case ID or 'default'
             outputs=outputs,
         )
@@ -116,7 +116,7 @@ class IterationNode(BaseNode[IterationNodeData]):
         variable_pool.add([self.node_id, "item"], iterator_list_value[0])

         # init graph engine
-        from core.workflow.graph_engine.graph_engine import GraphEngine
+        from core.workflow.graph_engine.graph_engine import GraphEngine, GraphEngineThreadPool

         graph_engine = GraphEngine(
             tenant_id=self.tenant_id,
@@ -162,8 +162,7 @@ class IterationNode(BaseNode[IterationNodeData]):
         if self.node_data.is_parallel:
             futures: list[Future] = []
             q = Queue()
-            thread_pool = graph_engine.workflow_thread_pool_mapping[graph_engine.thread_pool_id]
-            thread_pool._max_workers = self.node_data.parallel_nums
+            thread_pool = GraphEngineThreadPool(max_workers=self.node_data.parallel_nums, max_submit_count=100)
             for index, item in enumerate(iterator_list_value):
                 future: Future = thread_pool.submit(
                     self._run_single_iter_parallel,
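The switch from mutating the shared engine pool's private `_max_workers` to constructing a dedicated `GraphEngineThreadPool` keeps one iteration's parallelism from starving other workflows. A rough sketch of what such a bounded pool can look like; the `max_submit_count` guard is inferred from the call site above, not taken from the real implementation:

```python
from concurrent.futures import ThreadPoolExecutor

class BoundedThreadPool(ThreadPoolExecutor):
    """Sketch of a ThreadPoolExecutor that also caps total submissions,
    mirroring the GraphEngineThreadPool(max_workers=..., max_submit_count=...)
    call in the diff; the real class may differ."""

    def __init__(self, max_workers: int, max_submit_count: int):
        super().__init__(max_workers=max_workers)
        self.max_submit_count = max_submit_count
        self.submit_count = 0

    def submit(self, fn, /, *args, **kwargs):
        self.submit_count += 1
        if self.submit_count > self.max_submit_count:
            raise ValueError("Max submit count reached.")
        return super().submit(fn, *args, **kwargs)

# usage: isolate an iteration's parallelism from other workflows
with BoundedThreadPool(max_workers=4, max_submit_count=100) as pool:
    futures = [pool.submit(pow, 2, n) for n in range(8)]
    print([f.result() for f in futures])
```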
@@ -815,7 +815,7 @@ class LLMNode(BaseNode[LLMNodeData]):
             "completion_model": {
                 "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
                 "prompt": {
-                    "text": "Here is the chat histories between human and assistant, inside "
+                    "text": "Here are the chat histories between human and assistant, inside "
                     "<histories></histories> XML tags.\n\n<histories>\n{{"
                     "#histories#}}\n</histories>\n\n\nHuman: {{#sys.query#}}\n\nAssistant:",
                     "edition_type": "basic",
@@ -98,7 +98,7 @@ Step 3: Structure the extracted parameters to JSON object as specified in <struc
 Step 4: Ensure that the JSON object is properly formatted and valid. The output should not contain any XML tags. Only the JSON object should be outputted.

 ### Memory
-Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
+Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
 <histories>
 {histories}
 </histories>
@@ -125,7 +125,7 @@ CHAT_GENERATE_JSON_PROMPT = """You should always follow the instructions and out
 The structure of the JSON object you can found in the instructions.

 ### Memory
-Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
+Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
 <histories>
 {histories}
 </histories>
@@ -8,7 +8,7 @@ QUESTION_CLASSIFIER_SYSTEM_PROMPT = """
 ### Constraint
 DO NOT include anything other than the JSON array in your response.
 ### Memory
-Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
+Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
 <histories>
 {histories}
 </histories>
@@ -66,7 +66,7 @@ User:{{"input_text": ["bad service, slow to bring the food"], "categories": [{{"
 Assistant:{{"keywords": ["bad service", "slow", "food", "tip", "terrible", "waitresses"],"category_id": "f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name": "Experience"}}
 </example>
 ### Memory
-Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
+Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
 <histories>
 {histories}
 </histories>
@@ -7,8 +7,8 @@ from .enums import InputType, Operation


 class OperationNotSupportedError(VariableOperatorNodeError):
-    def __init__(self, *, operation: Operation, varialbe_type: str):
-        super().__init__(f"Operation {operation} is not supported for type {varialbe_type}")
+    def __init__(self, *, operation: Operation, variable_type: str):
+        super().__init__(f"Operation {operation} is not supported for type {variable_type}")


 class InputTypeNotSupportedError(VariableOperatorNodeError):
@@ -45,7 +45,7 @@ class VariableAssignerNode(BaseNode[VariableAssignerNodeData]):

         # Check if operation is supported
         if not helpers.is_operation_supported(variable_type=variable.value_type, operation=item.operation):
-            raise OperationNotSupportedError(operation=item.operation, varialbe_type=variable.value_type)
+            raise OperationNotSupportedError(operation=item.operation, variable_type=variable.value_type)

         # Check if variable input is supported
         if item.input_type == InputType.VARIABLE and not helpers.is_variable_input_supported(
@@ -156,4 +156,4 @@ class VariableAssignerNode(BaseNode[VariableAssignerNodeData]):
             case Operation.DIVIDE:
                 return variable.value / value
             case _:
-                raise OperationNotSupportedError(operation=operation, varialbe_type=variable.value_type)
+                raise OperationNotSupportedError(operation=operation, variable_type=variable.value_type)
api/poetry.lock (generated, 1867 lines changed; file diff suppressed because it is too large)
@@ -20,7 +20,7 @@ azure-ai-inference = "~1.0.0b3"
 azure-ai-ml = "~1.20.0"
 azure-identity = "1.16.1"
 beautifulsoup4 = "4.12.2"
-boto3 = "1.35.17"
+boto3 = "1.35.74"
 bs4 = "~0.0.1"
 cachetools = "~5.3.0"
 celery = "~5.4.0"
@@ -29,6 +29,7 @@ import { useAppContext } from '@/context/app-context'
 import { ModelFeatureEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
 import { useFeatures } from '@/app/components/base/features/hooks'
 import type { InputForm } from '@/app/components/base/chat/chat/type'
+import { getLastAnswer } from '@/app/components/base/chat/utils'

 interface ChatItemProps {
   modelAndParameter: ModelAndParameter
@@ -101,7 +102,7 @@ const ChatItem: FC<ChatItemProps> = ({
       query: message,
       inputs,
       model_config: configData,
-      parent_message_id: chatListRef.current.at(-1)?.id || null,
+      parent_message_id: getLastAnswer(chatListRef.current)?.id || null,
     }

     if ((config.file_upload as any).enabled && files?.length && supportVision)
@@ -84,7 +84,7 @@ const FileImageItem = ({
           className='absolute bottom-0.5 right-0.5 flex items-center justify-center w-6 h-6 rounded-lg bg-components-actionbar-bg shadow-md'
           onClick={(e) => {
             e.stopPropagation()
-            downloadFile(url || '', name)
+            downloadFile(url || base64Url || '', name)
           }}
         >
           <RiDownloadLine className='w-4 h-4 text-text-tertiary' />
@@ -80,7 +80,7 @@ const FileItem = ({
         }
       </div>
       {
-        showDownloadAction && (
+        showDownloadAction && url && (
          <ActionButton
            size='m'
            className='hidden group-hover/file-item:flex absolute -right-1 -top-1'
@@ -53,8 +53,7 @@ const ImageGallery: FC<Props> = ({
         imagePreviewUrl && (
           <ImagePreview
             url={imagePreviewUrl}
-            onCancel={() => setImagePreviewUrl('')}
-          />
+            onCancel={() => setImagePreviewUrl('')} title={''} />
         )
       }
     </div>
@@ -9,7 +9,6 @@ import RemarkGfm from 'remark-gfm'
 import RehypeRaw from 'rehype-raw'
 import SyntaxHighlighter from 'react-syntax-highlighter'
 import { atelierHeathLight } from 'react-syntax-highlighter/dist/esm/styles/hljs'
-import type { RefObject } from 'react'
 import { Component, createContext, memo, useContext, useEffect, useMemo, useRef, useState } from 'react'
 import cn from '@/utils/classnames'
 import CopyBtn from '@/app/components/base/copy-btn'
@@ -78,28 +77,6 @@ export function PreCode(props: { children: any }) {
   )
 }

-const useLazyLoad = (ref: RefObject<Element>): boolean => {
-  const [isIntersecting, setIntersecting] = useState<boolean>(false)
-
-  useEffect(() => {
-    const observer = new IntersectionObserver(([entry]) => {
-      if (entry.isIntersecting) {
-        setIntersecting(true)
-        observer.disconnect()
-      }
-    })
-
-    if (ref.current)
-      observer.observe(ref.current)
-
-    return () => {
-      observer.disconnect()
-    }
-  }, [ref])
-
-  return isIntersecting
-}
-
 const PreContext = createContext({
   // if children not in PreContext, just leave inline true
   inline: true,
@@ -138,7 +115,7 @@ const CodeBlock: Components['code'] = memo(({ className, children, ...props }) =
       try {
         return JSON.parse(String(children).replace(/\n$/, ''))
       }
-      catch { }
+      catch (error) { }
     }
     return JSON.parse('{"title":{"text":"ECharts error - Wrong JSON format."}}')
   }, [language, children])
@@ -167,7 +144,7 @@ const CodeBlock: Components['code'] = memo(({ className, children, ...props }) =
   else {
     return (
       <SyntaxHighlighter
-        {...props}
+        {...props as any}
         style={atelierHeathLight}
         customStyle={{
           paddingLeft: 12,
@@ -274,7 +251,7 @@ export function Markdown(props: { content: string; className?: string }) {
     () => {
       return (tree) => {
         const iterate = (node: any) => {
-          if (node.type === 'element' && !node.properties?.src && node.properties?.ref && node.properties.ref.startsWith('{') && node.properties.ref.endsWith('}'))
+          if (node.type === 'element' && node.properties?.ref)
             delete node.properties.ref

           if (node.children)
web/app/components/base/skeleton/index.tsx (new file, 47 lines)
@@ -0,0 +1,47 @@
+import type { ComponentProps, FC } from 'react'
+import classNames from '@/utils/classnames'
+
+type SkeletonProps = ComponentProps<'div'>
+
+export const SkeletonContanier: FC<SkeletonProps> = (props) => {
+  const { className, children, ...rest } = props
+  return (
+    <div className={classNames('flex flex-col gap-1', className)} {...rest}>
+      {children}
+    </div>
+  )
+}
+
+export const SkeletonRow: FC<SkeletonProps> = (props) => {
+  const { className, children, ...rest } = props
+  return (
+    <div className={classNames('flex items-center gap-2', className)} {...rest}>
+      {children}
+    </div>
+  )
+}
+
+export const SkeletonRectangle: FC<SkeletonProps> = (props) => {
+  const { className, children, ...rest } = props
+  return (
+    <div className={classNames('h-2 rounded-sm opacity-20 bg-text-tertiary my-1', className)} {...rest}>
+      {children}
+    </div>
+  )
+}
+
+export const SkeletonPoint: FC = () =>
+  <div className='text-text-quaternary text-xs font-medium'>·</div>
+
+/** Usage
+ * <SkeletonContanier>
+ *   <SkeletonRow>
+ *     <SkeletonRectangle className="w-96" />
+ *     <SkeletonPoint />
+ *     <SkeletonRectangle className="w-96" />
+ *   </SkeletonRow>
+ *   <SkeletonRow>
+ *     <SkeletonRectangle className="w-96" />
+ *   </SkeletonRow>
+ *   <SkeletonRow>
+ */
@@ -30,7 +30,9 @@ const nodeDefault: NodeDefault<AssignerNodeType> = {
       errorMessages = t(`${i18nPrefix}.fieldRequired`, { field: t('workflow.nodes.assigner.assignedVariable') })

     if (!errorMessages && value.operation !== WriteMode.clear) {
-      if (value.operation === WriteMode.set) {
+      if (value.operation === WriteMode.set || value.operation === WriteMode.increment
+        || value.operation === WriteMode.decrement || value.operation === WriteMode.multiply
+        || value.operation === WriteMode.divide) {
         if (!value.value && typeof value.value !== 'number')
           errorMessages = t(`${i18nPrefix}.fieldRequired`, { field: t('workflow.nodes.assigner.variable') })
       }
@@ -33,7 +33,7 @@ const NodeVariableItem = ({
   const { t } = useTranslation()
   return (
     <div className={cn(
-      'relative flex items-center p-[3px] pl-[5px] gap-1 self-stretch rounded-md bg-workflow-block-param-bg',
+      'relative flex items-center p-[3px] pl-[5px] gap-1 self-stretch rounded-md bg-workflow-block-parma-bg',
       showBorder && '!bg-black/[0.02]',
       className,
     )}>
@@ -97,8 +97,9 @@ const ChatVariableModal = ({
     return objectPlaceholder
   }, [type])
   const getObjectValue = useCallback(() => {
-    if (!chatVar)
+    if (!chatVar || Object.keys(chatVar.value).length === 0)
       return [DEFAULT_OBJECT_VALUE]

     return Object.keys(chatVar.value).map((key) => {
       return {
         key,
@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Übersetzen',
     Programming: 'Programmieren',
     HR: 'Personalwesen',
+    Agent: 'Agent',
+    Workflow: 'Arbeitsablauf',
   },
 }

@@ -30,11 +30,13 @@ const translation = {
     nameRequired: 'App name is required',
   },
   category: {
+    Agent: 'Agent',
     Assistant: 'Assistant',
     Writing: 'Writing',
     Translate: 'Translate',
     Programming: 'Programming',
     HR: 'HR',
+    Workflow: 'Workflow',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Traducción',
     Programming: 'Programación',
     HR: 'Recursos Humanos',
+    Agent: 'Agente',
+    Workflow: 'Flujo de trabajo',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'ترجمه',
     Programming: 'برنامه‌نویسی',
     HR: 'منابع انسانی',
+    Agent: 'عامل',
+    Workflow: 'گردش',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Traduire',
     Programming: 'Programmation',
     HR: 'RH',
+    Agent: 'Agent',
+    Workflow: 'Flux de travail',
   },
 }

@@ -36,6 +36,8 @@ const translation = {
     Translate: 'अनुवाद',
     Programming: 'प्रोग्रामिंग',
     HR: 'मानव संसाधन',
+    Workflow: 'कार्यप्रवाह',
+    Agent: 'आढ़तिया',
   },
 }

@@ -36,6 +36,8 @@ const translation = {
     Translate: 'Traduzione',
     Programming: 'Programmazione',
     HR: 'Risorse Umane',
+    Workflow: 'Flusso di lavoro',
+    Agent: 'Agente',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: '翻訳',
     Programming: 'プログラミング',
     HR: '人事',
+    Workflow: 'ワークフロー',
+    Agent: 'エージェント',
   },
 }

@@ -36,6 +36,8 @@ const translation = {
     Translate: 'Tłumaczenie',
     Programming: 'Programowanie',
     HR: 'HR',
+    Agent: 'Agent',
+    Workflow: 'Przepływ pracy',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Traduzir',
     Programming: 'Programação',
     HR: 'RH',
+    Workflow: 'Fluxo de trabalho',
+    Agent: 'Agente',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Traducere',
     Programming: 'Programare',
     HR: 'Resurse Umane',
+    Agent: 'Agent',
+    Workflow: 'Flux de lucru',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Перевод',
     Programming: 'Программирование',
     HR: 'HR',
+    Agent: 'Агент',
+    Workflow: 'Рабочий процесс',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Prevajanje',
     Programming: 'Programiranje',
     HR: 'Kadri',
+    Workflow: 'Potek dela',
+    Agent: 'Agent',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'แปล',
     Programming: 'โปรแกรม',
     HR: 'ชั่วโมง',
+    Workflow: 'เวิร์กโฟลว์',
+    Agent: 'ตัวแทน',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Çeviri',
     Programming: 'Programlama',
     HR: 'İK',
+    Agent: 'Aracı',
+    Workflow: 'İş Akışı',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Переклад',
     Programming: 'Програмування',
     HR: 'HR',
+    Workflow: 'Робочий процес',
+    Agent: 'Агент',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Dịch thuật',
     Programming: 'Lập trình',
     HR: 'Nhân sự',
+    Agent: 'Người đại lý',
+    Workflow: 'Quy trình làm việc',
   },
 }

@@ -30,11 +30,13 @@ const translation = {
     nameRequired: '应用程序名称不能为空',
   },
   category: {
+    Agent: 'Agent',
     Assistant: '助手',
     Writing: '写作',
     Translate: '翻译',
     Programming: '编程',
     HR: '人力资源',
+    Workflow: '工作流',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: '翻譯',
     Programming: '程式設計',
     HR: '人力資源',
+    Agent: '代理',
+    Workflow: '工作流',
   },
 }
