Merge branch 'main' into feat/new-login

* main: (40 commits)
  feat: allow users to specify timeout for text generations and workflows by environment variable (#8395)
  Fix: operation position of answer in logs (#8411)
  fix: when the variable does not exist, an error should be prompted (#8413)
  fix(workflow): the answer node after the iteration node containing the answer was output prematurely (#8419)
  fix: logs and remove unused code in CacheEmbedding (#8409)
  fix: resolve runtime error when self.folder is None (#8401)
  Fix: Support Bedrock cross region inference #8190 (Update Model name to distinguish between different region groups) (#8402)
  fix(docker): aliyun oss path env key (#8394)
  fix: pyproject.toml typo (#8396)
  fix: o1-mini 65563 -> 65536 (#8388)
  fix: sandbox issue related httpx and requests (#8397)
  chore: improve usage of stripping prefix or suffix of string with Ruff 0.6.5 (#8392)
  fix (#8322 followup): resolve the violation of pylint rules (#8391)
  chore: refurbish Python code by applying Pylint linter rules (#8322)
  support hunyuan-turbo (#8372)
  chore: update firecrawl scrape to V1 api (#8367)
  fix(workflow): both parallel and single branch errors occur in if-else (#8378)
  fix: editing load balancing does not pass id (#8370)
  fix: add before_send to remove langfuse defaultErrorResponse (#8361)
  fix: when editing load balancing config, the hidden empty field value is not passed (#8366)
  ...
commit cd277aa2d8
Joe · 2024-09-14 14:53:11 +08:00
274 changed files with 2303 additions and 1475 deletions

View File

@@ -164,7 +164,7 @@ def initialize_extensions(app):
 @login_manager.request_loader
 def load_user_from_request(request_from_flask_login):
     """Load user based on the request."""
-    if request.blueprint not in ["console", "inner_api"]:
+    if request.blueprint not in {"console", "inner_api"}:
         return None
     # Check if the user_id contains a dot, indicating the old format
     auth_header = request.headers.get("Authorization", "")
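
Most hunks in this merge apply the same Ruff/Pylint rewrite: membership tests against short literal collections move from list literals to set literals. A minimal illustration of the difference (variable names are ours, not from the diff):

    import timeit

    blueprint = "inner_api"

    # List literal: membership is a linear scan.
    list_check = timeit.timeit(lambda: blueprint not in ["console", "inner_api"], number=1_000_000)
    # Set literal: CPython folds constant sets into frozensets with O(1) average lookup.
    set_check = timeit.timeit(lambda: blueprint not in {"console", "inner_api"}, number=1_000_000)

    print(f"list: {list_check:.3f}s  set: {set_check:.3f}s")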

View File

@@ -104,7 +104,7 @@ def reset_email(email, new_email, email_confirm):
 )
 @click.confirmation_option(
     prompt=click.style(
-        "Are you sure you want to reset encrypt key pair?" " this operation cannot be rolled back!", fg="red"
+        "Are you sure you want to reset encrypt key pair? this operation cannot be rolled back!", fg="red"
     )
 )
 def reset_encrypt_key_pair():
@@ -131,7 +131,7 @@ def reset_encrypt_key_pair():
     click.echo(
         click.style(
-            "Congratulations! " "the asymmetric key pair of workspace {} has been reset.".format(tenant.id),
+            "Congratulations! The asymmetric key pair of workspace {} has been reset.".format(tenant.id),
             fg="green",
         )
     )
@@ -140,9 +140,9 @@ def reset_encrypt_key_pair():
 @click.command("vdb-migrate", help="migrate vector db.")
 @click.option("--scope", default="all", prompt=False, help="The scope of vector database to migrate, Default is All.")
 def vdb_migrate(scope: str):
-    if scope in ["knowledge", "all"]:
+    if scope in {"knowledge", "all"}:
         migrate_knowledge_vector_database()
-    if scope in ["annotation", "all"]:
+    if scope in {"annotation", "all"}:
         migrate_annotation_vector_database()
@@ -275,8 +275,7 @@ def migrate_knowledge_vector_database():
     for dataset in datasets:
         total_count = total_count + 1
         click.echo(
-            f"Processing the {total_count} dataset {dataset.id}. "
-            + f"{create_count} created, {skipped_count} skipped."
+            f"Processing the {total_count} dataset {dataset.id}. {create_count} created, {skipped_count} skipped."
         )
         try:
             click.echo("Create dataset vdb index: {}".format(dataset.id))
@@ -594,7 +593,7 @@ def create_tenant(email: str, language: Optional[str] = None, name: Optional[str
     click.echo(
         click.style(
-            "Congratulations! Account and tenant created.\n" "Account: {}\nPassword: {}".format(email, new_password),
+            "Congratulations! Account and tenant created.\nAccount: {}\nPassword: {}".format(email, new_password),
             fg="green",
         )
     )

View File

@@ -138,12 +138,12 @@ class EndpointConfig(BaseSettings):
     )
     SERVICE_API_URL: str = Field(
-        description="Service API Url prefix." "used to display Service API Base Url to the front-end.",
+        description="Service API Url prefix. used to display Service API Base Url to the front-end.",
         default="",
     )
     APP_WEB_URL: str = Field(
-        description="WebApp Url prefix." "used to display WebAPP API Base Url to the front-end.",
+        description="WebApp Url prefix. used to display WebAPP API Base Url to the front-end.",
         default="",
     )
@@ -281,7 +281,7 @@ class LoggingConfig(BaseSettings):
     """
     LOG_LEVEL: str = Field(
-        description="Log output level, default to INFO." "It is recommended to set it to ERROR for production.",
+        description="Log output level, default to INFO. It is recommended to set it to ERROR for production.",
         default="INFO",
     )
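
The string fixes in this and the surrounding files all stem from Python's implicit concatenation of adjacent string literals: when a wrapped line loses its trailing space, words fuse silently. A small demonstration using the string from the hunk above:

    # Adjacent literals are joined at compile time with no separator.
    description = "Service API Url prefix." "used to display Service API Base Url to the front-end."
    print(description)
    # -> Service API Url prefix.used to display Service API Base Url to the front-end.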

View File

@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
     CURRENT_VERSION: str = Field(
         description="Dify version",
-        default="0.8.0",
+        default="0.8.2",
     )
     COMMIT_SHA: str = Field(

View File

@@ -94,7 +94,7 @@ class ChatMessageTextApi(Resource):
         message_id = args.get("message_id", None)
         text = args.get("text", None)
         if (
-            app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]
+            app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}
             and app_model.workflow
             and app_model.workflow.features_dict
         ):

View File

@@ -465,6 +465,6 @@ api.add_resource(
 api.add_resource(PublishedWorkflowApi, "/apps/<uuid:app_id>/workflows/publish")
 api.add_resource(DefaultBlockConfigsApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs")
 api.add_resource(
-    DefaultBlockConfigApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs" "/<string:block_type>"
+    DefaultBlockConfigApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs/<string:block_type>"
 )
 api.add_resource(ConvertToWorkflowApi, "/apps/<uuid:app_id>/convert-to-workflow")

View File

@@ -101,7 +101,7 @@ class OAuthCallback(Resource):
         )
     # Check account status
-    if account.status == AccountStatus.BANNED.value or account.status == AccountStatus.CLOSED.value:
+    if account.status in {AccountStatus.BANNED.value, AccountStatus.CLOSED.value}:
         return {"error": "Account is banned or closed."}, 403
     if account.status == AccountStatus.PENDING.value:

View File

@@ -399,7 +399,7 @@ class DatasetIndexingEstimateApi(Resource):
             )
         except LLMBadRequestError:
             raise ProviderNotInitializeError(
-                "No Embedding Model available. Please configure a valid provider " "in the Settings -> Model Provider."
+                "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
             )
         except ProviderTokenNotInitError as ex:
             raise ProviderNotInitializeError(ex.description)

View File

@@ -354,7 +354,7 @@ class DocumentIndexingEstimateApi(DocumentResource):
         document_id = str(document_id)
         document = self.get_document(dataset_id, document_id)
-        if document.indexing_status in ["completed", "error"]:
+        if document.indexing_status in {"completed", "error"}:
             raise DocumentAlreadyFinishedError()
         data_process_rule = document.dataset_process_rule
@@ -421,7 +421,7 @@ class DocumentBatchIndexingEstimateApi(DocumentResource):
         info_list = []
         extract_settings = []
         for document in documents:
-            if document.indexing_status in ["completed", "error"]:
+            if document.indexing_status in {"completed", "error"}:
                 raise DocumentAlreadyFinishedError()
             data_source_info = document.data_source_info_dict
             # format document files info
@@ -665,7 +665,7 @@ class DocumentProcessingApi(DocumentResource):
             db.session.commit()
         elif action == "resume":
-            if document.indexing_status not in ["paused", "error"]:
+            if document.indexing_status not in {"paused", "error"}:
                 raise InvalidActionError("Document not in paused or error state.")
             document.paused_by = None

View File

@@ -18,9 +18,7 @@ class NotSetupError(BaseHTTPException):
 class NotInitValidateError(BaseHTTPException):
     error_code = "not_init_validated"
-    description = (
-        "Init validation has not been completed yet. " "Please proceed with the init validation process first."
-    )
+    description = "Init validation has not been completed yet. Please proceed with the init validation process first."
     code = 401

View File

@@ -81,7 +81,7 @@ class ChatTextApi(InstalledAppResource):
         message_id = args.get("message_id", None)
         text = args.get("text", None)
         if (
-            app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]
+            app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}
             and app_model.workflow
             and app_model.workflow.features_dict
         ):

View File

@@ -92,7 +92,7 @@ class ChatApi(InstalledAppResource):
     def post(self, installed_app):
         app_model = installed_app.app
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         parser = reqparse.RequestParser()
@@ -140,7 +140,7 @@ class ChatStopApi(InstalledAppResource):
     def post(self, installed_app, task_id):
         app_model = installed_app.app
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         AppQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)

View File

@@ -20,7 +20,7 @@ class ConversationListApi(InstalledAppResource):
     def get(self, installed_app):
         app_model = installed_app.app
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         parser = reqparse.RequestParser()
@@ -50,7 +50,7 @@ class ConversationApi(InstalledAppResource):
     def delete(self, installed_app, c_id):
         app_model = installed_app.app
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         conversation_id = str(c_id)
@@ -68,7 +68,7 @@ class ConversationRenameApi(InstalledAppResource):
     def post(self, installed_app, c_id):
         app_model = installed_app.app
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         conversation_id = str(c_id)
@@ -90,7 +90,7 @@ class ConversationPinApi(InstalledAppResource):
     def patch(self, installed_app, c_id):
         app_model = installed_app.app
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         conversation_id = str(c_id)
@@ -107,7 +107,7 @@ class ConversationUnPinApi(InstalledAppResource):
     def patch(self, installed_app, c_id):
         app_model = installed_app.app
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         conversation_id = str(c_id)

View File

@@ -31,7 +31,7 @@ class InstalledAppsListApi(Resource):
                 "app_owner_tenant_id": installed_app.app_owner_tenant_id,
                 "is_pinned": installed_app.is_pinned,
                 "last_used_at": installed_app.last_used_at,
-                "editable": current_user.role in ["owner", "admin"],
+                "editable": current_user.role in {"owner", "admin"},
                 "uninstallable": current_tenant_id == installed_app.app_owner_tenant_id,
             }
             for installed_app in installed_apps

View File

@@ -40,7 +40,7 @@ class MessageListApi(InstalledAppResource):
         app_model = installed_app.app
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         parser = reqparse.RequestParser()
@@ -125,7 +125,7 @@ class MessageSuggestedQuestionApi(InstalledAppResource):
     def get(self, installed_app, message_id):
         app_model = installed_app.app
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         message_id = str(message_id)

View File

@@ -43,7 +43,7 @@ class AppParameterApi(InstalledAppResource):
         """Retrieve app parameters."""
         app_model = installed_app.app
-        if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
+        if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
             workflow = app_model.workflow
             if workflow is None:
                 raise AppUnavailableError()

View File

@@ -218,7 +218,7 @@ api.add_resource(ModelProviderCredentialApi, "/workspaces/current/model-provider
 api.add_resource(ModelProviderValidateApi, "/workspaces/current/model-providers/<string:provider>/credentials/validate")
 api.add_resource(ModelProviderApi, "/workspaces/current/model-providers/<string:provider>")
 api.add_resource(
-    ModelProviderIconApi, "/workspaces/current/model-providers/<string:provider>/" "<string:icon_type>/<string:lang>"
+    ModelProviderIconApi, "/workspaces/current/model-providers/<string:provider>/<string:icon_type>/<string:lang>"
 )
 api.add_resource(

View File

@@ -194,7 +194,7 @@ class WebappLogoWorkspaceApi(Resource):
             raise TooManyFilesError()
         extension = file.filename.split(".")[-1]
-        if extension.lower() not in ["svg", "png"]:
+        if extension.lower() not in {"svg", "png"}:
             raise UnsupportedFileTypeError()
         try:

View File

@@ -42,7 +42,7 @@ class AppParameterApi(Resource):
     @marshal_with(parameters_fields)
     def get(self, app_model: App):
         """Retrieve app parameters."""
-        if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
+        if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
             workflow = app_model.workflow
             if workflow is None:
                 raise AppUnavailableError()

View File

@@ -79,7 +79,7 @@ class TextApi(Resource):
         message_id = args.get("message_id", None)
         text = args.get("text", None)
         if (
-            app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]
+            app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}
             and app_model.workflow
             and app_model.workflow.features_dict
         ):

View File

@@ -96,7 +96,7 @@ class ChatApi(Resource):
     @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
     def post(self, app_model: App, end_user: EndUser):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         parser = reqparse.RequestParser()
@@ -144,7 +144,7 @@ class ChatStopApi(Resource):
     @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
     def post(self, app_model: App, end_user: EndUser, task_id):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         AppQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)

View File

@@ -18,7 +18,7 @@ class ConversationApi(Resource):
     @marshal_with(conversation_infinite_scroll_pagination_fields)
     def get(self, app_model: App, end_user: EndUser):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         parser = reqparse.RequestParser()
@@ -52,7 +52,7 @@ class ConversationDetailApi(Resource):
     @marshal_with(simple_conversation_fields)
     def delete(self, app_model: App, end_user: EndUser, c_id):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         conversation_id = str(c_id)
@@ -69,7 +69,7 @@ class ConversationRenameApi(Resource):
     @marshal_with(simple_conversation_fields)
     def post(self, app_model: App, end_user: EndUser, c_id):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         conversation_id = str(c_id)

View File

@@ -76,7 +76,7 @@ class MessageListApi(Resource):
     @marshal_with(message_infinite_scroll_pagination_fields)
     def get(self, app_model: App, end_user: EndUser):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         parser = reqparse.RequestParser()
@@ -117,7 +117,7 @@ class MessageSuggestedApi(Resource):
     def get(self, app_model: App, end_user: EndUser, message_id):
         message_id = str(message_id)
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         try:

View File

@@ -1,6 +1,7 @@
 import logging
 
 from flask_restful import Resource, fields, marshal_with, reqparse
+from flask_restful.inputs import int_range
 from werkzeug.exceptions import InternalServerError
 
 from controllers.service_api import api
@@ -22,10 +23,12 @@ from core.errors.error import (
 )
 from core.model_runtime.errors.invoke import InvokeError
 from extensions.ext_database import db
+from fields.workflow_app_log_fields import workflow_app_log_pagination_fields
 from libs import helper
 from models.model import App, AppMode, EndUser
 from models.workflow import WorkflowRun
 from services.app_generate_service import AppGenerateService
+from services.workflow_app_service import WorkflowAppService
 
 logger = logging.getLogger(__name__)
@@ -113,6 +116,30 @@ class WorkflowTaskStopApi(Resource):
         return {"result": "success"}
 
 
+class WorkflowAppLogApi(Resource):
+    @validate_app_token
+    @marshal_with(workflow_app_log_pagination_fields)
+    def get(self, app_model: App):
+        """
+        Get workflow app logs
+        """
+        parser = reqparse.RequestParser()
+        parser.add_argument("keyword", type=str, location="args")
+        parser.add_argument("status", type=str, choices=["succeeded", "failed", "stopped"], location="args")
+        parser.add_argument("page", type=int_range(1, 99999), default=1, location="args")
+        parser.add_argument("limit", type=int_range(1, 100), default=20, location="args")
+        args = parser.parse_args()
+
+        # get paginate workflow app logs
+        workflow_app_service = WorkflowAppService()
+        workflow_app_log_pagination = workflow_app_service.get_paginate_workflow_app_logs(
+            app_model=app_model, args=args
+        )
+
+        return workflow_app_log_pagination
+
+
 api.add_resource(WorkflowRunApi, "/workflows/run")
 api.add_resource(WorkflowRunDetailApi, "/workflows/run/<string:workflow_id>")
 api.add_resource(WorkflowTaskStopApi, "/workflows/tasks/<string:task_id>/stop")
+api.add_resource(WorkflowAppLogApi, "/workflows/logs")
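
The new WorkflowAppLogApi route exposes paginated workflow run logs over the service API. A hypothetical client call (the base URL, token, and /v1 prefix are assumptions about how the service API is mounted, not part of this diff):

    import requests

    BASE_URL = "https://api.dify.example"   # placeholder
    API_TOKEN = "app-xxxxxxxxxxxx"          # placeholder service API token

    resp = requests.get(
        f"{BASE_URL}/v1/workflows/logs",
        headers={"Authorization": f"Bearer {API_TOKEN}"},
        params={"status": "failed", "page": 1, "limit": 20},  # args accepted by the parser above
    )
    resp.raise_for_status()
    for log in resp.json().get("data", []):
        print(log)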

View File

@@ -41,7 +41,7 @@ class AppParameterApi(WebApiResource):
     @marshal_with(parameters_fields)
     def get(self, app_model: App, end_user):
         """Retrieve app parameters."""
-        if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
+        if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
             workflow = app_model.workflow
             if workflow is None:
                 raise AppUnavailableError()

View File

@@ -78,7 +78,7 @@ class TextApi(WebApiResource):
         message_id = args.get("message_id", None)
         text = args.get("text", None)
         if (
-            app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]
+            app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}
             and app_model.workflow
             and app_model.workflow.features_dict
         ):

View File

@@ -87,7 +87,7 @@ class CompletionStopApi(WebApiResource):
 class ChatApi(WebApiResource):
     def post(self, app_model, end_user):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         parser = reqparse.RequestParser()
@@ -136,7 +136,7 @@ class ChatApi(WebApiResource):
 class ChatStopApi(WebApiResource):
     def post(self, app_model, end_user, task_id):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         AppQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)

View File

@@ -18,7 +18,7 @@ class ConversationListApi(WebApiResource):
     @marshal_with(conversation_infinite_scroll_pagination_fields)
     def get(self, app_model, end_user):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         parser = reqparse.RequestParser()
@@ -56,7 +56,7 @@ class ConversationListApi(WebApiResource):
 class ConversationApi(WebApiResource):
     def delete(self, app_model, end_user, c_id):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         conversation_id = str(c_id)
@@ -73,7 +73,7 @@ class ConversationRenameApi(WebApiResource):
     @marshal_with(simple_conversation_fields)
     def post(self, app_model, end_user, c_id):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         conversation_id = str(c_id)
@@ -92,7 +92,7 @@ class ConversationRenameApi(WebApiResource):
 class ConversationPinApi(WebApiResource):
     def patch(self, app_model, end_user, c_id):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         conversation_id = str(c_id)
@@ -108,7 +108,7 @@ class ConversationPinApi(WebApiResource):
 class ConversationUnPinApi(WebApiResource):
     def patch(self, app_model, end_user, c_id):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         conversation_id = str(c_id)

View File

@@ -78,7 +78,7 @@ class MessageListApi(WebApiResource):
     @marshal_with(message_infinite_scroll_pagination_fields)
     def get(self, app_model, end_user):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotChatAppError()
         parser = reqparse.RequestParser()
@@ -160,7 +160,7 @@ class MessageMoreLikeThisApi(WebApiResource):
 class MessageSuggestedQuestionApi(WebApiResource):
     def get(self, app_model, end_user, message_id):
         app_mode = AppMode.value_of(app_model.mode)
-        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
             raise NotCompletionAppError()
         message_id = str(message_id)

View File

@@ -90,7 +90,7 @@ class CotAgentOutputParser:
             if not in_code_block and not in_json:
                 if delta.lower() == action_str[action_idx] and action_idx == 0:
-                    if last_character not in ["\n", " ", ""]:
+                    if last_character not in {"\n", " ", ""}:
                         index += steps
                         yield delta
                         continue
@@ -117,7 +117,7 @@ class CotAgentOutputParser:
                     action_idx = 0
                 if delta.lower() == thought_str[thought_idx] and thought_idx == 0:
-                    if last_character not in ["\n", " ", ""]:
+                    if last_character not in {"\n", " ", ""}:
                         index += steps
                         yield delta
                         continue

View File

@@ -29,7 +29,7 @@ class BaseAppConfigManager:
         additional_features.show_retrieve_source = RetrievalResourceConfigManager.convert(config=config_dict)
 
         additional_features.file_upload = FileUploadConfigManager.convert(
-            config=config_dict, is_vision=app_mode in [AppMode.CHAT, AppMode.COMPLETION, AppMode.AGENT_CHAT]
+            config=config_dict, is_vision=app_mode in {AppMode.CHAT, AppMode.COMPLETION, AppMode.AGENT_CHAT}
         )
 
         additional_features.opening_statement, additional_features.suggested_questions = (

View File

@@ -18,7 +18,7 @@ class AgentConfigManager:
             if agent_strategy == "function_call":
                 strategy = AgentEntity.Strategy.FUNCTION_CALLING
-            elif agent_strategy == "cot" or agent_strategy == "react":
+            elif agent_strategy in {"cot", "react"}:
                 strategy = AgentEntity.Strategy.CHAIN_OF_THOUGHT
             else:
                 # old configs, try to detect default strategy
@@ -43,10 +43,10 @@ class AgentConfigManager:
                 agent_tools.append(AgentToolEntity(**agent_tool_properties))
 
-            if "strategy" in config["agent_mode"] and config["agent_mode"]["strategy"] not in [
+            if "strategy" in config["agent_mode"] and config["agent_mode"]["strategy"] not in {
                 "react_router",
                 "router",
-            ]:
+            }:
                 agent_prompt = agent_dict.get("prompt", None) or {}
                 # check model mode
                 model_mode = config.get("model", {}).get("mode", "completion")

View File

@@ -167,7 +167,7 @@ class DatasetConfigManager:
             config["agent_mode"]["strategy"] = PlanningStrategy.ROUTER.value
 
         has_datasets = False
-        if config["agent_mode"]["strategy"] in [PlanningStrategy.ROUTER.value, PlanningStrategy.REACT_ROUTER.value]:
+        if config["agent_mode"]["strategy"] in {PlanningStrategy.ROUTER.value, PlanningStrategy.REACT_ROUTER.value}:
             for tool in config["agent_mode"]["tools"]:
                 key = list(tool.keys())[0]
                 if key == "dataset":

View File

@@ -86,7 +86,7 @@ class PromptTemplateConfigManager:
         if config["prompt_type"] == PromptTemplateEntity.PromptType.ADVANCED.value:
             if not config["chat_prompt_config"] and not config["completion_prompt_config"]:
                 raise ValueError(
-                    "chat_prompt_config or completion_prompt_config is required " "when prompt_type is advanced"
+                    "chat_prompt_config or completion_prompt_config is required when prompt_type is advanced"
                 )
 
             model_mode_vals = [mode.value for mode in ModelMode]

View File

@@ -42,12 +42,12 @@ class BasicVariablesConfigManager:
                         variable=variable["variable"], type=variable["type"], config=variable["config"]
                     )
                 )
-            elif variable_type in [
+            elif variable_type in {
                 VariableEntityType.TEXT_INPUT,
                 VariableEntityType.PARAGRAPH,
                 VariableEntityType.NUMBER,
                 VariableEntityType.SELECT,
-            ]:
+            }:
                 variable = variables[variable_type]
                 variable_entities.append(
                     VariableEntity(
@@ -97,7 +97,7 @@ class BasicVariablesConfigManager:
         variables = []
         for item in config["user_input_form"]:
             key = list(item.keys())[0]
-            if key not in ["text-input", "select", "paragraph", "number", "external_data_tool"]:
+            if key not in {"text-input", "select", "paragraph", "number", "external_data_tool"}:
                 raise ValueError("Keys in user_input_form list can only be 'text-input', 'paragraph' or 'select'")
 
             form_item = item[key]
@@ -115,7 +115,7 @@ class BasicVariablesConfigManager:
                 pattern = re.compile(r"^(?!\d)[\u4e00-\u9fa5A-Za-z0-9_\U0001F300-\U0001F64F\U0001F680-\U0001F6FF]{1,100}$")
                 if pattern.match(form_item["variable"]) is None:
-                    raise ValueError("variable in user_input_form must be a string, " "and cannot start with a number")
+                    raise ValueError("variable in user_input_form must be a string, and cannot start with a number")
 
                 variables.append(form_item["variable"])

View File

@@ -92,7 +92,7 @@ class VariableEntityType(str, Enum):
     SELECT = "select"
     PARAGRAPH = "paragraph"
     NUMBER = "number"
-    EXTERNAL_DATA_TOOL = "external-data-tool"
+    EXTERNAL_DATA_TOOL = "external_data_tool"
 
 
 class VariableEntity(BaseModel):

View File

@@ -54,14 +54,14 @@ class FileUploadConfigManager:
         if is_vision:
             detail = config["file_upload"]["image"]["detail"]
-            if detail not in ["high", "low"]:
+            if detail not in {"high", "low"}:
                 raise ValueError("detail must be in ['high', 'low']")
 
             transfer_methods = config["file_upload"]["image"]["transfer_methods"]
             if not isinstance(transfer_methods, list):
                 raise ValueError("transfer_methods must be of list type")
             for method in transfer_methods:
-                if method not in ["remote_url", "local_file"]:
+                if method not in {"remote_url", "local_file"}:
                     raise ValueError("transfer_methods must be in ['remote_url', 'local_file']")
 
         return config, ["file_upload"]

View File

@@ -73,7 +73,7 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner):
             raise ValueError("Workflow not initialized")
 
         user_id = None
-        if self.application_generate_entity.invoke_from in [InvokeFrom.WEB_APP, InvokeFrom.SERVICE_API]:
+        if self.application_generate_entity.invoke_from in {InvokeFrom.WEB_APP, InvokeFrom.SERVICE_API}:
             end_user = db.session.query(EndUser).filter(EndUser.id == self.application_generate_entity.user_id).first()
             if end_user:
                 user_id = end_user.session_id
@@ -175,7 +175,7 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner):
             user_id=self.application_generate_entity.user_id,
             user_from=(
                 UserFrom.ACCOUNT
-                if self.application_generate_entity.invoke_from in [InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER]
+                if self.application_generate_entity.invoke_from in {InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER}
                 else UserFrom.END_USER
             ),
             invoke_from=self.application_generate_entity.invoke_from,

View File

@@ -16,7 +16,7 @@ class AppGenerateResponseConverter(ABC):
     def convert(
         cls, response: Union[AppBlockingResponse, Generator[AppStreamResponse, Any, None]], invoke_from: InvokeFrom
     ) -> dict[str, Any] | Generator[str, Any, None]:
-        if invoke_from in [InvokeFrom.DEBUGGER, InvokeFrom.SERVICE_API]:
+        if invoke_from in {InvokeFrom.DEBUGGER, InvokeFrom.SERVICE_API}:
             if isinstance(response, AppBlockingResponse):
                 return cls.convert_blocking_full_response(response)
             else:

View File

@@ -22,11 +22,11 @@ class BaseAppGenerator:
             return var.default or ""
         if (
             var.type
-            in (
+            in {
                 VariableEntityType.TEXT_INPUT,
                 VariableEntityType.SELECT,
                 VariableEntityType.PARAGRAPH,
-            )
+            }
             and user_input_value
             and not isinstance(user_input_value, str)
         ):
@@ -44,7 +44,7 @@ class BaseAppGenerator:
             options = var.options or []
             if user_input_value not in options:
                 raise ValueError(f"{var.variable} in input form must be one of the following: {options}")
-        elif var.type in (VariableEntityType.TEXT_INPUT, VariableEntityType.PARAGRAPH):
+        elif var.type in {VariableEntityType.TEXT_INPUT, VariableEntityType.PARAGRAPH}:
            if var.max_length and user_input_value and len(user_input_value) > var.max_length:
                 raise ValueError(f"{var.variable} in input form must be less than {var.max_length} characters")

View File

@@ -32,7 +32,7 @@ class AppQueueManager:
         self._user_id = user_id
         self._invoke_from = invoke_from
 
-        user_prefix = "account" if self._invoke_from in [InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER] else "end-user"
+        user_prefix = "account" if self._invoke_from in {InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER} else "end-user"
         redis_client.setex(
             AppQueueManager._generate_task_belong_cache_key(self._task_id), 1800, f"{user_prefix}-{self._user_id}"
         )
@@ -118,7 +118,7 @@ class AppQueueManager:
         if result is None:
             return
 
-        user_prefix = "account" if invoke_from in [InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER] else "end-user"
+        user_prefix = "account" if invoke_from in {InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER} else "end-user"
         if result.decode("utf-8") != f"{user_prefix}-{user_id}":
             return

View File

@@ -379,7 +379,7 @@ class AppRunner:
             queue_manager=queue_manager,
             app_generate_entity=application_generate_entity,
             prompt_messages=prompt_messages,
-            text="I apologize for any confusion, " "but I'm an AI assistant to be helpful, harmless, and honest.",
+            text="I apologize for any confusion, but I'm an AI assistant to be helpful, harmless, and honest.",
             stream=application_generate_entity.stream,
         )

View File

@@ -148,7 +148,7 @@ class MessageBasedAppGenerator(BaseAppGenerator):
         # get from source
         end_user_id = None
         account_id = None
-        if application_generate_entity.invoke_from in [InvokeFrom.WEB_APP, InvokeFrom.SERVICE_API]:
+        if application_generate_entity.invoke_from in {InvokeFrom.WEB_APP, InvokeFrom.SERVICE_API}:
             from_source = "api"
             end_user_id = application_generate_entity.user_id
         else:
@@ -165,11 +165,11 @@ class MessageBasedAppGenerator(BaseAppGenerator):
             model_provider = application_generate_entity.model_conf.provider
             model_id = application_generate_entity.model_conf.model
             override_model_configs = None
-            if app_config.app_model_config_from == EasyUIBasedAppModelConfigFrom.ARGS and app_config.app_mode in [
+            if app_config.app_model_config_from == EasyUIBasedAppModelConfigFrom.ARGS and app_config.app_mode in {
                 AppMode.AGENT_CHAT,
                 AppMode.CHAT,
                 AppMode.COMPLETION,
-            ]:
+            }:
                 override_model_configs = app_config.app_model_config_dict
 
         # get conversation introduction

View File

@@ -53,7 +53,7 @@ class WorkflowAppRunner(WorkflowBasedAppRunner):
         app_config = cast(WorkflowAppConfig, app_config)
 
         user_id = None
-        if self.application_generate_entity.invoke_from in [InvokeFrom.WEB_APP, InvokeFrom.SERVICE_API]:
+        if self.application_generate_entity.invoke_from in {InvokeFrom.WEB_APP, InvokeFrom.SERVICE_API}:
             end_user = db.session.query(EndUser).filter(EndUser.id == self.application_generate_entity.user_id).first()
             if end_user:
                 user_id = end_user.session_id
@@ -113,7 +113,7 @@ class WorkflowAppRunner(WorkflowBasedAppRunner):
             user_id=self.application_generate_entity.user_id,
             user_from=(
                 UserFrom.ACCOUNT
-                if self.application_generate_entity.invoke_from in [InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER]
+                if self.application_generate_entity.invoke_from in {InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER}
                 else UserFrom.END_USER
             ),
             invoke_from=self.application_generate_entity.invoke_from,

View File

@@ -84,7 +84,7 @@ class WorkflowLoggingCallback(WorkflowCallback):
         if route_node_state.node_run_result:
             node_run_result = route_node_state.node_run_result
             self.print_text(
-                f"Inputs: " f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
+                f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
                 color="green",
             )
             self.print_text(
@@ -116,7 +116,7 @@ class WorkflowLoggingCallback(WorkflowCallback):
             node_run_result = route_node_state.node_run_result
             self.print_text(f"Error: {node_run_result.error}", color="red")
             self.print_text(
-                f"Inputs: " f"" f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
+                f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
                 color="red",
             )
             self.print_text(
@@ -125,7 +125,7 @@ class WorkflowLoggingCallback(WorkflowCallback):
                 color="red",
             )
             self.print_text(
-                f"Outputs: " f"{jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}",
+                f"Outputs: {jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}",
                 color="red",
             )

View File

@@ -63,7 +63,7 @@ class AnnotationReplyFeature:
                 score = documents[0].metadata["score"]
                 annotation = AppAnnotationService.get_annotation_by_id(annotation_id)
                 if annotation:
-                    if invoke_from in [InvokeFrom.SERVICE_API, InvokeFrom.WEB_APP]:
+                    if invoke_from in {InvokeFrom.SERVICE_API, InvokeFrom.WEB_APP}:
                         from_source = "api"
                     else:
                         from_source = "console"

View File

@@ -372,7 +372,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan
             self._message,
             application_generate_entity=self._application_generate_entity,
             conversation=self._conversation,
-            is_first_message=self._application_generate_entity.app_config.app_mode in [AppMode.AGENT_CHAT, AppMode.CHAT]
+            is_first_message=self._application_generate_entity.app_config.app_mode in {AppMode.AGENT_CHAT, AppMode.CHAT}
             and self._application_generate_entity.conversation_id is None,
             extras=self._application_generate_entity.extras,
         )

View File

@@ -383,7 +383,7 @@ class WorkflowCycleManage:
         :param workflow_node_execution: workflow node execution
         :return:
         """
-        if workflow_node_execution.node_type in [NodeType.ITERATION.value, NodeType.LOOP.value]:
+        if workflow_node_execution.node_type in {NodeType.ITERATION.value, NodeType.LOOP.value}:
             return None
 
         response = NodeStartStreamResponse(
@@ -430,7 +430,7 @@ class WorkflowCycleManage:
         :param workflow_node_execution: workflow node execution
         :return:
        """
-        if workflow_node_execution.node_type in [NodeType.ITERATION.value, NodeType.LOOP.value]:
+        if workflow_node_execution.node_type in {NodeType.ITERATION.value, NodeType.LOOP.value}:
             return None
 
         return NodeFinishStreamResponse(

View File

@@ -29,7 +29,7 @@ class DatasetIndexToolCallbackHandler:
                 source="app",
                 source_app_id=self._app_id,
                 created_by_role=(
-                    "account" if self._invoke_from in [InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER] else "end_user"
+                    "account" if self._invoke_from in {InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER} else "end_user"
                 ),
                 created_by=self._user_id,
             )

View File

@@ -65,7 +65,7 @@ class CacheEmbedding(Embeddings):
                 except IntegrityError:
                     db.session.rollback()
                 except Exception as e:
-                    logging.exception("Failed transform embedding: ", e)
+                    logging.exception("Failed transform embedding: %s", e)
         cache_embeddings = []
         try:
             for i, embedding in zip(embedding_queue_indices, embedding_queue_embeddings):
@@ -85,7 +85,7 @@ class CacheEmbedding(Embeddings):
                 db.session.rollback()
             except Exception as ex:
                 db.session.rollback()
-                logger.error("Failed to embed documents: ", ex)
+                logger.error("Failed to embed documents: %s", ex)
                 raise ex
 
         return text_embeddings
@@ -116,10 +116,7 @@ class CacheEmbedding(Embeddings):
                 # Transform to string
                 encoded_str = encoded_vector.decode("utf-8")
                 redis_client.setex(embedding_cache_key, 600, encoded_str)
-            except IntegrityError:
-                db.session.rollback()
-            except:
-                logging.exception("Failed to add embedding to redis")
+            except Exception as ex:
+                logging.exception("Failed to add embedding to redis %s", ex)
 
         return embedding_results
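
The logging changes in this file fix a recurring mistake: passing the exception as an extra positional argument without a matching %s placeholder, which makes the logging module emit an internal formatting error instead of the message. A minimal reproduction:

    import logging

    try:
        1 / 0
    except ZeroDivisionError as e:
        logging.exception("Failed transform embedding: ", e)   # broken: prints "--- Logging error ---"
        logging.exception("Failed transform embedding: %s", e)  # fixed: lazy %-style formatting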

View File

@@ -292,7 +292,7 @@ class IndexingRunner:
         self, index_processor: BaseIndexProcessor, dataset_document: DatasetDocument, process_rule: dict
     ) -> list[Document]:
         # load file
-        if dataset_document.data_source_type not in ["upload_file", "notion_import", "website_crawl"]:
+        if dataset_document.data_source_type not in {"upload_file", "notion_import", "website_crawl"}:
             return []
 
         data_source_info = dataset_document.data_source_info_dict

View File

@@ -52,7 +52,7 @@ class TokenBufferMemory:
             files = db.session.query(MessageFile).filter(MessageFile.message_id == message.id).all()
             if files:
                 file_extra_config = None
-                if self.conversation.mode not in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
+                if self.conversation.mode not in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
                     file_extra_config = FileUploadConfigManager.convert(self.conversation.model_config)
                 else:
                     if message.workflow_run_id:

View File

@@ -27,17 +27,17 @@ class ModelType(Enum):
         :return: model type
         """
-        if origin_model_type == "text-generation" or origin_model_type == cls.LLM.value:
+        if origin_model_type in {"text-generation", cls.LLM.value}:
             return cls.LLM
-        elif origin_model_type == "embeddings" or origin_model_type == cls.TEXT_EMBEDDING.value:
+        elif origin_model_type in {"embeddings", cls.TEXT_EMBEDDING.value}:
             return cls.TEXT_EMBEDDING
-        elif origin_model_type == "reranking" or origin_model_type == cls.RERANK.value:
+        elif origin_model_type in {"reranking", cls.RERANK.value}:
             return cls.RERANK
-        elif origin_model_type == "speech2text" or origin_model_type == cls.SPEECH2TEXT.value:
+        elif origin_model_type in {"speech2text", cls.SPEECH2TEXT.value}:
             return cls.SPEECH2TEXT
-        elif origin_model_type == "tts" or origin_model_type == cls.TTS.value:
+        elif origin_model_type in {"tts", cls.TTS.value}:
             return cls.TTS
-        elif origin_model_type == "text2img" or origin_model_type == cls.TEXT2IMG.value:
+        elif origin_model_type in {"text2img", cls.TEXT2IMG.value}:
             return cls.TEXT2IMG
         elif origin_model_type == cls.MODERATION.value:
             return cls.MODERATION
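
The chain above still grows by two lines per alias; a table-driven lookup is a common alternative. A hedged sketch (the trimmed-down enum and alias map are ours, not Dify's):

    from enum import Enum

    class ModelType(Enum):
        LLM = "llm"
        TEXT_EMBEDDING = "text-embedding"
        RERANK = "rerank"

    # Legacy aliases map onto canonical enum values; one dict lookup replaces the elif chain.
    _ALIASES = {"text-generation": "llm", "embeddings": "text-embedding", "reranking": "rerank"}

    def value_of(origin: str) -> ModelType:
        return ModelType(_ALIASES.get(origin, origin))

    assert value_of("text-generation") is ModelType.LLM
    assert value_of("rerank") is ModelType.RERANK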

View File

@@ -200,7 +200,7 @@ class AIModel(ABC):
             except Exception as e:
                 model_schema_yaml_file_name = os.path.basename(model_schema_yaml_path).rstrip(".yaml")
                 raise Exception(
-                    f"Invalid model schema for {provider_name}.{model_type}.{model_schema_yaml_file_name}:" f" {str(e)}"
+                    f"Invalid model schema for {provider_name}.{model_type}.{model_schema_yaml_file_name}: {str(e)}"
                 )
 
             # cache model schema

View File

@@ -494,7 +494,7 @@ class AnthropicLargeLanguageModel(LargeLanguageModel):
                     mime_type = data_split[0].replace("data:", "")
                     base64_data = data_split[1]
 
-                    if mime_type not in ["image/jpeg", "image/png", "image/gif", "image/webp"]:
+                    if mime_type not in {"image/jpeg", "image/png", "image/gif", "image/webp"}:
                         raise ValueError(
                             f"Unsupported image type {mime_type}, "
                             f"only support image/jpeg, image/png, image/gif, and image/webp"

View File

@@ -53,6 +53,12 @@ model_credential_schema:
       type: select
       required: true
       options:
+        - label:
+            en_US: 2024-08-01-preview
+          value: 2024-08-01-preview
+        - label:
+            en_US: 2024-07-01-preview
+          value: 2024-07-01-preview
         - label:
             en_US: 2024-05-01-preview
           value: 2024-05-01-preview
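The two added options track newer Azure OpenAI preview API versions. Presumably the selected value is what ultimately reaches the SDK's api_version parameter; a minimal sketch with the openai Python client (endpoint and key are placeholders):

    from openai import AzureOpenAI  # openai >= 1.x

    client = AzureOpenAI(
        azure_endpoint="https://example-resource.openai.azure.com",  # placeholder
        api_key="<api-key>",                                         # placeholder
        api_version="2024-08-01-preview",  # one of the options above
    )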

View File

@@ -85,14 +85,14 @@ class AzureOpenAIText2SpeechModel(_CommonAzureOpenAI, TTSModel):
                     for i in range(len(sentences))
                 ]
                 for future in futures:
-                    yield from future.result().__enter__().iter_bytes(1024)
+                    yield from future.result().__enter__().iter_bytes(1024)  # noqa:PLC2801
             else:
                 response = client.audio.speech.with_streaming_response.create(
                     model=model, voice=voice, response_format="mp3", input=content_text.strip()
                 )

-                yield from response.__enter__().iter_bytes(1024)
+                yield from response.__enter__().iter_bytes(1024)  # noqa:PLC2801
         except Exception as ex:
             raise InvokeBadRequestError(str(ex))
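The added noqa:PLC2801 suppresses Ruff's "unnecessary dunder call" rule rather than restructuring the code: the streaming responses are entered directly so their byte iterators can be yielded across futures. For reference, the with-based form the rule normally prefers looks like this sketch (open_stream is a hypothetical stand-in for with_streaming_response):

    from collections.abc import Iterator
    from contextlib import contextmanager

    @contextmanager
    def open_stream(name: str):
        # Hypothetical stand-in for client.audio.speech.with_streaming_response.create(...)
        try:
            yield iter([b"chunk-1-" + name.encode(), b"chunk-2-" + name.encode()])
        finally:
            pass  # the real response object would be closed here

    def stream_all(names: list[str]) -> Iterator[bytes]:
        for name in names:
            with open_stream(name) as chunks:  # closed when the block exits
                yield from chunks

    assert list(stream_all(["a"]))[0] == b"chunk-1-a"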

View File

@@ -33,7 +33,7 @@ parameter_rules:
   - name: res_format
     label:
       zh_Hans: 回复格式
-      en_US: response format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -33,7 +33,7 @@ parameter_rules:
   - name: res_format
     label:
       zh_Hans: 回复格式
-      en_US: response format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -33,7 +33,7 @@ parameter_rules:
   - name: res_format
     label:
       zh_Hans: 回复格式
-      en_US: response format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -0,0 +1,59 @@
model: eu.anthropic.claude-3-haiku-20240307-v1:0
label:
  en_US: Claude 3 Haiku(EU.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    required: true
    type: int
    default: 4096
    min: 1
    max: 4096
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
  # docs: https://docs.anthropic.com/claude/docs/system-prompts
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.00025'
  output: '0.00125'
  unit: '0.001'
  currency: USD

View File

@@ -0,0 +1,58 @@
model: eu.anthropic.claude-3-5-sonnet-20240620-v1:0
label:
  en_US: Claude 3.5 Sonnet(EU.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    required: true
    type: int
    default: 4096
    min: 1
    max: 4096
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.003'
  output: '0.015'
  unit: '0.001'
  currency: USD

View File

@@ -0,0 +1,58 @@
model: eu.anthropic.claude-3-sonnet-20240229-v1:0
label:
  en_US: Claude 3 Sonnet(EU.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    required: true
    type: int
    default: 4096
    min: 1
    max: 4096
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.003'
  output: '0.015'
  unit: '0.001'
  currency: USD

View File

@@ -61,6 +61,8 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
     CONVERSE_API_ENABLED_MODEL_INFO = [
         {"prefix": "anthropic.claude-v2", "support_system_prompts": True, "support_tool_use": False},
         {"prefix": "anthropic.claude-v1", "support_system_prompts": True, "support_tool_use": False},
+        {"prefix": "us.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True},
+        {"prefix": "eu.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True},
         {"prefix": "anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True},
         {"prefix": "meta.llama", "support_system_prompts": True, "support_tool_use": False},
         {"prefix": "mistral.mistral-7b-instruct", "support_system_prompts": False, "support_tool_use": False},
@@ -452,7 +454,7 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
                     base64_data = data_split[1]
                     image_content = base64.b64decode(base64_data)

-                    if mime_type not in ["image/jpeg", "image/png", "image/gif", "image/webp"]:
+                    if mime_type not in {"image/jpeg", "image/png", "image/gif", "image/webp"}:
                         raise ValueError(
                             f"Unsupported image type {mime_type}, "
                             f"only support image/jpeg, image/png, image/gif, and image/webp"
@@ -884,16 +886,16 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
         if error_code == "AccessDeniedException":
             return InvokeAuthorizationError(error_msg)
-        elif error_code in ["ResourceNotFoundException", "ValidationException"]:
+        elif error_code in {"ResourceNotFoundException", "ValidationException"}:
             return InvokeBadRequestError(error_msg)
-        elif error_code in ["ThrottlingException", "ServiceQuotaExceededException"]:
+        elif error_code in {"ThrottlingException", "ServiceQuotaExceededException"}:
             return InvokeRateLimitError(error_msg)
-        elif error_code in [
+        elif error_code in {
             "ModelTimeoutException",
             "ModelErrorException",
             "InternalServerException",
             "ModelNotReadyException",
-        ]:
+        }:
             return InvokeServerUnavailableError(error_msg)
         elif error_code == "ModelStreamErrorException":
             return InvokeConnectionError(error_msg)
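The two new CONVERSE_API_ENABLED_MODEL_INFO entries let Bedrock's cross-region inference profiles (#8190), whose model IDs carry a us. or eu. prefix, resolve to the same capabilities as the plain anthropic.claude-3 family. A sketch of how such a prefix table can be consulted (find_model_info is a hypothetical helper, not the class's actual method):

    CONVERSE_API_ENABLED_MODEL_INFO = [
        {"prefix": "us.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True},
        {"prefix": "eu.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True},
        {"prefix": "anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True},
    ]

    def find_model_info(model_id: str) -> dict | None:
        # Return the first entry whose prefix matches the model ID.
        for info in CONVERSE_API_ENABLED_MODEL_INFO:
            if model_id.startswith(info["prefix"]):
                return info
        return None

    info = find_model_info("eu.anthropic.claude-3-haiku-20240307-v1:0")
    assert info is not None and info["support_tool_use"]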

View File

@@ -0,0 +1,59 @@
model: us.anthropic.claude-3-haiku-20240307-v1:0
label:
  en_US: Claude 3 Haiku(US.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    required: true
    type: int
    default: 4096
    min: 1
    max: 4096
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
  # docs: https://docs.anthropic.com/claude/docs/system-prompts
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.00025'
  output: '0.00125'
  unit: '0.001'
  currency: USD

View File

@@ -0,0 +1,59 @@
model: us.anthropic.claude-3-opus-20240229-v1:0
label:
  en_US: Claude 3 Opus(US.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    required: true
    type: int
    default: 4096
    min: 1
    max: 4096
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
  # docs: https://docs.anthropic.com/claude/docs/system-prompts
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.015'
  output: '0.075'
  unit: '0.001'
  currency: USD

View File

@@ -0,0 +1,58 @@
model: us.anthropic.claude-3-5-sonnet-20240620-v1:0
label:
  en_US: Claude 3.5 Sonnet(US.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    required: true
    type: int
    default: 4096
    min: 1
    max: 4096
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.003'
  output: '0.015'
  unit: '0.001'
  currency: USD

View File

@@ -0,0 +1,58 @@
model: us.anthropic.claude-3-sonnet-20240229-v1:0
label:
  en_US: Claude 3 Sonnet(US.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    required: true
    type: int
    default: 4096
    min: 1
    max: 4096
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.003'
  output: '0.015'
  unit: '0.001'
  currency: USD

View File

@@ -186,16 +186,16 @@ class BedrockTextEmbeddingModel(TextEmbeddingModel):
         if error_code == "AccessDeniedException":
             return InvokeAuthorizationError(error_msg)
-        elif error_code in ["ResourceNotFoundException", "ValidationException"]:
+        elif error_code in {"ResourceNotFoundException", "ValidationException"}:
             return InvokeBadRequestError(error_msg)
-        elif error_code in ["ThrottlingException", "ServiceQuotaExceededException"]:
+        elif error_code in {"ThrottlingException", "ServiceQuotaExceededException"}:
             return InvokeRateLimitError(error_msg)
-        elif error_code in [
+        elif error_code in {
             "ModelTimeoutException",
             "ModelErrorException",
             "InternalServerException",
             "ModelNotReadyException",
-        ]:
+        }:
             return InvokeServerUnavailableError(error_msg)
         elif error_code == "ModelStreamErrorException":
             return InvokeConnectionError(error_msg)

View File

@@ -621,7 +621,7 @@ class CohereLargeLanguageModel(LargeLanguageModel):
                 desc = p_val["description"]
                 if "enum" in p_val:
-                    desc += f"; Only accepts one of the following predefined options: " f"[{', '.join(p_val['enum'])}]"
+                    desc += f"; Only accepts one of the following predefined options: [{', '.join(p_val['enum'])}]"

                 parameter_definitions[p_key] = ToolParameterDefinitionsValue(
                     description=desc, type=p_val["type"], required=required

View File

@@ -62,7 +62,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -6,10 +6,10 @@ from collections.abc import Generator
 from typing import Optional, Union, cast

 import google.ai.generativelanguage as glm
-import google.api_core.exceptions as exceptions
 import google.generativeai as genai
-import google.generativeai.client as client
 import requests
+from google.api_core import exceptions
+from google.generativeai import client
 from google.generativeai.types import ContentType, GenerateContentResponse, HarmBlockThreshold, HarmCategory
 from google.generativeai.types.content_types import to_part
 from PIL import Image
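This import shuffle is Ruff's manual-from-import rule (PLR0402): import a.b as b binds the same module object as from a import b, so the rewrite cannot change behavior. The equivalence is easy to check with a stdlib module:

    import os.path as path_via_import
    from os import path as path_via_from

    # Both names are bound to the very same module object.
    assert path_via_import is path_via_from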

View File

@@ -77,7 +77,7 @@ class HuggingfaceHubLargeLanguageModel(_CommonHuggingfaceHub, LargeLanguageModel
         if "huggingfacehub_api_type" not in credentials:
             raise CredentialsValidateFailedError("Huggingface Hub Endpoint Type must be provided.")

-        if credentials["huggingfacehub_api_type"] not in ("inference_endpoints", "hosted_inference_api"):
+        if credentials["huggingfacehub_api_type"] not in {"inference_endpoints", "hosted_inference_api"}:
             raise CredentialsValidateFailedError("Huggingface Hub Endpoint Type is invalid.")

         if "huggingfacehub_api_token" not in credentials:
@@ -94,9 +94,9 @@ class HuggingfaceHubLargeLanguageModel(_CommonHuggingfaceHub, LargeLanguageModel
                 credentials["huggingfacehub_api_token"], model
             )

-            if credentials["task_type"] not in ("text2text-generation", "text-generation"):
+            if credentials["task_type"] not in {"text2text-generation", "text-generation"}:
                 raise CredentialsValidateFailedError(
-                    "Huggingface Hub Task Type must be one of text2text-generation, " "text-generation."
+                    "Huggingface Hub Task Type must be one of text2text-generation, text-generation."
                 )

         client = InferenceClient(token=credentials["huggingfacehub_api_token"])
@@ -282,7 +282,7 @@ class HuggingfaceHubLargeLanguageModel(_CommonHuggingfaceHub, LargeLanguageModel
             valid_tasks = ("text2text-generation", "text-generation")
             if model_info.pipeline_tag not in valid_tasks:
-                raise ValueError(f"Model {model_name} is not a valid task, " f"must be one of {valid_tasks}.")
+                raise ValueError(f"Model {model_name} is not a valid task, must be one of {valid_tasks}.")
         except Exception as e:
             raise CredentialsValidateFailedError(f"{str(e)}")

View File

@@ -121,7 +121,7 @@ class HuggingfaceHubTextEmbeddingModel(_CommonHuggingfaceHub, TextEmbeddingModel
             valid_tasks = "feature-extraction"
             if model_info.pipeline_tag not in valid_tasks:
-                raise ValueError(f"Model {model_name} is not a valid task, " f"must be one of {valid_tasks}.")
+                raise ValueError(f"Model {model_name} is not a valid task, must be one of {valid_tasks}.")
         except Exception as e:
             raise CredentialsValidateFailedError(f"{str(e)}")

View File

@@ -49,8 +49,7 @@ class HuggingfaceTeiRerankModel(RerankModel):
             return RerankResult(model=model, docs=[])

         server_url = credentials["server_url"]
-        if server_url.endswith("/"):
-            server_url = server_url[:-1]
+        server_url = server_url.removesuffix("/")

         try:
             results = TeiHelper.invoke_rerank(server_url, query, docs)
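str.removesuffix (Python 3.9+) replaces the endswith/slice pattern here and in the TEI and Jina files below. Unlike rstrip, it removes the literal suffix at most once instead of stripping a set of characters, as this sketch shows (the URL is hypothetical):

    server_url = "http://tei.example:8080/"  # hypothetical endpoint

    # Old pattern: explicit check plus slice.
    trimmed_old = server_url[:-1] if server_url.endswith("/") else server_url
    # New pattern: a no-op when the suffix is absent, one removal otherwise.
    trimmed_new = server_url.removesuffix("/")
    assert trimmed_old == trimmed_new == "http://tei.example:8080"

    # rstrip treats its argument as a character set, which can over-strip:
    assert "llama.yaml".removesuffix(".yaml") == "llama"
    assert "llama.yaml".rstrip(".yaml") == ""  # every trailing char is in {'.', 'y', 'a', 'm', 'l'}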

View File

@@ -75,7 +75,7 @@ class TeiHelper:
         if len(model_type.keys()) < 1:
             raise RuntimeError("model_type is empty")
         model_type = list(model_type.keys())[0]
-        if model_type not in ["embedding", "reranker"]:
+        if model_type not in {"embedding", "reranker"}:
             raise RuntimeError(f"invalid model_type: {model_type}")

         max_input_length = response_json.get("max_input_length", 512)

View File

@@ -42,8 +42,7 @@ class HuggingfaceTeiTextEmbeddingModel(TextEmbeddingModel):
         """
         server_url = credentials["server_url"]

-        if server_url.endswith("/"):
-            server_url = server_url[:-1]
+        server_url = server_url.removesuffix("/")

         # get model properties
         context_size = self._get_context_size(model, credentials)
@@ -119,8 +118,7 @@ class HuggingfaceTeiTextEmbeddingModel(TextEmbeddingModel):
         num_tokens = 0
         server_url = credentials["server_url"]

-        if server_url.endswith("/"):
-            server_url = server_url[:-1]
+        server_url = server_url.removesuffix("/")

         batch_tokens = TeiHelper.invoke_tokenize(server_url, texts)
         num_tokens = sum(len(tokens) for tokens in batch_tokens)

View File

@@ -2,3 +2,4 @@
 - hunyuan-standard
 - hunyuan-standard-256k
 - hunyuan-pro
+- hunyuan-turbo

View File

@@ -0,0 +1,38 @@
model: hunyuan-turbo
label:
  zh_Hans: hunyuan-turbo
  en_US: hunyuan-turbo
model_type: llm
features:
  - agent-thought
  - tool-call
  - multi-tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 32000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: max_tokens
    use_template: max_tokens
    default: 1024
    min: 1
    max: 32000
  - name: enable_enhance
    label:
      zh_Hans: 功能增强
      en_US: Enable Enhancement
    type: boolean
    help:
      zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。
      en_US: Allow the model to perform external search to enhance the generation results.
    required: false
    default: true
pricing:
  input: '0.015'
  output: '0.05'
  unit: '0.001'
  currency: RMB

View File

@@ -48,8 +48,7 @@ class JinaRerankModel(RerankModel):
             return RerankResult(model=model, docs=[])

         base_url = credentials.get("base_url", "https://api.jina.ai/v1")
-        if base_url.endswith("/"):
-            base_url = base_url[:-1]
+        base_url = base_url.removesuffix("/")

         try:
             response = httpx.post(

View File

@@ -44,8 +44,7 @@ class JinaTextEmbeddingModel(TextEmbeddingModel):
             raise CredentialsValidateFailedError("api_key is required")

         base_url = credentials.get("base_url", self.api_base)
-        if base_url.endswith("/"):
-            base_url = base_url[:-1]
+        base_url = base_url.removesuffix("/")

         url = base_url + "/embeddings"
         headers = {"Authorization": "Bearer " + api_key, "Content-Type": "application/json"}

View File

@@ -100,9 +100,9 @@ class MinimaxChatCompletion:
         return self._handle_chat_generate_response(response)

     def _handle_error(self, code: int, msg: str):
-        if code == 1000 or code == 1001 or code == 1013 or code == 1027:
+        if code in {1000, 1001, 1013, 1027}:
             raise InternalServerError(msg)
-        elif code == 1002 or code == 1039:
+        elif code in {1002, 1039}:
             raise RateLimitReachedError(msg)
         elif code == 1004:
             raise InvalidAuthenticationError(msg)

View File

@@ -105,9 +105,9 @@ class MinimaxChatCompletionPro:
         return self._handle_chat_generate_response(response)

     def _handle_error(self, code: int, msg: str):
-        if code == 1000 or code == 1001 or code == 1013 or code == 1027:
+        if code in {1000, 1001, 1013, 1027}:
             raise InternalServerError(msg)
-        elif code == 1002 or code == 1039:
+        elif code in {1002, 1039}:
             raise RateLimitReachedError(msg)
         elif code == 1004:
             raise InvalidAuthenticationError(msg)

View File

@@ -114,7 +114,7 @@ class MinimaxTextEmbeddingModel(TextEmbeddingModel):
             raise CredentialsValidateFailedError("Invalid api key")

     def _handle_error(self, code: int, msg: str):
-        if code == 1000 or code == 1001:
+        if code in {1000, 1001}:
             raise InternalServerError(msg)
         elif code == 1002:
             raise RateLimitReachedError(msg)

View File

@@ -24,7 +24,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -24,7 +24,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -24,7 +24,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -572,7 +572,7 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
                 label=I18nObject(en_US="Size of context window"),
                 type=ParameterType.INT,
                 help=I18nObject(
-                    en_US="Sets the size of the context window used to generate the next token. " "(Default: 2048)"
+                    en_US="Sets the size of the context window used to generate the next token. (Default: 2048)"
                 ),
                 default=2048,
                 min=1,
@@ -650,7 +650,7 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
                 label=I18nObject(en_US="Format"),
                 type=ParameterType.STRING,
                 help=I18nObject(
-                    en_US="the format to return a response in." " Currently the only accepted value is json."
+                    en_US="the format to return a response in. Currently the only accepted value is json."
                 ),
                 options=["json"],
             ),

View File

@@ -5,6 +5,10 @@
 - chatgpt-4o-latest
 - gpt-4o-mini
 - gpt-4o-mini-2024-07-18
+- o1-preview
+- o1-preview-2024-09-12
+- o1-mini
+- o1-mini-2024-09-12
 - gpt-4-turbo
 - gpt-4-turbo-2024-04-09
 - gpt-4-turbo-preview

View File

@@ -28,7 +28,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -27,7 +27,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -27,7 +27,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -27,7 +27,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -40,7 +40,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -40,7 +40,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -40,7 +40,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -41,7 +41,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -40,7 +40,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -41,7 +41,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

View File

@@ -38,7 +38,7 @@ parameter_rules:
   - name: response_format
     label:
       zh_Hans: 回复格式
-      en_US: response_format
+      en_US: Response Format
     type: string
     help:
       zh_Hans: 指定模型必须输出的格式

Some files were not shown because too many files have changed in this diff.