dify/api/core/helper/moderation.py

import logging
import random
from typing import cast
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities import DEFAULT_PLUGIN_ID
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.invoke import InvokeBadRequestError
from core.model_runtime.model_providers.__base.moderation_model import ModerationModel
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from extensions.ext_hosting_provider import hosting_configuration
from models.provider import ProviderType

logger = logging.getLogger(__name__)


def check_moderation(tenant_id: str, model_config: ModelConfigWithCredentialsEntity, text: str) -> bool:
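    """Run the hosted OpenAI moderation model over one randomly sampled chunk of ``text``.

    The check only applies when hosted moderation is enabled and the request is served by a
    system (hosted) provider listed in the moderation config; in all other cases it returns False.
    """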
    moderation_config = hosting_configuration.moderation_config
    openai_provider_name = f"{DEFAULT_PLUGIN_ID}/openai/openai"
    if (
        moderation_config
        and moderation_config.enabled is True
        and openai_provider_name in hosting_configuration.provider_map
        and hosting_configuration.provider_map[openai_provider_name].enabled is True
    ):
        using_provider_type = model_config.provider_model_bundle.configuration.using_provider_type
        provider_name = model_config.provider
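        # Only requests that use the system (hosted) provider for one of the configured
        # providers are checked.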
        if using_provider_type == ProviderType.SYSTEM and provider_name in moderation_config.providers:
            hosting_openai_config = hosting_configuration.provider_map[openai_provider_name]
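            # Without credentials for the hosted OpenAI provider the moderation model cannot be called.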
            if hosting_openai_config.credentials is None:
                return False

            # Split the text into chunks of 2000 characters each
            length = 2000
            text_chunks = [text[i : i + length] for i in range(0, len(text), length)]

            if len(text_chunks) == 0:
                return True
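            # Only one randomly chosen chunk is sent for moderation, trading full coverage
            # of long texts for a single call.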
            text_chunk = random.choice(text_chunks)

            try:
                model_provider_factory = ModelProviderFactory(tenant_id)

                # Get the moderation model instance for the hosted OpenAI provider
                model_type_instance = model_provider_factory.get_model_type_instance(
                    provider=openai_provider_name, model_type=ModelType.MODERATION
                )
                model_type_instance = cast(ModerationModel, model_type_instance)
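                # Ask OpenAI's omni-moderation-latest model whether the sampled chunk is flagged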
                moderation_result = model_type_instance.invoke(
                    model="omni-moderation-latest", credentials=hosting_openai_config.credentials, text=text_chunk
                )

                if moderation_result is True:
                    return True
            except Exception:
                logger.exception("Failed to check moderation, provider_name: %s", provider_name)
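                # Surface any failure to the caller as a bad-request error instead of
                # propagating the original exception.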
                raise InvokeBadRequestError("Rate limit exceeded, please try again later.")

    return False
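# Minimal usage sketch (hypothetical caller; `tenant_id` and `model_config` are assumed to
# come from the surrounding request/app context):
#
#     flagged = check_moderation(tenant_id, model_config, "text to screen")
#     if flagged:
#         ...  # the sampled chunk was flagged by the hosted moderation model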