Compare commits


3 Commits

Author  SHA1        Message                                    Date
jyong   0885c2ad64  optimize the unused dataset clean logic    2024-07-17 17:30:27 +08:00
jyong   b44c69ddc3  update celery beat scheduler time to env   2024-07-17 02:29:24 +08:00
jyong   4fdc162c45  update celery beat scheduler time to env   2024-07-17 02:25:07 +08:00
4 changed files with 71 additions and 30 deletions

View File

@@ -256,3 +256,7 @@ WORKFLOW_CALL_MAX_DEPTH=5
 # App configuration
 APP_MAX_EXECUTION_TIME=1200
 APP_MAX_ACTIVE_REQUESTS=0
+
+
+# Celery beat configuration
+CELERY_BEAT_SCHEDULER_TIME=1

View File

@@ -23,6 +23,7 @@ class SecurityConfig(BaseSettings):
         default=24,
     )
 
+
 class AppExecutionConfig(BaseSettings):
     """
     App Execution configs
@@ -435,6 +436,13 @@ class ImageFormatConfig(BaseSettings):
     )
 
+
+class CeleryBeatConfig(BaseSettings):
+    CELERY_BEAT_SCHEDULER_TIME: int = Field(
+        description='the time of the celery scheduler, default to 1 day',
+        default=1,
+    )
+
 
 class FeatureConfig(
     # place the configs in alphabet order
     AppExecutionConfig,
@@ -462,5 +470,6 @@ class FeatureConfig(
 
     # hosted services config
     HostedServiceConfig,
+    CeleryBeatConfig,
 ):
     pass
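For context: the new CeleryBeatConfig follows the pydantic-settings pattern used by the rest of this configs package, where the field name doubles as the environment variable name, so the CELERY_BEAT_SCHEDULER_TIME entry added to the env file above feeds this field directly. A minimal self-contained sketch of that behavior (assuming pydantic-settings; writing to os.environ here only makes the demo runnable):

import os

from pydantic import Field
from pydantic_settings import BaseSettings


class CeleryBeatConfig(BaseSettings):
    CELERY_BEAT_SCHEDULER_TIME: int = Field(
        description='the time of the celery scheduler, default to 1 day',
        default=1,
    )


# Env var matching is case-insensitive on the field name, and the string
# value is coerced to int by the annotation.
os.environ['CELERY_BEAT_SCHEDULER_TIME'] = '3'
print(CeleryBeatConfig().CELERY_BEAT_SCHEDULER_TIME)  # 3; falls back to 1 when unset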

View File

@@ -43,15 +43,15 @@ def init_app(app: Flask) -> Celery:
         "schedule.clean_embedding_cache_task",
         "schedule.clean_unused_datasets_task",
     ]
+    day = app.config["CELERY_BEAT_SCHEDULER_TIME"]
     beat_schedule = {
         'clean_embedding_cache_task': {
             'task': 'schedule.clean_embedding_cache_task.clean_embedding_cache_task',
-            'schedule': timedelta(days=1),
+            'schedule': timedelta(days=day),
         },
         'clean_unused_datasets_task': {
             'task': 'schedule.clean_unused_datasets_task.clean_unused_datasets_task',
-            'schedule': timedelta(minutes=3),
+            'schedule': timedelta(minutes=1),
         }
     }
     celery_app.conf.update(
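The hard-coded timedelta(days=1) is now derived from app.config, which is in turn populated from CELERY_BEAT_SCHEDULER_TIME. A self-contained sketch of the same wiring, with os.environ standing in for Flask's app.config (app name and lookup are illustrative):

import os
from datetime import timedelta

from celery import Celery

celery_app = Celery('beat_sketch')

# Stand-in for app.config["CELERY_BEAT_SCHEDULER_TIME"]; in the real code the
# value arrives via the CeleryBeatConfig shown above.
day = int(os.environ.get('CELERY_BEAT_SCHEDULER_TIME', 1))

celery_app.conf.beat_schedule = {
    'clean_embedding_cache_task': {
        'task': 'schedule.clean_embedding_cache_task.clean_embedding_cache_task',
        'schedule': timedelta(days=day),  # previously hard-coded as timedelta(days=1)
    },
}

Two things worth noting from the diff itself: beat reads its schedule at startup, so changing CELERY_BEAT_SCHEDULER_TIME requires restarting the beat process, and only clean_embedding_cache_task picks up the new setting; clean_unused_datasets_task now runs on a fixed timedelta(minutes=1).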

View File

@@ -9,7 +9,7 @@ from configs import dify_config
 from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
 from extensions.ext_database import db
 from models.dataset import Dataset, DatasetQuery, Document
-
+from sqlalchemy import func
 
 @app.celery.task(queue='dataset')
 def clean_unused_datasets_task():
@@ -20,10 +20,46 @@ def clean_unused_datasets_task():
     page = 1
     while True:
         try:
-            datasets = db.session.query(Dataset).filter(Dataset.created_at < thirty_days_ago) \
-                .order_by(Dataset.created_at.desc()).paginate(page=page, per_page=50)
+            # Subquery for counting new documents
+            document_subquery_new = db.session.query(
+                Document.dataset_id,
+                func.count(Document.id).label('document_count')
+            ).filter(
+                Document.indexing_status == 'completed',
+                Document.enabled == True,
+                Document.archived == False,
+                Document.updated_at > thirty_days_ago
+            ).group_by(Document.dataset_id).subquery()
+
+            # Subquery for counting old documents
+            document_subquery_old = db.session.query(
+                Document.dataset_id,
+                func.count(Document.id).label('document_count')
+            ).filter(
+                Document.indexing_status == 'completed',
+                Document.enabled == True,
+                Document.archived == False,
+                Document.updated_at < thirty_days_ago
+            ).group_by(Document.dataset_id).subquery()
+
+            # Main query with join and filter
+            datasets = (db.session.query(Dataset)
+                        .outerjoin(
+                            document_subquery_new, Dataset.id == document_subquery_new.c.dataset_id
+                        ).outerjoin(
+                            document_subquery_old, Dataset.id == document_subquery_old.c.dataset_id
+                        ).filter(
+                            Dataset.created_at < thirty_days_ago,
+                            func.coalesce(document_subquery_new.c.document_count, 0) == 0,
+                            func.coalesce(document_subquery_old.c.document_count, 0) > 0
+                        ).order_by(
+                            Dataset.created_at.desc()
+                        ).paginate(page=page, per_page=50))
         except NotFound:
             break
+        if datasets.items is None or len(datasets.items) == 0:
+            break
         page += 1
         for dataset in datasets:
             dataset_query = db.session.query(DatasetQuery).filter(
@@ -31,14 +67,6 @@ def clean_unused_datasets_task():
                 DatasetQuery.dataset_id == dataset.id
             ).all()
             if not dataset_query or len(dataset_query) == 0:
-                documents = db.session.query(Document).filter(
-                    Document.dataset_id == dataset.id,
-                    Document.indexing_status == 'completed',
-                    Document.enabled == True,
-                    Document.archived == False,
-                    Document.updated_at > thirty_days_ago
-                ).all()
-                if not documents or len(documents) == 0:
                 try:
                     # remove index
                     index_processor = IndexProcessorFactory(dataset.doc_form).init_index_processor()
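This refactor moves the per-dataset Document check, previously an extra query issued inside the loop, into two aggregating subqueries joined once up front, so the "no recently updated documents, but some stale ones" test runs in the database and only candidate datasets are paginated. Roughly the SQL the new statement compiles to, assuming conventional datasets/documents table names (an approximation; the exact output depends on the SQLAlchemy dialect and on paginate()'s LIMIT/OFFSET):

# Approximate SQL generated by the new query, for illustration only.
APPROXIMATE_SQL = """
SELECT datasets.*
FROM datasets
LEFT OUTER JOIN (
    SELECT dataset_id, count(id) AS document_count
    FROM documents
    WHERE indexing_status = 'completed'
      AND enabled IS TRUE AND archived IS FALSE
      AND updated_at > :thirty_days_ago
    GROUP BY dataset_id
) AS new_docs ON datasets.id = new_docs.dataset_id
LEFT OUTER JOIN (
    SELECT dataset_id, count(id) AS document_count
    FROM documents
    WHERE indexing_status = 'completed'
      AND enabled IS TRUE AND archived IS FALSE
      AND updated_at < :thirty_days_ago
    GROUP BY dataset_id
) AS old_docs ON datasets.id = old_docs.dataset_id
WHERE datasets.created_at < :thirty_days_ago
  AND coalesce(new_docs.document_count, 0) = 0
  AND coalesce(old_docs.document_count, 0) > 0
ORDER BY datasets.created_at DESC
LIMIT 50 OFFSET :offset
"""

The added early-exit check (if datasets.items is None or len(datasets.items) == 0: break) also makes the loop terminate cleanly when a page comes back empty rather than relying solely on paginate() raising NotFound.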