update dataset embedding model, update document status to be indexing (#7145)
parent f667ef98cb, commit c6b0dc6a29
```diff
@@ -42,31 +42,42 @@ def deal_dataset_vector_index_task(dataset_id: str, action: str):
             ).all()
 
             if dataset_documents:
-                documents = []
+                dataset_documents_ids = [doc.id for doc in dataset_documents]
+                db.session.query(DatasetDocument).filter(DatasetDocument.id.in_(dataset_documents_ids)) \
+                    .update({"indexing_status": "indexing"}, synchronize_session=False)
+                db.session.commit()
+
                 for dataset_document in dataset_documents:
-                    # delete from vector index
-                    segments = db.session.query(DocumentSegment).filter(
-                        DocumentSegment.document_id == dataset_document.id,
-                        DocumentSegment.enabled == True
-                    ) .order_by(DocumentSegment.position.asc()).all()
-                    for segment in segments:
-                        document = Document(
-                            page_content=segment.content,
-                            metadata={
-                                "doc_id": segment.index_node_id,
-                                "doc_hash": segment.index_node_hash,
-                                "document_id": segment.document_id,
-                                "dataset_id": segment.dataset_id,
-                            }
-                        )
-
-                        documents.append(document)
-
-            # save vector index
-            index_processor.load(dataset, documents, with_keywords=False)
+                    try:
+                        # add from vector index
+                        segments = db.session.query(DocumentSegment).filter(
+                            DocumentSegment.document_id == dataset_document.id,
+                            DocumentSegment.enabled == True
+                        ) .order_by(DocumentSegment.position.asc()).all()
+                        if segments:
+                            documents = []
+                            for segment in segments:
+                                document = Document(
+                                    page_content=segment.content,
+                                    metadata={
+                                        "doc_id": segment.index_node_id,
+                                        "doc_hash": segment.index_node_hash,
+                                        "document_id": segment.document_id,
+                                        "dataset_id": segment.dataset_id,
+                                    }
+                                )
+
+                                documents.append(document)
+                            # save vector index
+                            index_processor.load(dataset, documents, with_keywords=False)
+                        db.session.query(DatasetDocument).filter(DatasetDocument.id == dataset_document.id) \
+                            .update({"indexing_status": "completed"}, synchronize_session=False)
+                        db.session.commit()
+                    except Exception as e:
+                        db.session.query(DatasetDocument).filter(DatasetDocument.id == dataset_document.id) \
+                            .update({"indexing_status": "error", "error": str(e)}, synchronize_session=False)
+                        db.session.commit()
         elif action == 'update':
-            # clean index
-            index_processor.clean(dataset, None, with_keywords=False)
-
             dataset_documents = db.session.query(DatasetDocument).filter(
                 DatasetDocument.dataset_id == dataset_id,
                 DatasetDocument.indexing_status == 'completed',
```
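This hunk flips every affected document to `indexing` with a single bulk UPDATE before any re-embedding work starts; the same pattern reappears in the `update` hunk below. A minimal, self-contained sketch of that pattern, assuming SQLAlchemy 1.4+; the toy in-memory engine and sample rows are illustrative, only the model and column names mirror the diff:

```python
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class DatasetDocument(Base):
    __tablename__ = "dataset_documents"
    id = Column(String, primary_key=True)
    indexing_status = Column(String, default="completed")
    error = Column(String, nullable=True)

engine = create_engine("sqlite://")  # illustrative in-memory database
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([DatasetDocument(id="doc-a"), DatasetDocument(id="doc-b")])
    session.commit()

    dataset_documents = session.query(DatasetDocument).all()
    dataset_documents_ids = [doc.id for doc in dataset_documents]

    # One UPDATE ... WHERE id IN (...) round trip instead of N per-row writes.
    # synchronize_session=False skips reconciling already-loaded objects in the
    # identity map, which is safe here because they are not read again before
    # the commit.
    session.query(DatasetDocument) \
        .filter(DatasetDocument.id.in_(dataset_documents_ids)) \
        .update({"indexing_status": "indexing"}, synchronize_session=False)
    session.commit()
```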
```diff
@@ -75,28 +86,46 @@ def deal_dataset_vector_index_task(dataset_id: str, action: str):
             ).all()
             # add new index
             if dataset_documents:
-                documents = []
+                # update document status
+                dataset_documents_ids = [doc.id for doc in dataset_documents]
+                db.session.query(DatasetDocument).filter(DatasetDocument.id.in_(dataset_documents_ids)) \
+                    .update({"indexing_status": "indexing"}, synchronize_session=False)
+                db.session.commit()
+
+                # clean index
+                index_processor.clean(dataset, None, with_keywords=False)
+
                 for dataset_document in dataset_documents:
-                    # delete from vector index
-                    segments = db.session.query(DocumentSegment).filter(
-                        DocumentSegment.document_id == dataset_document.id,
-                        DocumentSegment.enabled == True
-                    ).order_by(DocumentSegment.position.asc()).all()
-                    for segment in segments:
-                        document = Document(
-                            page_content=segment.content,
-                            metadata={
-                                "doc_id": segment.index_node_id,
-                                "doc_hash": segment.index_node_hash,
-                                "document_id": segment.document_id,
-                                "dataset_id": segment.dataset_id,
-                            }
-                        )
-
-                        documents.append(document)
+                    # update from vector index
+                    try:
+                        segments = db.session.query(DocumentSegment).filter(
+                            DocumentSegment.document_id == dataset_document.id,
+                            DocumentSegment.enabled == True
+                        ).order_by(DocumentSegment.position.asc()).all()
+                        if segments:
+                            documents = []
+                            for segment in segments:
+                                document = Document(
+                                    page_content=segment.content,
+                                    metadata={
+                                        "doc_id": segment.index_node_id,
+                                        "doc_hash": segment.index_node_hash,
+                                        "document_id": segment.document_id,
+                                        "dataset_id": segment.dataset_id,
+                                    }
+                                )
+
+                                documents.append(document)
+                            # save vector index
+                            index_processor.load(dataset, documents, with_keywords=False)
+                        db.session.query(DatasetDocument).filter(DatasetDocument.id == dataset_document.id) \
+                            .update({"indexing_status": "completed"}, synchronize_session=False)
+                        db.session.commit()
+                    except Exception as e:
+                        db.session.query(DatasetDocument).filter(DatasetDocument.id == dataset_document.id) \
+                            .update({"indexing_status": "error", "error": str(e)}, synchronize_session=False)
+                        db.session.commit()
 
-            # save vector index
-            index_processor.load(dataset, documents, with_keywords=False)
 
         end_at = time.perf_counter()
         logging.info(
```
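The other behavioral change in both hunks is per-document error isolation: each document's segments are reloaded inside a `try`, and the row is marked `completed` on success or `error` (with the message) on failure, so one failing document no longer leaves the whole batch stuck in `indexing`. A condensed sketch of that control flow, reusing the toy `DatasetDocument` model from the sketch above; `reindex_one` is a hypothetical stand-in for the segment-loading work in the diff:

```python
def reindex_all(session, dataset, dataset_documents, reindex_one):
    """Re-embed each document, recording per-document success or failure."""
    for dataset_document in dataset_documents:
        try:
            # stand-in for: query enabled segments, build Document objects,
            # then index_processor.load(dataset, documents, with_keywords=False)
            reindex_one(dataset, dataset_document)
            update = {"indexing_status": "completed"}
        except Exception as e:
            # record the failure; the remaining documents are still processed
            update = {"indexing_status": "error", "error": str(e)}
        session.query(DatasetDocument) \
            .filter(DatasetDocument.id == dataset_document.id) \
            .update(update, synchronize_session=False)
        session.commit()
```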