# retrieval_service.py

import concurrent.futures
import json
from concurrent.futures import ThreadPoolExecutor
from typing import Optional

from flask import Flask, current_app
from sqlalchemy.orm import load_only

from configs import dify_config
from core.rag.data_post_processor.data_post_processor import DataPostProcessor
from core.rag.datasource.keyword.keyword_factory import Keyword
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.embedding.retrieval import RetrievalSegments
from core.rag.index_processor.constant.index_type import IndexType
from core.rag.models.document import Document
from core.rag.rerank.rerank_type import RerankMode
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from models.dataset import ChildChunk, Dataset, DocumentSegment
from models.dataset import Document as DatasetDocument
from services.external_knowledge_service import ExternalDatasetService
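
# Module-level default retrieval configuration (semantic search only, no
# reranking); presumably used as a fallback when a dataset defines none.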
default_retrieval_model = {
    "search_method": RetrievalMethod.SEMANTIC_SEARCH.value,
    "reranking_enable": False,
    "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""},
    "top_k": 2,
    "score_threshold_enabled": False,
}


class RetrievalService:
    """Dataset retrieval service: keyword, embedding (semantic), full-text,
    and hybrid search, with optional reranking of results."""

    @classmethod
    def retrieve(
        cls,
        retrieval_method: str,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float] = 0.0,
        reranking_model: Optional[dict] = None,
        reranking_mode: str = "reranking_model",
        weights: Optional[dict] = None,
    ):
        """Run the retrieval methods implied by `retrieval_method` concurrently
        and return the combined (optionally reranked) documents."""
        if not query:
            return []
        dataset = cls._get_dataset(dataset_id)
        if not dataset or dataset.available_document_count == 0 or dataset.available_segment_count == 0:
            return []

        all_documents: list[Document] = []
        exceptions: list[str] = []

        # Fan the search variants out to a thread pool; workers append into the
        # shared `all_documents` / `exceptions` lists.
        with ThreadPoolExecutor(max_workers=dify_config.RETRIEVAL_SERVICE_EXECUTORS) as executor:  # type: ignore
            futures = []
            if retrieval_method == "keyword_search":
                futures.append(
                    executor.submit(
                        cls.keyword_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        all_documents=all_documents,
                        exceptions=exceptions,
                    )
                )
            if RetrievalMethod.is_support_semantic_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.embedding_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                    )
                )
            if RetrievalMethod.is_support_fulltext_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.full_text_index_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                    )
                )
            # Wait for all workers, but give up after 30 seconds; still-running
            # workers are not cancelled.
            concurrent.futures.wait(futures, timeout=30, return_when=concurrent.futures.ALL_COMPLETED)

        if exceptions:
            raise ValueError(";\n".join(exceptions))

        # Hybrid search merges results from several methods, so rerank or
        # re-score the combined list before returning.
        if retrieval_method == RetrievalMethod.HYBRID_SEARCH.value:
            data_post_processor = DataPostProcessor(
                str(dataset.tenant_id), reranking_mode, reranking_model, weights, False
            )
            all_documents = data_post_processor.invoke(
                query=query,
                documents=all_documents,
                score_threshold=score_threshold,
                top_n=top_k,
            )
        return all_documents
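
    # A minimal call sketch (hypothetical dataset ID; requires a Flask app
    # context so `current_app` and `db.session` resolve):
    #
    #     docs = RetrievalService.retrieve(
    #         retrieval_method=RetrievalMethod.HYBRID_SEARCH.value,
    #         dataset_id="<dataset-uuid>",
    #         query="how do refunds work?",
    #         top_k=4,
    #     )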

    @classmethod
    def external_retrieve(cls, dataset_id: str, query: str, external_retrieval_model: Optional[dict] = None):
        """Retrieve from the external knowledge base bound to the dataset."""
        dataset = cls._get_dataset(dataset_id)
        if not dataset:
            return []
        return ExternalDatasetService.fetch_external_knowledge_retrieval(
            dataset.tenant_id, dataset_id, query, external_retrieval_model or {}
        )

    @classmethod
    def _get_dataset(cls, dataset_id: str) -> Optional[Dataset]:
        return db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
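
    # Each *_search worker below runs in a pool thread, which does not inherit
    # the caller's Flask context, so each re-enters an app context itself.
    # Results and errors are reported through the shared mutable lists.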

    @classmethod
    def keyword_search(
        cls, flask_app: Flask, dataset_id: str, query: str, top_k: int, all_documents: list, exceptions: list
    ):
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")

                keyword = Keyword(dataset=dataset)
                documents = keyword.search(cls.escape_query_for_search(query), top_k=top_k)
                all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @classmethod
    def embedding_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
    ):
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")

                vector = Vector(dataset=dataset)
                documents = vector.search_by_vector(
                    query,
                    search_type="similarity_score_threshold",
                    top_k=top_k,
                    score_threshold=score_threshold,
                    filter={"group_id": [dataset.id]},
                )
                if documents:
                    # Rerank inside the worker only for pure semantic search;
                    # hybrid search reranks the merged results in `retrieve`.
                    if (
                        reranking_model
                        and reranking_model.get("reranking_model_name")
                        and reranking_model.get("reranking_provider_name")
                        and retrieval_method == RetrievalMethod.SEMANTIC_SEARCH.value
                    ):
                        data_post_processor = DataPostProcessor(
                            str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL.value), reranking_model, None, False
                        )
                        all_documents.extend(
                            data_post_processor.invoke(
                                query=query,
                                documents=documents,
                                score_threshold=score_threshold,
                                top_n=len(documents),
                            )
                        )
                    else:
                        all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @classmethod
    def full_text_index_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
    ):
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")

                vector_processor = Vector(dataset=dataset)
                documents = vector_processor.search_by_full_text(cls.escape_query_for_search(query), top_k=top_k)
                if documents:
                    if (
                        reranking_model
                        and reranking_model.get("reranking_model_name")
                        and reranking_model.get("reranking_provider_name")
                        and retrieval_method == RetrievalMethod.FULL_TEXT_SEARCH.value
                    ):
                        data_post_processor = DataPostProcessor(
                            str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL.value), reranking_model, None, False
                        )
                        all_documents.extend(
                            data_post_processor.invoke(
                                query=query,
                                documents=documents,
                                score_threshold=score_threshold,
                                top_n=len(documents),
                            )
                        )
                    else:
                        all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @staticmethod
    def escape_query_for_search(query: str) -> str:
        """Escape quotes, backslashes, and control characters via JSON string
        encoding, e.g. 'say "hi"' -> 'say \\"hi\\"'."""
        return json.dumps(query).strip('"')

    @classmethod
    def format_retrieval_documents(cls, documents: list[Document]) -> list[RetrievalSegments]:
        """Format retrieval documents with optimized batch processing."""
        if not documents:
            return []
        try:
            # Collect document IDs (guard against documents without metadata)
            document_ids = {
                doc.metadata.get("document_id")
                for doc in documents
                if doc.metadata and "document_id" in doc.metadata
            }
            if not document_ids:
                return []

            # Batch-query the dataset documents
            dataset_documents = {
                doc.id: doc
                for doc in db.session.query(DatasetDocument)
                .filter(DatasetDocument.id.in_(document_ids))
                .options(load_only(DatasetDocument.id, DatasetDocument.doc_form, DatasetDocument.dataset_id))
                .all()
            }

            records = []
            include_segment_ids = set()
            segment_child_map = {}

            # Process documents
            for document in documents:
                if not document.metadata:
                    continue
                document_id = document.metadata.get("document_id")
                if document_id not in dataset_documents:
                    continue
                dataset_document = dataset_documents[document_id]
                if dataset_document.doc_form == IndexType.PARENT_CHILD_INDEX:
                    # Parent-child index: the hit is a child chunk; resolve its
                    # parent segment and aggregate child hits per segment.
                    child_index_node_id = document.metadata.get("doc_id")
                    child_chunk = (
                        db.session.query(ChildChunk).filter(ChildChunk.index_node_id == child_index_node_id).first()
                    )
                    if not child_chunk:
                        continue
                    segment = (
                        db.session.query(DocumentSegment)
                        .filter(
                            DocumentSegment.dataset_id == dataset_document.dataset_id,
                            DocumentSegment.enabled == True,
                            DocumentSegment.status == "completed",
                            DocumentSegment.id == child_chunk.segment_id,
                        )
                        .options(
                            load_only(
                                DocumentSegment.id,
                                DocumentSegment.content,
                                DocumentSegment.answer,
                            )
                        )
                        .first()
                    )
                    if not segment:
                        continue
                    if segment.id not in include_segment_ids:
                        include_segment_ids.add(segment.id)
                        child_chunk_detail = {
                            "id": child_chunk.id,
                            "content": child_chunk.content,
                            "position": child_chunk.position,
                            "score": document.metadata.get("score", 0.0),
                        }
                        segment_child_map[segment.id] = {
                            "max_score": document.metadata.get("score", 0.0),
                            "child_chunks": [child_chunk_detail],
                        }
                        records.append({"segment": segment})
                    else:
                        # Another child of an already-seen segment: append the
                        # chunk and keep the segment's best score.
                        child_chunk_detail = {
                            "id": child_chunk.id,
                            "content": child_chunk.content,
                            "position": child_chunk.position,
                            "score": document.metadata.get("score", 0.0),
                        }
                        segment_child_map[segment.id]["child_chunks"].append(child_chunk_detail)
                        segment_child_map[segment.id]["max_score"] = max(
                            segment_child_map[segment.id]["max_score"], document.metadata.get("score", 0.0)
                        )
                else:
                    # Normal index: the hit maps directly to a segment
                    index_node_id = document.metadata.get("doc_id")
                    if not index_node_id:
                        continue
                    segment = (
                        db.session.query(DocumentSegment)
                        .filter(
                            DocumentSegment.dataset_id == dataset_document.dataset_id,
                            DocumentSegment.enabled == True,
                            DocumentSegment.status == "completed",
                            DocumentSegment.index_node_id == index_node_id,
                        )
                        .first()
                    )
                    if not segment:
                        continue
                    include_segment_ids.add(segment.id)
                    records.append(
                        {
                            "segment": segment,
                            "score": document.metadata.get("score"),  # type: ignore
                        }
                    )

            # Attach aggregated child-chunk info to the parent-child records
            for record in records:
                if record["segment"].id in segment_child_map:
                    record["child_chunks"] = segment_child_map[record["segment"].id].get("child_chunks")  # type: ignore
                    record["score"] = segment_child_map[record["segment"].id]["max_score"]

            return [RetrievalSegments(**record) for record in records]
        except Exception as e:
            db.session.rollback()
            raise e
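
# Sketch of consuming the formatted results (attribute names inferred from the
# `RetrievalSegments(**record)` construction above):
#
#     segments = RetrievalService.format_retrieval_documents(docs)
#     for item in segments:
#         print(item.score, item.segment.content)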