# retrieval_service.py
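"""Dataset retrieval service.

Runs keyword, semantic (vector), and full-text searches against a dataset,
optionally reranks the results, and formats matched segments for downstream
consumers.
"""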

import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from typing import Optional

from flask import Flask, current_app
from sqlalchemy.orm import load_only

from configs import dify_config
from core.rag.data_post_processor.data_post_processor import DataPostProcessor
from core.rag.datasource.keyword.keyword_factory import Keyword
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.embedding.retrieval import RetrievalSegments
from core.rag.index_processor.constant.index_type import IndexType
from core.rag.models.document import Document
from core.rag.rerank.rerank_type import RerankMode
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from models.dataset import ChildChunk, Dataset, DocumentSegment
from models.dataset import Document as DatasetDocument
from services.external_knowledge_service import ExternalDatasetService

default_retrieval_model = {
    "search_method": RetrievalMethod.SEMANTIC_SEARCH.value,
    "reranking_enable": False,
    "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""},
    "top_k": 2,
    "score_threshold_enabled": False,
}


class RetrievalService:
    @classmethod
    def retrieve(
        cls,
        retrieval_method: str,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float] = 0.0,
        reranking_model: Optional[dict] = None,
        reranking_mode: str = "reranking_model",
        weights: Optional[dict] = None,
        document_ids_filter: Optional[list[str]] = None,
    ):
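        """Retrieve relevant documents from a dataset.

        Keyword, semantic, and full-text searches run concurrently on a
        thread pool depending on ``retrieval_method``; for hybrid search
        the merged results are reranked before being returned.
        """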
        if not query:
            return []
        dataset = cls._get_dataset(dataset_id)
        if not dataset or dataset.available_document_count == 0 or dataset.available_segment_count == 0:
            return []

        all_documents: list[Document] = []
        exceptions: list[str] = []
        # Fan the enabled search strategies out to a shared thread pool
        with ThreadPoolExecutor(max_workers=dify_config.RETRIEVAL_SERVICE_EXECUTORS) as executor:  # type: ignore
            futures = []
            if retrieval_method == "keyword_search":
                futures.append(
                    executor.submit(
                        cls.keyword_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        all_documents=all_documents,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            if RetrievalMethod.is_support_semantic_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.embedding_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            if RetrievalMethod.is_support_fulltext_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.full_text_index_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
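            # Block until every submitted search completes (or the 30s timeout expires)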
            concurrent.futures.wait(futures, timeout=30, return_when=concurrent.futures.ALL_COMPLETED)

        if exceptions:
            raise ValueError(";\n".join(exceptions))

        if retrieval_method == RetrievalMethod.HYBRID_SEARCH.value:
            data_post_processor = DataPostProcessor(
                str(dataset.tenant_id), reranking_mode, reranking_model, weights, False
            )
            all_documents = data_post_processor.invoke(
                query=query,
                documents=all_documents,
                score_threshold=score_threshold,
                top_n=top_k,
            )
        return all_documents

    @classmethod
    def external_retrieve(cls, dataset_id: str, query: str, external_retrieval_model: Optional[dict] = None):
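        """Retrieve documents from the external knowledge base backing the dataset."""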
        dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
        if not dataset:
            return []
        all_documents = ExternalDatasetService.fetch_external_knowledge_retrieval(
            dataset.tenant_id, dataset_id, query, external_retrieval_model or {}
        )
        return all_documents

    @classmethod
    def _get_dataset(cls, dataset_id: str) -> Optional[Dataset]:
        return db.session.query(Dataset).filter(Dataset.id == dataset_id).first()

    @classmethod
    def keyword_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        all_documents: list,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
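        """Run a keyword search and extend ``all_documents`` with the hits.

        Runs on a worker thread: it opens its own Flask app context and
        reports failures through ``exceptions`` instead of raising.
        """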
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")
                keyword = Keyword(dataset=dataset)
                documents = keyword.search(
                    cls.escape_query_for_search(query), top_k=top_k, document_ids_filter=document_ids_filter
                )
                all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @classmethod
    def embedding_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
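        """Run a vector similarity search and extend ``all_documents``.

        A reranking model is applied here only for pure semantic search;
        hybrid search defers reranking to ``retrieve`` so that results from
        all strategies are merged first.
        """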
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")
                vector = Vector(dataset=dataset)
                documents = vector.search_by_vector(
                    query,
                    search_type="similarity_score_threshold",
                    top_k=top_k,
                    score_threshold=score_threshold,
                    filter={"group_id": [dataset.id]},
                    document_ids_filter=document_ids_filter,
                )
                if documents:
                    if (
                        reranking_model
                        and reranking_model.get("reranking_model_name")
                        and reranking_model.get("reranking_provider_name")
                        and retrieval_method == RetrievalMethod.SEMANTIC_SEARCH.value
                    ):
                        data_post_processor = DataPostProcessor(
                            str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL.value), reranking_model, None, False
                        )
                        all_documents.extend(
                            data_post_processor.invoke(
                                query=query,
                                documents=documents,
                                score_threshold=score_threshold,
                                top_n=len(documents),
                            )
                        )
                    else:
                        all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @classmethod
    def full_text_index_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
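        """Run a full-text index search and extend ``all_documents``.

        Mirrors ``embedding_search``: reranking is applied here only when
        the retrieval method is pure full-text search.
        """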
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")
                vector_processor = Vector(dataset=dataset)
                documents = vector_processor.search_by_full_text(
                    cls.escape_query_for_search(query), top_k=top_k, document_ids_filter=document_ids_filter
                )
                if documents:
                    if (
                        reranking_model
                        and reranking_model.get("reranking_model_name")
                        and reranking_model.get("reranking_provider_name")
                        and retrieval_method == RetrievalMethod.FULL_TEXT_SEARCH.value
                    ):
                        data_post_processor = DataPostProcessor(
                            str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL.value), reranking_model, None, False
                        )
                        all_documents.extend(
                            data_post_processor.invoke(
                                query=query,
                                documents=documents,
                                score_threshold=score_threshold,
                                top_n=len(documents),
                            )
                        )
                    else:
                        all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @staticmethod
    def escape_query_for_search(query: str) -> str:
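        # Escape embedded double quotes so user input cannot break the
        # keyword / full-text query string.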
        return query.replace('"', '\\"')

    @classmethod
    def format_retrieval_documents(cls, documents: list[Document]) -> list[RetrievalSegments]:
        """Format retrieval documents with optimized batch processing."""
        if not documents:
            return []

        try:
            # Collect document IDs
            document_ids = {doc.metadata.get("document_id") for doc in documents if "document_id" in doc.metadata}
            if not document_ids:
                return []

            # Batch query dataset documents
            dataset_documents = {
                doc.id: doc
                for doc in db.session.query(DatasetDocument)
                .filter(DatasetDocument.id.in_(document_ids))
                .options(load_only(DatasetDocument.id, DatasetDocument.doc_form, DatasetDocument.dataset_id))
                .all()
            }

            records = []
            include_segment_ids = set()
            segment_child_map = {}
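            # segment_child_map maps segment.id -> {"max_score": float, "child_chunks": [detail dicts]}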

            # Process documents
            for document in documents:
                document_id = document.metadata.get("document_id")
                if document_id not in dataset_documents:
                    continue
                dataset_document = dataset_documents[document_id]
                if not dataset_document:
                    continue
                if dataset_document.doc_form == IndexType.PARENT_CHILD_INDEX:
                    # Handle parent-child documents
                    child_index_node_id = document.metadata.get("doc_id")
                    child_chunk = (
                        db.session.query(ChildChunk).filter(ChildChunk.index_node_id == child_index_node_id).first()
                    )
                    if not child_chunk:
                        continue
                    segment = (
                        db.session.query(DocumentSegment)
                        .filter(
                            DocumentSegment.dataset_id == dataset_document.dataset_id,
                            DocumentSegment.enabled == True,
                            DocumentSegment.status == "completed",
                            DocumentSegment.id == child_chunk.segment_id,
                        )
                        .options(
                            load_only(
                                DocumentSegment.id,
                                DocumentSegment.content,
                                DocumentSegment.answer,
                            )
                        )
                        .first()
                    )
                    if not segment:
                        continue
                    if segment.id not in include_segment_ids:
                        include_segment_ids.add(segment.id)
                        child_chunk_detail = {
                            "id": child_chunk.id,
                            "content": child_chunk.content,
                            "position": child_chunk.position,
                            "score": document.metadata.get("score", 0.0),
                        }
                        map_detail = {
                            "max_score": document.metadata.get("score", 0.0),
                            "child_chunks": [child_chunk_detail],
                        }
                        segment_child_map[segment.id] = map_detail
                        record = {
                            "segment": segment,
                        }
                        records.append(record)
                    else:
                        child_chunk_detail = {
                            "id": child_chunk.id,
                            "content": child_chunk.content,
                            "position": child_chunk.position,
                            "score": document.metadata.get("score", 0.0),
                        }
                        segment_child_map[segment.id]["child_chunks"].append(child_chunk_detail)
                        segment_child_map[segment.id]["max_score"] = max(
                            segment_child_map[segment.id]["max_score"], document.metadata.get("score", 0.0)
                        )
                else:
                    # Handle normal documents
                    index_node_id = document.metadata.get("doc_id")
                    if not index_node_id:
                        continue
                    segment = (
                        db.session.query(DocumentSegment)
                        .filter(
                            DocumentSegment.dataset_id == dataset_document.dataset_id,
                            DocumentSegment.enabled == True,
                            DocumentSegment.status == "completed",
                            DocumentSegment.index_node_id == index_node_id,
                        )
                        .first()
                    )
                    if not segment:
                        continue
                    include_segment_ids.add(segment.id)
                    record = {
                        "segment": segment,
                        "score": document.metadata.get("score"),  # type: ignore
                    }
                    records.append(record)

            # Attach child-chunk details and the max child score to each record
            for record in records:
                if record["segment"].id in segment_child_map:
                    record["child_chunks"] = segment_child_map[record["segment"].id].get("child_chunks")  # type: ignore
                    record["score"] = segment_child_map[record["segment"].id]["max_score"]
            return [RetrievalSegments(**record) for record in records]
        except Exception as e:
            db.session.rollback()
            raise e
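

# Hypothetical usage sketch (not part of the original module): assumes a
# configured Flask `app` and an existing dataset UUID, and is shown only to
# illustrate the expected call shape.
#
#   with app.app_context():
#       docs = RetrievalService.retrieve(
#           retrieval_method=RetrievalMethod.SEMANTIC_SEARCH.value,
#           dataset_id="<dataset-uuid>",
#           query="refund policy",
#           top_k=4,
#       )
#       segments = RetrievalService.format_retrieval_documents(docs)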