# datasets.py
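"""Console API resources for dataset (knowledge base) management: listing and
creating datasets, reading/updating/deleting a dataset, indexing estimates and
status, dataset-scoped API keys, and retrieval-setting lookups."""
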
import flask_restful  # type: ignore
from flask import request
from flask_login import current_user  # type: ignore
from flask_restful import Resource, marshal, marshal_with, reqparse  # type: ignore
from werkzeug.exceptions import Forbidden, NotFound

import services
from configs import dify_config
from controllers.console import api
from controllers.console.apikey import api_key_fields, api_key_list
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import DatasetInUseError, DatasetNameDuplicateError, IndexingEstimateError
from controllers.console.wraps import (
    account_initialization_required,
    cloud_edition_billing_rate_limit_check,
    enterprise_license_required,
    setup_required,
)
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.indexing_runner import IndexingRunner
from core.model_runtime.entities.model_entities import ModelType
from core.plugin.entities.plugin import ModelProviderID
from core.provider_manager import ProviderManager
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.extractor.entity.extract_setting import ExtractSetting
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from fields.app_fields import related_app_list
from fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fields
from fields.document_fields import document_status_fields
from libs.login import login_required
from models import ApiToken, Dataset, Document, DocumentSegment, UploadFile
from models.dataset import DatasetPermissionEnum
from services.dataset_service import DatasetPermissionService, DatasetService, DocumentService


def _validate_name(name):
    if not name or len(name) < 1 or len(name) > 40:
        raise ValueError("Name must be between 1 to 40 characters.")
    return name


def _validate_description_length(description):
    if len(description) > 400:
        raise ValueError("Description cannot exceed 400 characters.")
    return description
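
# Illustrative behavior of the validators above (hypothetical inputs):
#   _validate_name("Product FAQ")             -> "Product FAQ"
#   _validate_name("")                        -> raises ValueError("Name must be between 1 to 40 characters.")
#   _validate_description_length("x" * 401)   -> raises ValueError("Description cannot exceed 400 characters.")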


class DatasetListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    @enterprise_license_required
    def get(self):
        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)
        ids = request.args.getlist("ids")
        # provider = request.args.get("provider", default="vendor")
        search = request.args.get("keyword", default=None, type=str)
        tag_ids = request.args.getlist("tag_ids")
        include_all = request.args.get("include_all", default="false").lower() == "true"

        if ids:
            datasets, total = DatasetService.get_datasets_by_ids(ids, current_user.current_tenant_id)
        else:
            datasets, total = DatasetService.get_datasets(
                page, limit, current_user.current_tenant_id, current_user, search, tag_ids, include_all
            )

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)

        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)

        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        data = marshal(datasets, dataset_detail_fields)
        for item in data:
            # convert embedding_model_provider to plugin standard format
            if item["indexing_technique"] == "high_quality":
                item["embedding_model_provider"] = str(ModelProviderID(item["embedding_model_provider"]))
                item_model = f"{item['embedding_model']}:{item['embedding_model_provider']}"
                if item_model in model_names:
                    item["embedding_available"] = True
                else:
                    item["embedding_available"] = False
            else:
                item["embedding_available"] = True

            if item.get("permission") == "partial_members":
                part_users_list = DatasetPermissionService.get_dataset_partial_member_list(item["id"])
                item.update({"partial_member_list": part_users_list})
            else:
                item.update({"partial_member_list": []})

        response = {"data": data, "has_more": len(datasets) == limit, "limit": limit, "total": total, "page": page}
        return response, 200

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            required=True,
            help="type is required. Name must be between 1 to 40 characters.",
            type=_validate_name,
        )
        parser.add_argument(
            "description",
            type=str,
            nullable=True,
            required=False,
            default="",
        )
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            nullable=True,
            required=False,
        )
        parser.add_argument(
            "provider",
            type=str,
            nullable=True,
            choices=Dataset.PROVIDER_LIST,
            required=False,
            default="vendor",
        )
        parser.add_argument(
            "external_knowledge_id",
            type=str,
            nullable=True,
            required=False,
        )
        args = parser.parse_args()

        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
        if not current_user.is_dataset_editor:
            raise Forbidden()

        try:
            dataset = DatasetService.create_empty_dataset(
                tenant_id=current_user.current_tenant_id,
                name=args["name"],
                description=args["description"],
                indexing_technique=args["indexing_technique"],
                account=current_user,
                permission=DatasetPermissionEnum.ONLY_ME,
                provider=args["provider"],
                external_knowledge_api_id=args["external_knowledge_api_id"],
                external_knowledge_id=args["external_knowledge_id"],
            )
        except services.errors.dataset.DatasetNameDuplicateError:
            raise DatasetNameDuplicateError()

        return marshal(dataset, dataset_detail_fields), 201
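
# Illustrative console calls for DatasetListApi (hypothetical host, auth, and values;
# the resource is registered at "/datasets" at the bottom of this file):
#   GET  /datasets?page=1&limit=20&keyword=faq&include_all=false
#   POST /datasets  {"name": "Product FAQ", "description": "", "indexing_technique": "high_quality",
#                    "provider": "vendor"}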


class DatasetApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))
        data = marshal(dataset, dataset_detail_fields)
        if data.get("permission") == "partial_members":
            part_users_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
            data.update({"partial_member_list": part_users_list})

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)

        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)

        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        if data["indexing_technique"] == "high_quality":
            item_model = f"{data['embedding_model']}:{data['embedding_model_provider']}"
            if item_model in model_names:
                data["embedding_available"] = True
            else:
                data["embedding_available"] = False
        else:
            data["embedding_available"] = True

        if data.get("permission") == "partial_members":
            part_users_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
            data.update({"partial_member_list": part_users_list})

        return data, 200

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def patch(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            help="type is required. Name must be between 1 to 40 characters.",
            type=_validate_name,
        )
        parser.add_argument("description", location="json", store_missing=False, type=_validate_description_length)
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "permission",
            type=str,
            location="json",
            choices=(DatasetPermissionEnum.ONLY_ME, DatasetPermissionEnum.ALL_TEAM, DatasetPermissionEnum.PARTIAL_TEAM),
            help="Invalid permission.",
        )
        parser.add_argument("embedding_model", type=str, location="json", help="Invalid embedding model.")
        parser.add_argument(
            "embedding_model_provider", type=str, location="json", help="Invalid embedding model provider."
        )
        parser.add_argument("retrieval_model", type=dict, location="json", help="Invalid retrieval model.")
        parser.add_argument("partial_member_list", type=list, location="json", help="Invalid parent user list.")
        parser.add_argument(
            "external_retrieval_model",
            type=dict,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external retrieval model.",
        )
        parser.add_argument(
            "external_knowledge_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge id.",
        )
        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge api id.",
        )
        args = parser.parse_args()
        data = request.get_json()

        # check embedding model setting
        if data.get("indexing_technique") == "high_quality":
            DatasetService.check_embedding_model_setting(
                dataset.tenant_id, data.get("embedding_model_provider"), data.get("embedding_model")
            )

        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
        DatasetPermissionService.check_permission(
            current_user, dataset, data.get("permission"), data.get("partial_member_list")
        )

        dataset = DatasetService.update_dataset(dataset_id_str, args, current_user)

        if dataset is None:
            raise NotFound("Dataset not found.")

        result_data = marshal(dataset, dataset_detail_fields)
        tenant_id = current_user.current_tenant_id

        if data.get("partial_member_list") and data.get("permission") == "partial_members":
            DatasetPermissionService.update_partial_member_list(
                tenant_id, dataset_id_str, data.get("partial_member_list")
            )
        # clear partial member list when permission is only_me or all_team_members
        elif (
            data.get("permission") == DatasetPermissionEnum.ONLY_ME
            or data.get("permission") == DatasetPermissionEnum.ALL_TEAM
        ):
            DatasetPermissionService.clear_partial_member_list(dataset_id_str)

        partial_member_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
        result_data.update({"partial_member_list": partial_member_list})

        return result_data, 200

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def delete(self, dataset_id):
        dataset_id_str = str(dataset_id)

        # The role of the current user in the ta table must be admin, owner, or editor
        if not current_user.is_editor or current_user.is_dataset_operator:
            raise Forbidden()

        try:
            if DatasetService.delete_dataset(dataset_id_str, current_user):
                DatasetPermissionService.clear_partial_member_list(dataset_id_str)
                return {"result": "success"}, 204
            else:
                raise NotFound("Dataset not found.")
        except services.errors.dataset.DatasetInUseError:
            raise DatasetInUseError()
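
# Illustrative PATCH body for DatasetApi (hypothetical values; keys mirror the
# reqparse arguments above):
#   {"name": "Product FAQ", "indexing_technique": "high_quality",
#    "embedding_model": "<embedding-model-name>", "embedding_model_provider": "<provider-id>",
#    "permission": "partial_members", "partial_member_list": ["<account-id>"]}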


class DatasetUseCheckApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)

        dataset_is_using = DatasetService.dataset_use_check(dataset_id_str)
        return {"is_using": dataset_is_using}, 200


class DatasetQueryApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)

        dataset_queries, total = DatasetService.get_dataset_queries(dataset_id=dataset.id, page=page, per_page=limit)

        response = {
            "data": marshal(dataset_queries, dataset_query_detail_fields),
            "has_more": len(dataset_queries) == limit,
            "limit": limit,
            "total": total,
            "page": page,
        }
        return response, 200


class DatasetIndexingEstimateApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("info_list", type=dict, required=True, nullable=True, location="json")
        parser.add_argument("process_rule", type=dict, required=True, nullable=True, location="json")
        parser.add_argument(
            "indexing_technique",
            type=str,
            required=True,
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            location="json",
        )
        parser.add_argument("doc_form", type=str, default="text_model", required=False, nullable=False, location="json")
        parser.add_argument("dataset_id", type=str, required=False, nullable=False, location="json")
        parser.add_argument(
            "doc_language", type=str, default="English", required=False, nullable=False, location="json"
        )
        args = parser.parse_args()

        # validate args
        DocumentService.estimate_args_validate(args)
        extract_settings = []
        if args["info_list"]["data_source_type"] == "upload_file":
            file_ids = args["info_list"]["file_info_list"]["file_ids"]
            file_details = (
                db.session.query(UploadFile)
                .filter(UploadFile.tenant_id == current_user.current_tenant_id, UploadFile.id.in_(file_ids))
                .all()
            )

            if file_details is None:
                raise NotFound("File not found.")

            if file_details:
                for file_detail in file_details:
                    extract_setting = ExtractSetting(
                        datasource_type="upload_file", upload_file=file_detail, document_model=args["doc_form"]
                    )
                    extract_settings.append(extract_setting)
        elif args["info_list"]["data_source_type"] == "notion_import":
            notion_info_list = args["info_list"]["notion_info_list"]
            for notion_info in notion_info_list:
                workspace_id = notion_info["workspace_id"]
                for page in notion_info["pages"]:
                    extract_setting = ExtractSetting(
                        datasource_type="notion_import",
                        notion_info={
                            "notion_workspace_id": workspace_id,
                            "notion_obj_id": page["page_id"],
                            "notion_page_type": page["type"],
                            "tenant_id": current_user.current_tenant_id,
                        },
                        document_model=args["doc_form"],
                    )
                    extract_settings.append(extract_setting)
        elif args["info_list"]["data_source_type"] == "website_crawl":
            website_info_list = args["info_list"]["website_info_list"]
            for url in website_info_list["urls"]:
                extract_setting = ExtractSetting(
                    datasource_type="website_crawl",
                    website_info={
                        "provider": website_info_list["provider"],
                        "job_id": website_info_list["job_id"],
                        "url": url,
                        "tenant_id": current_user.current_tenant_id,
                        "mode": "crawl",
                        "only_main_content": website_info_list["only_main_content"],
                    },
                    document_model=args["doc_form"],
                )
                extract_settings.append(extract_setting)
        else:
            raise ValueError("Data source type not supported")
        indexing_runner = IndexingRunner()
        try:
            response = indexing_runner.indexing_estimate(
                current_user.current_tenant_id,
                extract_settings,
                args["process_rule"],
                args["doc_form"],
                args["doc_language"],
                args["dataset_id"],
                args["indexing_technique"],
            )
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except Exception as e:
            raise IndexingEstimateError(str(e))

        return response.model_dump(), 200
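
# Illustrative indexing-estimate body for an uploaded file (hypothetical ids; the
# exact process_rule schema is validated by DocumentService.estimate_args_validate,
# and "mode": "automatic" below is an assumed example value):
#   {"info_list": {"data_source_type": "upload_file",
#                  "file_info_list": {"file_ids": ["<upload-file-id>"]}},
#    "process_rule": {"mode": "automatic"},
#    "indexing_technique": "high_quality",
#    "doc_form": "text_model",
#    "doc_language": "English"}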


class DatasetRelatedAppListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(related_app_list)
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        app_dataset_joins = DatasetService.get_related_apps(dataset.id)

        related_apps = []
        for app_dataset_join in app_dataset_joins:
            app_model = app_dataset_join.app
            if app_model:
                related_apps.append(app_model)

        return {"data": related_apps, "total": len(related_apps)}, 200


class DatasetIndexingStatusApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id = str(dataset_id)
        documents = (
            db.session.query(Document)
            .filter(Document.dataset_id == dataset_id, Document.tenant_id == current_user.current_tenant_id)
            .all()
        )
        documents_status = []
        for document in documents:
            completed_segments = DocumentSegment.query.filter(
                DocumentSegment.completed_at.isnot(None),
                DocumentSegment.document_id == str(document.id),
                DocumentSegment.status != "re_segment",
            ).count()
            total_segments = DocumentSegment.query.filter(
                DocumentSegment.document_id == str(document.id), DocumentSegment.status != "re_segment"
            ).count()
            document.completed_segments = completed_segments
            document.total_segments = total_segments
            documents_status.append(marshal(document, document_status_fields))
        data = {"data": documents_status}
        return data


class DatasetApiKeyApi(Resource):
    max_keys = 10
    token_prefix = "dataset-"
    resource_type = "dataset"

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(api_key_list)
    def get(self):
        keys = (
            db.session.query(ApiToken)
            .filter(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)
            .all()
        )
        return {"items": keys}

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(api_key_fields)
    def post(self):
        # The role of the current user in the ta table must be admin or owner
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        current_key_count = (
            db.session.query(ApiToken)
            .filter(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)
            .count()
        )

        if current_key_count >= self.max_keys:
            flask_restful.abort(
                400,
                message=f"Cannot create more than {self.max_keys} API keys for this resource type.",
                code="max_keys_exceeded",
            )

        key = ApiToken.generate_api_key(self.token_prefix, 24)
        api_token = ApiToken()
        api_token.tenant_id = current_user.current_tenant_id
        api_token.token = key
        api_token.type = self.resource_type
        db.session.add(api_token)
        db.session.commit()
        return api_token, 200
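
# Note: generated dataset keys carry the "dataset-" prefix defined above, and each
# tenant is capped at max_keys (10) API keys of this resource type.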


class DatasetApiDeleteApi(Resource):
    resource_type = "dataset"

    @setup_required
    @login_required
    @account_initialization_required
    def delete(self, api_key_id):
        api_key_id = str(api_key_id)

        # The role of the current user in the ta table must be admin or owner
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        key = (
            db.session.query(ApiToken)
            .filter(
                ApiToken.tenant_id == current_user.current_tenant_id,
                ApiToken.type == self.resource_type,
                ApiToken.id == api_key_id,
            )
            .first()
        )

        if key is None:
            flask_restful.abort(404, message="API key not found")

        db.session.query(ApiToken).filter(ApiToken.id == api_key_id).delete()
        db.session.commit()

        return {"result": "success"}, 204


class DatasetApiBaseUrlApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        return {"api_base_url": (dify_config.SERVICE_API_URL or request.host_url.rstrip("/")) + "/v1"}
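
# Illustrative response (hypothetical host): {"api_base_url": "https://api.example.com/v1"}
# SERVICE_API_URL takes precedence; otherwise the current request's host URL is used.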


class DatasetRetrievalSettingApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        vector_type = dify_config.VECTOR_STORE
        match vector_type:
            case (
                VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.TENCENT
                | VectorType.PGVECTO_RS
                | VectorType.BAIDU
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
                | VectorType.OCEANBASE
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (
                VectorType.QDRANT
                | VectorType.WEAVIATE
                | VectorType.OPENSEARCH
                | VectorType.ANALYTICDB
                | VectorType.MYSCALE
                | VectorType.ORACLE
                | VectorType.ELASTICSEARCH
                | VectorType.ELASTICSEARCH_JA
                | VectorType.PGVECTOR
                | VectorType.TIDB_ON_QDRANT
                | VectorType.LINDORM
                | VectorType.COUCHBASE
                | VectorType.MILVUS
            ):
                return {
                    "retrieval_method": [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
                        RetrievalMethod.FULL_TEXT_SEARCH.value,
                        RetrievalMethod.HYBRID_SEARCH.value,
                    ]
                }
            case _:
                raise ValueError(f"Unsupported vector db type {vector_type}.")
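
# Illustrative response for a store that supports all three methods, assuming the
# RetrievalMethod enum values serialize as shown:
#   {"retrieval_method": ["semantic_search", "full_text_search", "hybrid_search"]}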


class DatasetRetrievalSettingMockApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, vector_type):
        match vector_type:
            case (
                VectorType.MILVUS
                | VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.TENCENT
                | VectorType.PGVECTO_RS
                | VectorType.BAIDU
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
                | VectorType.OCEANBASE
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (
                VectorType.QDRANT
                | VectorType.WEAVIATE
                | VectorType.OPENSEARCH
                | VectorType.ANALYTICDB
                | VectorType.MYSCALE
                | VectorType.ORACLE
                | VectorType.ELASTICSEARCH
                | VectorType.ELASTICSEARCH_JA
                | VectorType.COUCHBASE
                | VectorType.PGVECTOR
                | VectorType.LINDORM
            ):
                return {
                    "retrieval_method": [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
                        RetrievalMethod.FULL_TEXT_SEARCH.value,
                        RetrievalMethod.HYBRID_SEARCH.value,
                    ]
                }
            case _:
                raise ValueError(f"Unsupported vector db type {vector_type}.")


class DatasetErrorDocs(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        results = DocumentService.get_error_documents_by_dataset_id(dataset_id_str)

        return {"data": [marshal(item, document_status_fields) for item in results], "total": len(results)}, 200


class DatasetPermissionUserListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        partial_members_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)

        return {
            "data": partial_members_list,
        }, 200


class DatasetAutoDisableLogApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        return DatasetService.get_dataset_auto_disable_logs(dataset_id_str), 200


api.add_resource(DatasetListApi, "/datasets")
api.add_resource(DatasetApi, "/datasets/<uuid:dataset_id>")
api.add_resource(DatasetUseCheckApi, "/datasets/<uuid:dataset_id>/use-check")
api.add_resource(DatasetQueryApi, "/datasets/<uuid:dataset_id>/queries")
api.add_resource(DatasetErrorDocs, "/datasets/<uuid:dataset_id>/error-docs")
api.add_resource(DatasetIndexingEstimateApi, "/datasets/indexing-estimate")
api.add_resource(DatasetRelatedAppListApi, "/datasets/<uuid:dataset_id>/related-apps")
api.add_resource(DatasetIndexingStatusApi, "/datasets/<uuid:dataset_id>/indexing-status")
api.add_resource(DatasetApiKeyApi, "/datasets/api-keys")
api.add_resource(DatasetApiDeleteApi, "/datasets/api-keys/<uuid:api_key_id>")
api.add_resource(DatasetApiBaseUrlApi, "/datasets/api-base-info")
api.add_resource(DatasetRetrievalSettingApi, "/datasets/retrieval-setting")
api.add_resource(DatasetRetrievalSettingMockApi, "/datasets/retrieval-setting/<string:vector_type>")
api.add_resource(DatasetPermissionUserListApi, "/datasets/<uuid:dataset_id>/permission-part-users")
api.add_resource(DatasetAutoDisableLogApi, "/datasets/<uuid:dataset_id>/auto-disable-logs")
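
# Illustrative route map sketch (paths are relative to the console API mount point,
# which is configured elsewhere and assumed here):
#   GET    /datasets                        -> DatasetListApi.get     (paginated list)
#   POST   /datasets                        -> DatasetListApi.post    (create, returns 201)
#   GET    /datasets/<uuid>                 -> DatasetApi.get
#   PATCH  /datasets/<uuid>                 -> DatasetApi.patch
#   DELETE /datasets/<uuid>                 -> DatasetApi.delete      (204, or DatasetInUseError)
#   POST   /datasets/indexing-estimate      -> DatasetIndexingEstimateApi.post
#   GET    /datasets/retrieval-setting      -> DatasetRetrievalSettingApi.get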