# datasets.py

import flask_restful  # type: ignore
from flask import request
from flask_login import current_user  # type: ignore
from flask_restful import Resource, marshal, marshal_with, reqparse  # type: ignore
from werkzeug.exceptions import Forbidden, NotFound

import services
from configs import dify_config
from controllers.console import api
from controllers.console.apikey import api_key_fields, api_key_list
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import DatasetInUseError, DatasetNameDuplicateError, IndexingEstimateError
from controllers.console.wraps import (
    account_initialization_required,
    cloud_edition_billing_rate_limit_check,
    enterprise_license_required,
    setup_required,
)
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.indexing_runner import IndexingRunner
from core.model_runtime.entities.model_entities import ModelType
from core.plugin.entities.plugin import ModelProviderID
from core.provider_manager import ProviderManager
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.extractor.entity.extract_setting import ExtractSetting
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from fields.app_fields import related_app_list
from fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fields
from fields.document_fields import document_status_fields
from libs.login import login_required
from models import ApiToken, Dataset, Document, DocumentSegment, UploadFile
from models.dataset import DatasetPermissionEnum
from services.dataset_service import DatasetPermissionService, DatasetService, DocumentService


def _validate_name(name):
    if not name or len(name) < 1 or len(name) > 40:
        raise ValueError("Name must be between 1 and 40 characters.")
    return name


def _validate_description_length(description):
    if len(description) > 400:
        raise ValueError("Description cannot exceed 400 characters.")
    return description
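
# A quick sketch of how these validators behave when reqparse invokes them as
# `type` callables (hypothetical values, for illustration only):
#   _validate_name("My Dataset")             -> "My Dataset"
#   _validate_name("")                       -> raises ValueError
#   _validate_description_length("x" * 401)  -> raises ValueError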


class DatasetListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    @enterprise_license_required
    def get(self):
        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)
        ids = request.args.getlist("ids")
        # provider = request.args.get("provider", default="vendor")
        search = request.args.get("keyword", default=None, type=str)
        tag_ids = request.args.getlist("tag_ids")
        include_all = request.args.get("include_all", default="false").lower() == "true"

        if ids:
            datasets, total = DatasetService.get_datasets_by_ids(ids, current_user.current_tenant_id)
        else:
            datasets, total = DatasetService.get_datasets(
                page, limit, current_user.current_tenant_id, current_user, search, tag_ids, include_all
            )

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)

        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)

        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        data = marshal(datasets, dataset_detail_fields)
        for item in data:
            # convert embedding_model_provider to plugin standard format
            if item["indexing_technique"] == "high_quality":
                item["embedding_model_provider"] = str(ModelProviderID(item["embedding_model_provider"]))
                item_model = f"{item['embedding_model']}:{item['embedding_model_provider']}"
                item["embedding_available"] = item_model in model_names
            else:
                item["embedding_available"] = True

            if item.get("permission") == "partial_members":
                part_users_list = DatasetPermissionService.get_dataset_partial_member_list(item["id"])
                item.update({"partial_member_list": part_users_list})
            else:
                item.update({"partial_member_list": []})

        response = {"data": data, "has_more": len(datasets) == limit, "limit": limit, "total": total, "page": page}
        return response, 200

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            required=True,
            help="Name is required. Name must be between 1 and 40 characters.",
            type=_validate_name,
        )
        parser.add_argument(
            "description",
            type=str,
            nullable=True,
            required=False,
            default="",
        )
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            nullable=True,
            required=False,
        )
        parser.add_argument(
            "provider",
            type=str,
            nullable=True,
            choices=Dataset.PROVIDER_LIST,
            required=False,
            default="vendor",
        )
        parser.add_argument(
            "external_knowledge_id",
            type=str,
            nullable=True,
            required=False,
        )
        args = parser.parse_args()

        # The current user's role in the tenant must be admin, owner, editor, or dataset_operator
        if not current_user.is_dataset_editor:
            raise Forbidden()

        try:
            dataset = DatasetService.create_empty_dataset(
                tenant_id=current_user.current_tenant_id,
                name=args["name"],
                description=args["description"],
                indexing_technique=args["indexing_technique"],
                account=current_user,
                permission=DatasetPermissionEnum.ONLY_ME,
                provider=args["provider"],
                external_knowledge_api_id=args["external_knowledge_api_id"],
                external_knowledge_id=args["external_knowledge_id"],
            )
        except services.errors.dataset.DatasetNameDuplicateError:
            raise DatasetNameDuplicateError()

        return marshal(dataset, dataset_detail_fields), 201
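
# Example console requests (hypothetical values; assumes the console API blueprint
# is mounted under /console/api, as in the default deployment):
#   GET  /console/api/datasets?page=1&limit=20&keyword=faq&include_all=false
#   POST /console/api/datasets  {"name": "My Dataset", "indexing_technique": "high_quality"}
# POST requires a dataset-editor role and returns 201 with the marshalled dataset.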


class DatasetApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))
        data = marshal(dataset, dataset_detail_fields)

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)

        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)

        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        if data["indexing_technique"] == "high_quality":
            item_model = f"{data['embedding_model']}:{data['embedding_model_provider']}"
            data["embedding_available"] = item_model in model_names
        else:
            data["embedding_available"] = True

        if data.get("permission") == "partial_members":
            part_users_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
            data.update({"partial_member_list": part_users_list})

        return data, 200
    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def patch(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            help="Name is required. Name must be between 1 and 40 characters.",
            type=_validate_name,
        )
        parser.add_argument("description", location="json", store_missing=False, type=_validate_description_length)
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "permission",
            type=str,
            location="json",
            choices=(DatasetPermissionEnum.ONLY_ME, DatasetPermissionEnum.ALL_TEAM, DatasetPermissionEnum.PARTIAL_TEAM),
            help="Invalid permission.",
        )
        parser.add_argument("embedding_model", type=str, location="json", help="Invalid embedding model.")
        parser.add_argument(
            "embedding_model_provider", type=str, location="json", help="Invalid embedding model provider."
        )
        parser.add_argument("retrieval_model", type=dict, location="json", help="Invalid retrieval model.")
        parser.add_argument("partial_member_list", type=list, location="json", help="Invalid partial member list.")
        parser.add_argument(
            "external_retrieval_model",
            type=dict,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external retrieval model.",
        )
        parser.add_argument(
            "external_knowledge_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge id.",
        )
        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge api id.",
        )
        args = parser.parse_args()
        data = request.get_json()

        # check embedding model setting
        if (
            data.get("indexing_technique") == "high_quality"
            and data.get("embedding_model_provider") is not None
            and data.get("embedding_model") is not None
        ):
            DatasetService.check_embedding_model_setting(
                dataset.tenant_id, data.get("embedding_model_provider"), data.get("embedding_model")
            )

        # The current user's role in the tenant must be admin, owner, editor, or dataset_operator
        DatasetPermissionService.check_permission(
            current_user, dataset, data.get("permission"), data.get("partial_member_list")
        )

        dataset = DatasetService.update_dataset(dataset_id_str, args, current_user)

        if dataset is None:
            raise NotFound("Dataset not found.")

        result_data = marshal(dataset, dataset_detail_fields)
        tenant_id = current_user.current_tenant_id

        if data.get("partial_member_list") and data.get("permission") == "partial_members":
            DatasetPermissionService.update_partial_member_list(
                tenant_id, dataset_id_str, data.get("partial_member_list")
            )
        # clear partial member list when permission is only_me or all_team_members
        elif (
            data.get("permission") == DatasetPermissionEnum.ONLY_ME
            or data.get("permission") == DatasetPermissionEnum.ALL_TEAM
        ):
            DatasetPermissionService.clear_partial_member_list(dataset_id_str)

        partial_member_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
        result_data.update({"partial_member_list": partial_member_list})

        return result_data, 200
    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def delete(self, dataset_id):
        dataset_id_str = str(dataset_id)

        # The current user's role in the tenant must be admin, owner, or editor
        if not current_user.is_editor or current_user.is_dataset_operator:
            raise Forbidden()

        try:
            if DatasetService.delete_dataset(dataset_id_str, current_user):
                DatasetPermissionService.clear_partial_member_list(dataset_id_str)
                return {"result": "success"}, 204
            else:
                raise NotFound("Dataset not found.")
        except services.errors.dataset.DatasetInUseError:
            raise DatasetInUseError()
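
# Example lifecycle for a single dataset (hypothetical UUID, routes as registered below):
#   GET    /datasets/<uuid>   -> dataset detail plus the computed embedding_available flag
#   PATCH  /datasets/<uuid>   {"name": "Renamed", "permission": "only_me"}
#   DELETE /datasets/<uuid>   -> 204 on success; DatasetInUseError if an app still references it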


class DatasetUseCheckApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset_is_using = DatasetService.dataset_use_check(dataset_id_str)
        return {"is_using": dataset_is_using}, 200


class DatasetQueryApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)

        dataset_queries, total = DatasetService.get_dataset_queries(dataset_id=dataset.id, page=page, per_page=limit)

        response = {
            "data": marshal(dataset_queries, dataset_query_detail_fields),
            "has_more": len(dataset_queries) == limit,
            "limit": limit,
            "total": total,
            "page": page,
        }
        return response, 200
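
# The paginated endpoints above share this response shape (illustrative values):
#   {"data": [...], "has_more": true, "limit": 20, "total": 135, "page": 1}
# Note that has_more is derived from a full page (len == limit), so an exactly
# full last page still reports has_more as true.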


class DatasetIndexingEstimateApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("info_list", type=dict, required=True, nullable=True, location="json")
        parser.add_argument("process_rule", type=dict, required=True, nullable=True, location="json")
        parser.add_argument(
            "indexing_technique",
            type=str,
            required=True,
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            location="json",
        )
        parser.add_argument("doc_form", type=str, default="text_model", required=False, nullable=False, location="json")
        parser.add_argument("dataset_id", type=str, required=False, nullable=False, location="json")
        parser.add_argument(
            "doc_language", type=str, default="English", required=False, nullable=False, location="json"
        )
        args = parser.parse_args()

        # validate args
        DocumentService.estimate_args_validate(args)
        extract_settings = []
        if args["info_list"]["data_source_type"] == "upload_file":
            file_ids = args["info_list"]["file_info_list"]["file_ids"]
            file_details = (
                db.session.query(UploadFile)
                .filter(UploadFile.tenant_id == current_user.current_tenant_id, UploadFile.id.in_(file_ids))
                .all()
            )
            # .all() returns a list, never None, so check for emptiness
            if not file_details:
                raise NotFound("File not found.")
            for file_detail in file_details:
                extract_setting = ExtractSetting(
                    datasource_type="upload_file", upload_file=file_detail, document_model=args["doc_form"]
                )
                extract_settings.append(extract_setting)
        elif args["info_list"]["data_source_type"] == "notion_import":
            notion_info_list = args["info_list"]["notion_info_list"]
            for notion_info in notion_info_list:
                workspace_id = notion_info["workspace_id"]
                for page in notion_info["pages"]:
                    extract_setting = ExtractSetting(
                        datasource_type="notion_import",
                        notion_info={
                            "notion_workspace_id": workspace_id,
                            "notion_obj_id": page["page_id"],
                            "notion_page_type": page["type"],
                            "tenant_id": current_user.current_tenant_id,
                        },
                        document_model=args["doc_form"],
                    )
                    extract_settings.append(extract_setting)
        elif args["info_list"]["data_source_type"] == "website_crawl":
            website_info_list = args["info_list"]["website_info_list"]
            for url in website_info_list["urls"]:
                extract_setting = ExtractSetting(
                    datasource_type="website_crawl",
                    website_info={
                        "provider": website_info_list["provider"],
                        "job_id": website_info_list["job_id"],
                        "url": url,
                        "tenant_id": current_user.current_tenant_id,
                        "mode": "crawl",
                        "only_main_content": website_info_list["only_main_content"],
                    },
                    document_model=args["doc_form"],
                )
                extract_settings.append(extract_setting)
        else:
            raise ValueError("Data source type is not supported.")

        indexing_runner = IndexingRunner()
        try:
            response = indexing_runner.indexing_estimate(
                current_user.current_tenant_id,
                extract_settings,
                args["process_rule"],
                args["doc_form"],
                args["doc_language"],
                args["dataset_id"],
                args["indexing_technique"],
            )
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                "No Embedding Model available. Please configure a valid provider in Settings -> Model Provider."
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except Exception as e:
            raise IndexingEstimateError(str(e))

        return response.model_dump(), 200
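
# Illustrative estimate payload for an upload_file source (hypothetical IDs; the
# exact process_rule schema is validated by DocumentService.estimate_args_validate):
#   {
#       "info_list": {
#           "data_source_type": "upload_file",
#           "file_info_list": {"file_ids": ["<upload_file_id>"]},
#       },
#       "process_rule": {"mode": "automatic", "rules": {}},
#       "indexing_technique": "high_quality",
#       "doc_form": "text_model",
#       "doc_language": "English",
#   }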


class DatasetRelatedAppListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(related_app_list)
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        app_dataset_joins = DatasetService.get_related_apps(dataset.id)

        related_apps = []
        for app_dataset_join in app_dataset_joins:
            app_model = app_dataset_join.app
            if app_model:
                related_apps.append(app_model)

        return {"data": related_apps, "total": len(related_apps)}, 200


class DatasetIndexingStatusApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id = str(dataset_id)
        documents = (
            db.session.query(Document)
            .filter(Document.dataset_id == dataset_id, Document.tenant_id == current_user.current_tenant_id)
            .all()
        )
        documents_status = []
        for document in documents:
            completed_segments = DocumentSegment.query.filter(
                DocumentSegment.completed_at.isnot(None),
                DocumentSegment.document_id == str(document.id),
                DocumentSegment.status != "re_segment",
            ).count()
            total_segments = DocumentSegment.query.filter(
                DocumentSegment.document_id == str(document.id), DocumentSegment.status != "re_segment"
            ).count()
            document.completed_segments = completed_segments
            document.total_segments = total_segments
            documents_status.append(marshal(document, document_status_fields))
        data = {"data": documents_status}
        return data
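
# Each entry in "data" is a document marshalled with document_status_fields plus the
# two counters computed above, e.g. (illustrative):
#   {"id": "...", "indexing_status": "indexing", "completed_segments": 12, "total_segments": 40}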


class DatasetApiKeyApi(Resource):
    max_keys = 10
    token_prefix = "dataset-"
    resource_type = "dataset"

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(api_key_list)
    def get(self):
        keys = (
            db.session.query(ApiToken)
            .filter(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)
            .all()
        )
        return {"items": keys}

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(api_key_fields)
    def post(self):
        # The current user's role in the tenant must be admin or owner
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        current_key_count = (
            db.session.query(ApiToken)
            .filter(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)
            .count()
        )

        if current_key_count >= self.max_keys:
            flask_restful.abort(
                400,
                message=f"Cannot create more than {self.max_keys} API keys for this resource type.",
                code="max_keys_exceeded",
            )

        key = ApiToken.generate_api_key(self.token_prefix, 24)
        api_token = ApiToken()
        api_token.tenant_id = current_user.current_tenant_id
        api_token.token = key
        api_token.type = self.resource_type
        db.session.add(api_token)
        db.session.commit()
        return api_token, 200
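
# Generated keys carry the "dataset-" token_prefix (e.g. "dataset-AbC123", illustrative),
# and each tenant is capped at max_keys (10) tokens of this resource type.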


class DatasetApiDeleteApi(Resource):
    resource_type = "dataset"

    @setup_required
    @login_required
    @account_initialization_required
    def delete(self, api_key_id):
        api_key_id = str(api_key_id)

        # The current user's role in the tenant must be admin or owner
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        key = (
            db.session.query(ApiToken)
            .filter(
                ApiToken.tenant_id == current_user.current_tenant_id,
                ApiToken.type == self.resource_type,
                ApiToken.id == api_key_id,
            )
            .first()
        )

        if key is None:
            flask_restful.abort(404, message="API key not found")

        db.session.query(ApiToken).filter(ApiToken.id == api_key_id).delete()
        db.session.commit()

        return {"result": "success"}, 204


class DatasetApiBaseUrlApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        return {"api_base_url": (dify_config.SERVICE_API_URL or request.host_url.rstrip("/")) + "/v1"}
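
# When dify_config.SERVICE_API_URL is unset, the base URL falls back to the request
# host, e.g. (illustrative): {"api_base_url": "https://cloud.example.com/v1"}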


class DatasetRetrievalSettingApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        vector_type = dify_config.VECTOR_STORE
        match vector_type:
            case (
                VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.TENCENT
                | VectorType.PGVECTO_RS
                | VectorType.BAIDU
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
                | VectorType.OCEANBASE
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (
                VectorType.QDRANT
                | VectorType.WEAVIATE
                | VectorType.OPENSEARCH
                | VectorType.ANALYTICDB
                | VectorType.MYSCALE
                | VectorType.ORACLE
                | VectorType.ELASTICSEARCH
                | VectorType.ELASTICSEARCH_JA
                | VectorType.PGVECTOR
                | VectorType.TIDB_ON_QDRANT
                | VectorType.LINDORM
                | VectorType.COUCHBASE
                | VectorType.MILVUS
            ):
                return {
                    "retrieval_method": [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
                        RetrievalMethod.FULL_TEXT_SEARCH.value,
                        RetrievalMethod.HYBRID_SEARCH.value,
                    ]
                }
            case _:
                raise ValueError(f"Unsupported vector db type {vector_type}.")


class DatasetRetrievalSettingMockApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, vector_type):
        match vector_type:
            case (
                VectorType.MILVUS
                | VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.TENCENT
                | VectorType.PGVECTO_RS
                | VectorType.BAIDU
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
                | VectorType.OCEANBASE
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (
                VectorType.QDRANT
                | VectorType.WEAVIATE
                | VectorType.OPENSEARCH
                | VectorType.ANALYTICDB
                | VectorType.MYSCALE
                | VectorType.ORACLE
                | VectorType.ELASTICSEARCH
                | VectorType.ELASTICSEARCH_JA
                | VectorType.COUCHBASE
                | VectorType.PGVECTOR
                | VectorType.LINDORM
            ):
                return {
                    "retrieval_method": [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
                        RetrievalMethod.FULL_TEXT_SEARCH.value,
                        RetrievalMethod.HYBRID_SEARCH.value,
                    ]
                }
            case _:
                raise ValueError(f"Unsupported vector db type {vector_type}.")
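
# Illustrative response for a hybrid-capable store (e.g. VECTOR_STORE=qdrant), assuming
# the RetrievalMethod enum values are the lowercase strings used elsewhere in the API:
#   {"retrieval_method": ["semantic_search", "full_text_search", "hybrid_search"]}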


class DatasetErrorDocs(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        results = DocumentService.get_error_documents_by_dataset_id(dataset_id_str)

        return {"data": [marshal(item, document_status_fields) for item in results], "total": len(results)}, 200


class DatasetPermissionUserListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        partial_members_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)

        return {
            "data": partial_members_list,
        }, 200


class DatasetAutoDisableLogApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        return DatasetService.get_dataset_auto_disable_logs(dataset_id_str), 200


api.add_resource(DatasetListApi, "/datasets")
api.add_resource(DatasetApi, "/datasets/<uuid:dataset_id>")
api.add_resource(DatasetUseCheckApi, "/datasets/<uuid:dataset_id>/use-check")
api.add_resource(DatasetQueryApi, "/datasets/<uuid:dataset_id>/queries")
api.add_resource(DatasetErrorDocs, "/datasets/<uuid:dataset_id>/error-docs")
api.add_resource(DatasetIndexingEstimateApi, "/datasets/indexing-estimate")
api.add_resource(DatasetRelatedAppListApi, "/datasets/<uuid:dataset_id>/related-apps")
api.add_resource(DatasetIndexingStatusApi, "/datasets/<uuid:dataset_id>/indexing-status")
api.add_resource(DatasetApiKeyApi, "/datasets/api-keys")
api.add_resource(DatasetApiDeleteApi, "/datasets/api-keys/<uuid:api_key_id>")
api.add_resource(DatasetApiBaseUrlApi, "/datasets/api-base-info")
api.add_resource(DatasetRetrievalSettingApi, "/datasets/retrieval-setting")
api.add_resource(DatasetRetrievalSettingMockApi, "/datasets/retrieval-setting/<string:vector_type>")
api.add_resource(DatasetPermissionUserListApi, "/datasets/<uuid:dataset_id>/permission-part-users")
api.add_resource(DatasetAutoDisableLogApi, "/datasets/<uuid:dataset_id>/auto-disable-logs")
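
# All routes above are relative to the console API blueprint; assuming the default
# /console/api prefix, the retrieval setting for the configured VECTOR_STORE is
# served at GET /console/api/datasets/retrieval-setting.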