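"""Console API resources for dataset management (datasets.py).

Defines the dataset CRUD, indexing-estimate, indexing-status, retrieval-setting,
and dataset API-key endpoints registered under /datasets on the console API.
"""
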
import flask_restful  # type: ignore
from flask import request
from flask_login import current_user  # type: ignore
from flask_restful import Resource, marshal, marshal_with, reqparse  # type: ignore
from werkzeug.exceptions import Forbidden, NotFound

import services
from configs import dify_config
from controllers.console import api
from controllers.console.apikey import api_key_fields, api_key_list
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import DatasetInUseError, DatasetNameDuplicateError, IndexingEstimateError
from controllers.console.wraps import (
    account_initialization_required,
    cloud_edition_billing_rate_limit_check,
    enterprise_license_required,
    setup_required,
)
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.indexing_runner import IndexingRunner
from core.model_runtime.entities.model_entities import ModelType
from core.provider_manager import ProviderManager
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.extractor.entity.extract_setting import ExtractSetting
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from fields.app_fields import related_app_list
from fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fields
from fields.document_fields import document_status_fields
from libs.login import login_required
from models import ApiToken, Dataset, Document, DocumentSegment, UploadFile
from models.dataset import DatasetPermissionEnum
from services.dataset_service import DatasetPermissionService, DatasetService, DocumentService


def _validate_name(name):
    if not name or len(name) < 1 or len(name) > 40:
        raise ValueError("Name must be between 1 and 40 characters.")
    return name


def _validate_description_length(description):
    if len(description) > 400:
        raise ValueError("Description cannot exceed 400 characters.")
    return description
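
# e.g. _validate_name("docs") returns "docs"; _validate_name("") raises ValueError,
# as does a description longer than 400 characters in _validate_description_length.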


class DatasetListApi(Resource):
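    """List datasets for the current tenant and create new ones."""
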
    @setup_required
    @login_required
    @account_initialization_required
    @enterprise_license_required
    def get(self):
        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)
        ids = request.args.getlist("ids")
        # provider = request.args.get("provider", default="vendor")
        search = request.args.get("keyword", default=None, type=str)
        tag_ids = request.args.getlist("tag_ids")
        include_all = request.args.get("include_all", default="false").lower() == "true"

        if ids:
            datasets, total = DatasetService.get_datasets_by_ids(ids, current_user.current_tenant_id)
        else:
            datasets, total = DatasetService.get_datasets(
                page, limit, current_user.current_tenant_id, current_user, search, tag_ids, include_all
            )

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)

        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)

        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        data = marshal(datasets, dataset_detail_fields)
        for item in data:
            if item["indexing_technique"] == "high_quality":
                item_model = f"{item['embedding_model']}:{item['embedding_model_provider']}"
                if item_model in model_names:
                    item["embedding_available"] = True
                else:
                    item["embedding_available"] = False
            else:
                item["embedding_available"] = True

            if item.get("permission") == "partial_members":
                part_users_list = DatasetPermissionService.get_dataset_partial_member_list(item["id"])
                item.update({"partial_member_list": part_users_list})
            else:
                item.update({"partial_member_list": []})

        response = {"data": data, "has_more": len(datasets) == limit, "limit": limit, "total": total, "page": page}
        return response, 200

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            required=True,
            help="Name is required. Name must be between 1 and 40 characters.",
            type=_validate_name,
        )
        parser.add_argument(
            "description",
            type=str,
            nullable=True,
            required=False,
            default="",
        )
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            nullable=True,
            required=False,
        )
        parser.add_argument(
            "provider",
            type=str,
            nullable=True,
            choices=Dataset.PROVIDER_LIST,
            required=False,
            default="vendor",
        )
        parser.add_argument(
            "external_knowledge_id",
            type=str,
            nullable=True,
            required=False,
        )
        args = parser.parse_args()

        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
        if not current_user.is_dataset_editor:
            raise Forbidden()

        try:
            dataset = DatasetService.create_empty_dataset(
                tenant_id=current_user.current_tenant_id,
                name=args["name"],
                description=args["description"],
                indexing_technique=args["indexing_technique"],
                account=current_user,
                permission=DatasetPermissionEnum.ONLY_ME,
                provider=args["provider"],
                external_knowledge_api_id=args["external_knowledge_api_id"],
                external_knowledge_id=args["external_knowledge_id"],
            )
        except services.errors.dataset.DatasetNameDuplicateError:
            raise DatasetNameDuplicateError()

        return marshal(dataset, dataset_detail_fields), 201


class DatasetApi(Resource):
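    """Fetch, update, or delete a single dataset."""
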
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        data = marshal(dataset, dataset_detail_fields)

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)

        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)

        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        if data["indexing_technique"] == "high_quality":
            item_model = f"{data['embedding_model']}:{data['embedding_model_provider']}"
            if item_model in model_names:
                data["embedding_available"] = True
            else:
                data["embedding_available"] = False
        else:
            data["embedding_available"] = True

        if data.get("permission") == "partial_members":
            part_users_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
            data.update({"partial_member_list": part_users_list})

        return data, 200

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def patch(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            help="Name is required. Name must be between 1 and 40 characters.",
            type=_validate_name,
        )
        parser.add_argument("description", location="json", store_missing=False, type=_validate_description_length)
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "permission",
            type=str,
            location="json",
            choices=(DatasetPermissionEnum.ONLY_ME, DatasetPermissionEnum.ALL_TEAM, DatasetPermissionEnum.PARTIAL_TEAM),
            help="Invalid permission.",
        )
        parser.add_argument("embedding_model", type=str, location="json", help="Invalid embedding model.")
        parser.add_argument(
            "embedding_model_provider", type=str, location="json", help="Invalid embedding model provider."
        )
        parser.add_argument("retrieval_model", type=dict, location="json", help="Invalid retrieval model.")
        parser.add_argument("partial_member_list", type=list, location="json", help="Invalid partial member list.")
        parser.add_argument(
            "external_retrieval_model",
            type=dict,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external retrieval model.",
        )
        parser.add_argument(
            "external_knowledge_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge id.",
        )
        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge api id.",
        )
        args = parser.parse_args()
        data = request.get_json()

        # check embedding model setting
        if data.get("indexing_technique") == "high_quality":
            DatasetService.check_embedding_model_setting(
                dataset.tenant_id, data.get("embedding_model_provider"), data.get("embedding_model")
            )

        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
        DatasetPermissionService.check_permission(
            current_user, dataset, data.get("permission"), data.get("partial_member_list")
        )

        dataset = DatasetService.update_dataset(dataset_id_str, args, current_user)

        if dataset is None:
            raise NotFound("Dataset not found.")

        result_data = marshal(dataset, dataset_detail_fields)
        tenant_id = current_user.current_tenant_id

        if data.get("partial_member_list") and data.get("permission") == "partial_members":
            DatasetPermissionService.update_partial_member_list(
                tenant_id, dataset_id_str, data.get("partial_member_list")
            )
        # clear partial member list when permission is only_me or all_team_members
        elif (
            data.get("permission") == DatasetPermissionEnum.ONLY_ME
            or data.get("permission") == DatasetPermissionEnum.ALL_TEAM
        ):
            DatasetPermissionService.clear_partial_member_list(dataset_id_str)

        partial_member_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
        result_data.update({"partial_member_list": partial_member_list})

        return result_data, 200

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def delete(self, dataset_id):
        dataset_id_str = str(dataset_id)

        # The role of the current user in the ta table must be admin, owner, or editor
        if not current_user.is_editor or current_user.is_dataset_operator:
            raise Forbidden()

        try:
            if DatasetService.delete_dataset(dataset_id_str, current_user):
                DatasetPermissionService.clear_partial_member_list(dataset_id_str)
                return {"result": "success"}, 204
            else:
                raise NotFound("Dataset not found.")
        except services.errors.dataset.DatasetInUseError:
            raise DatasetInUseError()


class DatasetUseCheckApi(Resource):
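    """Check whether a dataset is currently in use."""
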
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)

        dataset_is_using = DatasetService.dataset_use_check(dataset_id_str)
        return {"is_using": dataset_is_using}, 200


class DatasetQueryApi(Resource):
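    """Paginate the query history recorded for a dataset."""
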
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)

        dataset_queries, total = DatasetService.get_dataset_queries(dataset_id=dataset.id, page=page, per_page=limit)

        response = {
            "data": marshal(dataset_queries, dataset_query_detail_fields),
            "has_more": len(dataset_queries) == limit,
            "limit": limit,
            "total": total,
            "page": page,
        }
        return response, 200


class DatasetIndexingEstimateApi(Resource):
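    """Estimate indexing cost for uploaded files, Notion pages, or crawled websites."""
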
    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("info_list", type=dict, required=True, nullable=True, location="json")
        parser.add_argument("process_rule", type=dict, required=True, nullable=True, location="json")
        parser.add_argument(
            "indexing_technique",
            type=str,
            required=True,
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            location="json",
        )
        parser.add_argument("doc_form", type=str, default="text_model", required=False, nullable=False, location="json")
        parser.add_argument("dataset_id", type=str, required=False, nullable=False, location="json")
        parser.add_argument(
            "doc_language", type=str, default="English", required=False, nullable=False, location="json"
        )
        args = parser.parse_args()

        # validate args
        DocumentService.estimate_args_validate(args)
        extract_settings = []
        if args["info_list"]["data_source_type"] == "upload_file":
            file_ids = args["info_list"]["file_info_list"]["file_ids"]
            file_details = (
                db.session.query(UploadFile)
                .filter(UploadFile.tenant_id == current_user.current_tenant_id, UploadFile.id.in_(file_ids))
                .all()
            )

            if file_details is None:
                raise NotFound("File not found.")

            if file_details:
                for file_detail in file_details:
                    extract_setting = ExtractSetting(
                        datasource_type="upload_file", upload_file=file_detail, document_model=args["doc_form"]
                    )
                    extract_settings.append(extract_setting)
        elif args["info_list"]["data_source_type"] == "notion_import":
            notion_info_list = args["info_list"]["notion_info_list"]
            for notion_info in notion_info_list:
                workspace_id = notion_info["workspace_id"]
                for page in notion_info["pages"]:
                    extract_setting = ExtractSetting(
                        datasource_type="notion_import",
                        notion_info={
                            "notion_workspace_id": workspace_id,
                            "notion_obj_id": page["page_id"],
                            "notion_page_type": page["type"],
                            "tenant_id": current_user.current_tenant_id,
                        },
                        document_model=args["doc_form"],
                    )
                    extract_settings.append(extract_setting)
        elif args["info_list"]["data_source_type"] == "website_crawl":
            website_info_list = args["info_list"]["website_info_list"]
            for url in website_info_list["urls"]:
                extract_setting = ExtractSetting(
                    datasource_type="website_crawl",
                    website_info={
                        "provider": website_info_list["provider"],
                        "job_id": website_info_list["job_id"],
                        "url": url,
                        "tenant_id": current_user.current_tenant_id,
                        "mode": "crawl",
                        "only_main_content": website_info_list["only_main_content"],
                    },
                    document_model=args["doc_form"],
                )
                extract_settings.append(extract_setting)
        else:
            raise ValueError("Data source type is not supported.")

        indexing_runner = IndexingRunner()
        try:
            response = indexing_runner.indexing_estimate(
                current_user.current_tenant_id,
                extract_settings,
                args["process_rule"],
                args["doc_form"],
                args["doc_language"],
                args["dataset_id"],
                args["indexing_technique"],
            )
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except Exception as e:
            raise IndexingEstimateError(str(e))

        return response.model_dump(), 200


class DatasetRelatedAppListApi(Resource):
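    """List the apps linked to a dataset."""
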
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(related_app_list)
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        app_dataset_joins = DatasetService.get_related_apps(dataset.id)

        related_apps = []
        for app_dataset_join in app_dataset_joins:
            app_model = app_dataset_join.app
            if app_model:
                related_apps.append(app_model)

        return {"data": related_apps, "total": len(related_apps)}, 200


class DatasetIndexingStatusApi(Resource):
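    """Report per-document segment completion for a dataset's indexing run."""
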
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id = str(dataset_id)
        documents = (
            db.session.query(Document)
            .filter(Document.dataset_id == dataset_id, Document.tenant_id == current_user.current_tenant_id)
            .all()
        )
        documents_status = []
        for document in documents:
            completed_segments = DocumentSegment.query.filter(
                DocumentSegment.completed_at.isnot(None),
                DocumentSegment.document_id == str(document.id),
                DocumentSegment.status != "re_segment",
            ).count()
            total_segments = DocumentSegment.query.filter(
                DocumentSegment.document_id == str(document.id), DocumentSegment.status != "re_segment"
            ).count()
            document.completed_segments = completed_segments
            document.total_segments = total_segments
            documents_status.append(marshal(document, document_status_fields))
        data = {"data": documents_status}
        return data


class DatasetApiKeyApi(Resource):
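    """List and create service API keys for datasets."""
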
    max_keys = 10
    token_prefix = "dataset-"
    resource_type = "dataset"

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(api_key_list)
    def get(self):
        keys = (
            db.session.query(ApiToken)
            .filter(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)
            .all()
        )
        return {"items": keys}

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(api_key_fields)
    def post(self):
        # The role of the current user in the ta table must be admin or owner
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        current_key_count = (
            db.session.query(ApiToken)
            .filter(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)
            .count()
        )

        if current_key_count >= self.max_keys:
            flask_restful.abort(
                400,
                message=f"Cannot create more than {self.max_keys} API keys for this resource type.",
                code="max_keys_exceeded",
            )

        key = ApiToken.generate_api_key(self.token_prefix, 24)
        api_token = ApiToken()
        api_token.tenant_id = current_user.current_tenant_id
        api_token.token = key
        api_token.type = self.resource_type
        db.session.add(api_token)
        db.session.commit()
        return api_token, 200


class DatasetApiDeleteApi(Resource):
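    """Delete a dataset service API key."""
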
    resource_type = "dataset"

    @setup_required
    @login_required
    @account_initialization_required
    def delete(self, api_key_id):
        api_key_id = str(api_key_id)

        # The role of the current user in the ta table must be admin or owner
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        key = (
            db.session.query(ApiToken)
            .filter(
                ApiToken.tenant_id == current_user.current_tenant_id,
                ApiToken.type == self.resource_type,
                ApiToken.id == api_key_id,
            )
            .first()
        )

        if key is None:
            flask_restful.abort(404, message="API key not found")

        db.session.query(ApiToken).filter(ApiToken.id == api_key_id).delete()
        db.session.commit()

        return {"result": "success"}, 204


class DatasetApiBaseUrlApi(Resource):
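    """Expose the base URL used for service API calls."""
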
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        return {"api_base_url": (dify_config.SERVICE_API_URL or request.host_url.rstrip("/")) + "/v1"}


class DatasetRetrievalSettingApi(Resource):
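    """Report the retrieval methods supported by the configured vector store."""
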
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        vector_type = dify_config.VECTOR_STORE
        match vector_type:
            case (
                VectorType.MILVUS
                | VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.TENCENT
                | VectorType.PGVECTO_RS
                | VectorType.BAIDU
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
                | VectorType.OCEANBASE
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (
                VectorType.QDRANT
                | VectorType.WEAVIATE
                | VectorType.OPENSEARCH
                | VectorType.ANALYTICDB
                | VectorType.MYSCALE
                | VectorType.ORACLE
                | VectorType.ELASTICSEARCH
                | VectorType.ELASTICSEARCH_JA
                | VectorType.PGVECTOR
                | VectorType.TIDB_ON_QDRANT
                | VectorType.LINDORM
                | VectorType.COUCHBASE
            ):
                return {
                    "retrieval_method": [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
                        RetrievalMethod.FULL_TEXT_SEARCH.value,
                        RetrievalMethod.HYBRID_SEARCH.value,
                    ]
                }
            case _:
                raise ValueError(f"Unsupported vector db type {vector_type}.")


class DatasetRetrievalSettingMockApi(Resource):
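    """Report retrieval methods for a vector store type passed in the URL."""
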
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, vector_type):
        match vector_type:
            case (
                VectorType.MILVUS
                | VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.TENCENT
                | VectorType.PGVECTO_RS
                | VectorType.BAIDU
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
                | VectorType.OCEANBASE
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (
                VectorType.QDRANT
                | VectorType.WEAVIATE
                | VectorType.OPENSEARCH
                | VectorType.ANALYTICDB
                | VectorType.MYSCALE
                | VectorType.ORACLE
                | VectorType.ELASTICSEARCH
                | VectorType.ELASTICSEARCH_JA
                | VectorType.COUCHBASE
                | VectorType.PGVECTOR
                | VectorType.LINDORM
            ):
                return {
                    "retrieval_method": [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
                        RetrievalMethod.FULL_TEXT_SEARCH.value,
                        RetrievalMethod.HYBRID_SEARCH.value,
                    ]
                }
            case _:
                raise ValueError(f"Unsupported vector db type {vector_type}.")


class DatasetErrorDocs(Resource):
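    """List documents in a dataset that are in an error state."""
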
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        results = DocumentService.get_error_documents_by_dataset_id(dataset_id_str)

        return {"data": [marshal(item, document_status_fields) for item in results], "total": len(results)}, 200


class DatasetPermissionUserListApi(Resource):
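    """List the members a partially shared dataset is visible to."""
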
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        partial_members_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)

        return {
            "data": partial_members_list,
        }, 200


class DatasetAutoDisableLogApi(Resource):
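    """Return the auto-disable logs for a dataset."""
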
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        return DatasetService.get_dataset_auto_disable_logs(dataset_id_str), 200


api.add_resource(DatasetListApi, "/datasets")
api.add_resource(DatasetApi, "/datasets/<uuid:dataset_id>")
api.add_resource(DatasetUseCheckApi, "/datasets/<uuid:dataset_id>/use-check")
api.add_resource(DatasetQueryApi, "/datasets/<uuid:dataset_id>/queries")
api.add_resource(DatasetErrorDocs, "/datasets/<uuid:dataset_id>/error-docs")
api.add_resource(DatasetIndexingEstimateApi, "/datasets/indexing-estimate")
api.add_resource(DatasetRelatedAppListApi, "/datasets/<uuid:dataset_id>/related-apps")
api.add_resource(DatasetIndexingStatusApi, "/datasets/<uuid:dataset_id>/indexing-status")
api.add_resource(DatasetApiKeyApi, "/datasets/api-keys")
api.add_resource(DatasetApiDeleteApi, "/datasets/api-keys/<uuid:api_key_id>")
api.add_resource(DatasetApiBaseUrlApi, "/datasets/api-base-info")
api.add_resource(DatasetRetrievalSettingApi, "/datasets/retrieval-setting")
api.add_resource(DatasetRetrievalSettingMockApi, "/datasets/retrieval-setting/<string:vector_type>")
api.add_resource(DatasetPermissionUserListApi, "/datasets/<uuid:dataset_id>/permission-part-users")
api.add_resource(DatasetAutoDisableLogApi, "/datasets/<uuid:dataset_id>/auto-disable-logs")
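
# A minimal usage sketch, not part of this module: the console endpoints above are
# session-authenticated, so a client would log in first and reuse the session cookie.
# The host and the "/console/api" prefix below are assumptions about how the parent
# blueprint is mounted, not something this file defines.
#
#   import requests
#
#   session = requests.Session()
#   # ... authenticate the session against the console API ...
#   resp = session.get("http://localhost:5001/console/api/datasets", params={"page": 1, "limit": 20})
#   print(resp.json()["total"])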