# datasets.py
  1. # -*- coding:utf-8 -*-
  2. from flask import request
  3. from flask_login import current_user
  4. from core.login.login import login_required
  5. from flask_restful import Resource, reqparse, fields, marshal, marshal_with
  6. from werkzeug.exceptions import NotFound, Forbidden
  7. import services
  8. from controllers.console import api
  9. from controllers.console.app.error import ProviderNotInitializeError
  10. from controllers.console.datasets.error import DatasetNameDuplicateError
  11. from controllers.console.setup import setup_required
  12. from controllers.console.wraps import account_initialization_required
  13. from core.indexing_runner import IndexingRunner
  14. from core.model_providers.error import LLMBadRequestError, ProviderTokenNotInitError
  15. from core.model_providers.model_factory import ModelFactory
  16. from core.model_providers.models.entity.model_params import ModelType
  17. from libs.helper import TimestampField
  18. from extensions.ext_database import db
  19. from models.dataset import DocumentSegment, Document
  20. from models.model import UploadFile
  21. from services.dataset_service import DatasetService, DocumentService
  22. from services.provider_service import ProviderService
# Serialization schema for a dataset detail response, consumed by
# flask_restful's marshal()/marshal_with(). Field order here determines the
# order of keys in the JSON output, so keep it stable.
dataset_detail_fields = {
    'id': fields.String,
    'name': fields.String,
    'description': fields.String,
    'provider': fields.String,
    'permission': fields.String,
    'data_source_type': fields.String,
    'indexing_technique': fields.String,
    'app_count': fields.Integer,
    'document_count': fields.Integer,
    'word_count': fields.Integer,
    'created_by': fields.String,
    'created_at': TimestampField,  # serialized as a Unix timestamp
    'updated_by': fields.String,
    'updated_at': TimestampField,
    'embedding_model': fields.String,
    'embedding_model_provider': fields.String,
    # Overwritten per item by DatasetListApi.get: True when the dataset's
    # embedding model/provider pair is still valid for the tenant.
    'embedding_available': fields.Boolean
}
# Serialization schema for one entry of a dataset's query history
# (used by DatasetQueryApi.get).
dataset_query_detail_fields = {
    "id": fields.String,
    "content": fields.String,           # the query text itself
    "source": fields.String,
    "source_app_id": fields.String,     # app that issued the query, if any
    "created_by_role": fields.String,
    "created_by": fields.String,
    "created_at": TimestampField        # serialized as a Unix timestamp
}
  51. def _validate_name(name):
  52. if not name or len(name) < 1 or len(name) > 40:
  53. raise ValueError('Name must be between 1 to 40 characters.')
  54. return name
  55. def _validate_description_length(description):
  56. if len(description) > 400:
  57. raise ValueError('Description cannot exceed 400 characters.')
  58. return description
  59. class DatasetListApi(Resource):
  60. @setup_required
  61. @login_required
  62. @account_initialization_required
  63. def get(self):
  64. page = request.args.get('page', default=1, type=int)
  65. limit = request.args.get('limit', default=20, type=int)
  66. ids = request.args.getlist('ids')
  67. provider = request.args.get('provider', default="vendor")
  68. if ids:
  69. datasets, total = DatasetService.get_datasets_by_ids(ids, current_user.current_tenant_id)
  70. else:
  71. datasets, total = DatasetService.get_datasets(page, limit, provider,
  72. current_user.current_tenant_id, current_user)
  73. # check embedding setting
  74. provider_service = ProviderService()
  75. valid_model_list = provider_service.get_valid_model_list(current_user.current_tenant_id, ModelType.EMBEDDINGS.value)
  76. # if len(valid_model_list) == 0:
  77. # raise ProviderNotInitializeError(
  78. # f"No Embedding Model available. Please configure a valid provider "
  79. # f"in the Settings -> Model Provider.")
  80. model_names = []
  81. for valid_model in valid_model_list:
  82. model_names.append(f"{valid_model['model_name']}:{valid_model['model_provider']['provider_name']}")
  83. data = marshal(datasets, dataset_detail_fields)
  84. for item in data:
  85. if item['indexing_technique'] == 'high_quality':
  86. item_model = f"{item['embedding_model']}:{item['embedding_model_provider']}"
  87. if item_model in model_names:
  88. item['embedding_available'] = True
  89. else:
  90. item['embedding_available'] = False
  91. else:
  92. item['embedding_available'] = True
  93. response = {
  94. 'data': data,
  95. 'has_more': len(datasets) == limit,
  96. 'limit': limit,
  97. 'total': total,
  98. 'page': page
  99. }
  100. return response, 200
  101. @setup_required
  102. @login_required
  103. @account_initialization_required
  104. def post(self):
  105. parser = reqparse.RequestParser()
  106. parser.add_argument('name', nullable=False, required=True,
  107. help='type is required. Name must be between 1 to 40 characters.',
  108. type=_validate_name)
  109. parser.add_argument('indexing_technique', type=str, location='json',
  110. choices=('high_quality', 'economy'),
  111. help='Invalid indexing technique.')
  112. args = parser.parse_args()
  113. # The role of the current user in the ta table must be admin or owner
  114. if current_user.current_tenant.current_role not in ['admin', 'owner']:
  115. raise Forbidden()
  116. try:
  117. dataset = DatasetService.create_empty_dataset(
  118. tenant_id=current_user.current_tenant_id,
  119. name=args['name'],
  120. indexing_technique=args['indexing_technique'],
  121. account=current_user
  122. )
  123. except services.errors.dataset.DatasetNameDuplicateError:
  124. raise DatasetNameDuplicateError()
  125. return marshal(dataset, dataset_detail_fields), 201
  126. class DatasetApi(Resource):
  127. @setup_required
  128. @login_required
  129. @account_initialization_required
  130. def get(self, dataset_id):
  131. dataset_id_str = str(dataset_id)
  132. dataset = DatasetService.get_dataset(dataset_id_str)
  133. if dataset is None:
  134. raise NotFound("Dataset not found.")
  135. try:
  136. DatasetService.check_dataset_permission(
  137. dataset, current_user)
  138. except services.errors.account.NoPermissionError as e:
  139. raise Forbidden(str(e))
  140. return marshal(dataset, dataset_detail_fields), 200
  141. @setup_required
  142. @login_required
  143. @account_initialization_required
  144. def patch(self, dataset_id):
  145. dataset_id_str = str(dataset_id)
  146. dataset = DatasetService.get_dataset(dataset_id_str)
  147. if dataset is None:
  148. raise NotFound("Dataset not found.")
  149. # check user's model setting
  150. DatasetService.check_dataset_model_setting(dataset)
  151. parser = reqparse.RequestParser()
  152. parser.add_argument('name', nullable=False,
  153. help='type is required. Name must be between 1 to 40 characters.',
  154. type=_validate_name)
  155. parser.add_argument('description',
  156. location='json', store_missing=False,
  157. type=_validate_description_length)
  158. parser.add_argument('indexing_technique', type=str, location='json',
  159. choices=('high_quality', 'economy'),
  160. help='Invalid indexing technique.')
  161. parser.add_argument('permission', type=str, location='json', choices=(
  162. 'only_me', 'all_team_members'), help='Invalid permission.')
  163. args = parser.parse_args()
  164. # The role of the current user in the ta table must be admin or owner
  165. if current_user.current_tenant.current_role not in ['admin', 'owner']:
  166. raise Forbidden()
  167. dataset = DatasetService.update_dataset(
  168. dataset_id_str, args, current_user)
  169. if dataset is None:
  170. raise NotFound("Dataset not found.")
  171. return marshal(dataset, dataset_detail_fields), 200
  172. @setup_required
  173. @login_required
  174. @account_initialization_required
  175. def delete(self, dataset_id):
  176. dataset_id_str = str(dataset_id)
  177. # The role of the current user in the ta table must be admin or owner
  178. if current_user.current_tenant.current_role not in ['admin', 'owner']:
  179. raise Forbidden()
  180. if DatasetService.delete_dataset(dataset_id_str, current_user):
  181. return {'result': 'success'}, 204
  182. else:
  183. raise NotFound("Dataset not found.")
  184. class DatasetQueryApi(Resource):
  185. @setup_required
  186. @login_required
  187. @account_initialization_required
  188. def get(self, dataset_id):
  189. dataset_id_str = str(dataset_id)
  190. dataset = DatasetService.get_dataset(dataset_id_str)
  191. if dataset is None:
  192. raise NotFound("Dataset not found.")
  193. try:
  194. DatasetService.check_dataset_permission(dataset, current_user)
  195. except services.errors.account.NoPermissionError as e:
  196. raise Forbidden(str(e))
  197. page = request.args.get('page', default=1, type=int)
  198. limit = request.args.get('limit', default=20, type=int)
  199. dataset_queries, total = DatasetService.get_dataset_queries(
  200. dataset_id=dataset.id,
  201. page=page,
  202. per_page=limit
  203. )
  204. response = {
  205. 'data': marshal(dataset_queries, dataset_query_detail_fields),
  206. 'has_more': len(dataset_queries) == limit,
  207. 'limit': limit,
  208. 'total': total,
  209. 'page': page
  210. }
  211. return response, 200
class DatasetIndexingEstimateApi(Resource):
    """Estimate indexing cost/preview for a data source before importing it."""

    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        """Run an indexing estimate for the posted data source.

        Dispatches on ``info_list.data_source_type``: 'upload_file' runs a
        file-based estimate, 'notion_import' a Notion-page estimate; anything
        else raises ValueError. Raises NotFound for missing upload files and
        ProviderNotInitializeError when no embedding model is configured.
        """
        parser = reqparse.RequestParser()
        # NOTE(review): required=True combined with nullable=True permits an
        # explicit JSON null for these three arguments — confirm intentional.
        parser.add_argument('info_list', type=dict, required=True, nullable=True, location='json')
        parser.add_argument('process_rule', type=dict, required=True, nullable=True, location='json')
        parser.add_argument('indexing_technique', type=str, required=True, nullable=True, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
        parser.add_argument('dataset_id', type=str, required=False, nullable=False, location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False, location='json')
        args = parser.parse_args()
        # validate args
        DocumentService.estimate_args_validate(args)
        if args['info_list']['data_source_type'] == 'upload_file':
            file_ids = args['info_list']['file_info_list']['file_ids']
            # Only files owned by the current tenant are considered.
            file_details = db.session.query(UploadFile).filter(
                UploadFile.tenant_id == current_user.current_tenant_id,
                UploadFile.id.in_(file_ids)
            ).all()
            # NOTE(review): Query.all() returns a list, never None, so this
            # check looks unreachable — an empty result falls through.
            if file_details is None:
                raise NotFound("File not found.")
            indexing_runner = IndexingRunner()
            try:
                response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, file_details,
                                                                  args['process_rule'], args['doc_form'],
                                                                  args['doc_language'], args['dataset_id'],
                                                                  args['indexing_technique'])
            except LLMBadRequestError:
                # Estimation needs a usable embedding model; surface a
                # configuration error rather than the raw LLM failure.
                raise ProviderNotInitializeError(
                    f"No Embedding Model available. Please configure a valid provider "
                    f"in the Settings -> Model Provider.")
            except ProviderTokenNotInitError as ex:
                raise ProviderNotInitializeError(ex.description)
        elif args['info_list']['data_source_type'] == 'notion_import':
            indexing_runner = IndexingRunner()
            try:
                response = indexing_runner.notion_indexing_estimate(current_user.current_tenant_id,
                                                                    args['info_list']['notion_info_list'],
                                                                    args['process_rule'], args['doc_form'],
                                                                    args['doc_language'], args['dataset_id'],
                                                                    args['indexing_technique'])
            except LLMBadRequestError:
                raise ProviderNotInitializeError(
                    f"No Embedding Model available. Please configure a valid provider "
                    f"in the Settings -> Model Provider.")
            except ProviderTokenNotInitError as ex:
                raise ProviderNotInitializeError(ex.description)
        else:
            raise ValueError('Data source type not support')
        return response, 200
  264. class DatasetRelatedAppListApi(Resource):
  265. app_detail_kernel_fields = {
  266. 'id': fields.String,
  267. 'name': fields.String,
  268. 'mode': fields.String,
  269. 'icon': fields.String,
  270. 'icon_background': fields.String,
  271. }
  272. related_app_list = {
  273. 'data': fields.List(fields.Nested(app_detail_kernel_fields)),
  274. 'total': fields.Integer,
  275. }
  276. @setup_required
  277. @login_required
  278. @account_initialization_required
  279. @marshal_with(related_app_list)
  280. def get(self, dataset_id):
  281. dataset_id_str = str(dataset_id)
  282. dataset = DatasetService.get_dataset(dataset_id_str)
  283. if dataset is None:
  284. raise NotFound("Dataset not found.")
  285. try:
  286. DatasetService.check_dataset_permission(dataset, current_user)
  287. except services.errors.account.NoPermissionError as e:
  288. raise Forbidden(str(e))
  289. app_dataset_joins = DatasetService.get_related_apps(dataset.id)
  290. related_apps = []
  291. for app_dataset_join in app_dataset_joins:
  292. app_model = app_dataset_join.app
  293. if app_model:
  294. related_apps.append(app_model)
  295. return {
  296. 'data': related_apps,
  297. 'total': len(related_apps)
  298. }, 200
  299. class DatasetIndexingStatusApi(Resource):
  300. document_status_fields = {
  301. 'id': fields.String,
  302. 'indexing_status': fields.String,
  303. 'processing_started_at': TimestampField,
  304. 'parsing_completed_at': TimestampField,
  305. 'cleaning_completed_at': TimestampField,
  306. 'splitting_completed_at': TimestampField,
  307. 'completed_at': TimestampField,
  308. 'paused_at': TimestampField,
  309. 'error': fields.String,
  310. 'stopped_at': TimestampField,
  311. 'completed_segments': fields.Integer,
  312. 'total_segments': fields.Integer,
  313. }
  314. document_status_fields_list = {
  315. 'data': fields.List(fields.Nested(document_status_fields))
  316. }
  317. @setup_required
  318. @login_required
  319. @account_initialization_required
  320. def get(self, dataset_id):
  321. dataset_id = str(dataset_id)
  322. documents = db.session.query(Document).filter(
  323. Document.dataset_id == dataset_id,
  324. Document.tenant_id == current_user.current_tenant_id
  325. ).all()
  326. documents_status = []
  327. for document in documents:
  328. completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
  329. DocumentSegment.document_id == str(document.id),
  330. DocumentSegment.status != 're_segment').count()
  331. total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
  332. DocumentSegment.status != 're_segment').count()
  333. document.completed_segments = completed_segments
  334. document.total_segments = total_segments
  335. documents_status.append(marshal(document, self.document_status_fields))
  336. data = {
  337. 'data': documents_status
  338. }
  339. return data
# Route registrations for the console dataset API.
api.add_resource(DatasetListApi, '/datasets')
api.add_resource(DatasetApi, '/datasets/<uuid:dataset_id>')
api.add_resource(DatasetQueryApi, '/datasets/<uuid:dataset_id>/queries')
api.add_resource(DatasetIndexingEstimateApi, '/datasets/indexing-estimate')
api.add_resource(DatasetRelatedAppListApi, '/datasets/<uuid:dataset_id>/related-apps')
api.add_resource(DatasetIndexingStatusApi, '/datasets/<uuid:dataset_id>/indexing-status')