datasets_document.py

# -*- coding:utf-8 -*-
import random
from datetime import datetime
from typing import List

from flask import request
from flask_login import current_user
from core.login.login import login_required
from flask_restful import Resource, fields, marshal, marshal_with, reqparse
from sqlalchemy import desc, asc
from werkzeug.exceptions import NotFound, Forbidden

import services
from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError, ProviderQuotaExceededError, \
    ProviderModelCurrentlyNotSupportError
from controllers.console.datasets.error import DocumentAlreadyFinishedError, InvalidActionError, DocumentIndexingError, \
    InvalidMetadataError, ArchivedDocumentImmutableError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.indexing_runner import IndexingRunner
from core.model_providers.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError, \
    LLMBadRequestError
from core.model_providers.model_factory import ModelFactory
from extensions.ext_redis import redis_client
from libs.helper import TimestampField
from extensions.ext_database import db
from models.dataset import DatasetProcessRule, Dataset
from models.dataset import Document, DocumentSegment
from models.model import UploadFile
from services.dataset_service import DocumentService, DatasetService
from tasks.add_document_to_index_task import add_document_to_index_task
from tasks.remove_document_from_index_task import remove_document_from_index_task

dataset_fields = {
    'id': fields.String,
    'name': fields.String,
    'description': fields.String,
    'permission': fields.String,
    'data_source_type': fields.String,
    'indexing_technique': fields.String,
    'created_by': fields.String,
    'created_at': TimestampField,
}

document_fields = {
    'id': fields.String,
    'position': fields.Integer,
    'data_source_type': fields.String,
    'data_source_info': fields.Raw(attribute='data_source_info_dict'),
    'dataset_process_rule_id': fields.String,
    'name': fields.String,
    'created_from': fields.String,
    'created_by': fields.String,
    'created_at': TimestampField,
    'tokens': fields.Integer,
    'indexing_status': fields.String,
    'error': fields.String,
    'enabled': fields.Boolean,
    'disabled_at': TimestampField,
    'disabled_by': fields.String,
    'archived': fields.Boolean,
    'display_status': fields.String,
    'word_count': fields.Integer,
    'hit_count': fields.Integer,
    'doc_form': fields.String,
}

document_with_segments_fields = {
    'id': fields.String,
    'position': fields.Integer,
    'data_source_type': fields.String,
    'data_source_info': fields.Raw(attribute='data_source_info_dict'),
    'dataset_process_rule_id': fields.String,
    'name': fields.String,
    'created_from': fields.String,
    'created_by': fields.String,
    'created_at': TimestampField,
    'tokens': fields.Integer,
    'indexing_status': fields.String,
    'error': fields.String,
    'enabled': fields.Boolean,
    'disabled_at': TimestampField,
    'disabled_by': fields.String,
    'archived': fields.Boolean,
    'display_status': fields.String,
    'word_count': fields.Integer,
    'hit_count': fields.Integer,
    'completed_segments': fields.Integer,
    'total_segments': fields.Integer
}
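
# Illustrative only: a minimal sketch of how these field maps drive
# flask_restful's marshal(). The keys select attributes on a Document instance
# and the field types control serialization (TimestampField renders datetimes
# as epoch seconds). The sample output values below are assumptions.
#
#   serialized = marshal(document, document_fields)
#   # -> {'id': '...', 'position': 1, 'created_at': 1690000000, ...}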

class DocumentResource(Resource):
    def get_document(self, dataset_id: str, document_id: str) -> Document:
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        document = DocumentService.get_document(dataset_id, document_id)

        if not document:
            raise NotFound('Document not found.')

        if document.tenant_id != current_user.current_tenant_id:
            raise Forbidden('No permission.')

        return document

    def get_batch_documents(self, dataset_id: str, batch: str) -> List[Document]:
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        documents = DocumentService.get_batch_documents(dataset_id, batch)

        if not documents:
            raise NotFound('Documents not found.')

        return documents

class GetProcessRuleApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        req_data = request.args

        document_id = req_data.get('document_id')
        if document_id:
            # get the latest process rule
            document = Document.query.get_or_404(document_id)

            dataset = DatasetService.get_dataset(document.dataset_id)

            if not dataset:
                raise NotFound('Dataset not found.')

            try:
                DatasetService.check_dataset_permission(dataset, current_user)
            except services.errors.account.NoPermissionError as e:
                raise Forbidden(str(e))

            # get the latest process rule
            dataset_process_rule = db.session.query(DatasetProcessRule). \
                filter(DatasetProcessRule.dataset_id == document.dataset_id). \
                order_by(DatasetProcessRule.created_at.desc()). \
                limit(1). \
                one_or_none()
            mode = dataset_process_rule.mode
            rules = dataset_process_rule.rules_dict
        else:
            mode = DocumentService.DEFAULT_RULES['mode']
            rules = DocumentService.DEFAULT_RULES['rules']

        return {
            'mode': mode,
            'rules': rules
        }

class DatasetDocumentListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id = str(dataset_id)
        page = request.args.get('page', default=1, type=int)
        limit = request.args.get('limit', default=20, type=int)
        search = request.args.get('keyword', default=None, type=str)
        sort = request.args.get('sort', default='-created_at', type=str)
        fetch = request.args.get('fetch', default=False, type=bool)

        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        query = Document.query.filter_by(
            dataset_id=str(dataset_id), tenant_id=current_user.current_tenant_id)

        if search:
            search = f'%{search}%'
            query = query.filter(Document.name.like(search))

        if sort.startswith('-'):
            sort_logic = desc
            sort = sort[1:]
        else:
            sort_logic = asc

        if sort == 'hit_count':
            sub_query = db.select(DocumentSegment.document_id,
                                  db.func.sum(DocumentSegment.hit_count).label("total_hit_count")) \
                .group_by(DocumentSegment.document_id) \
                .subquery()

            query = query.outerjoin(sub_query, sub_query.c.document_id == Document.id) \
                .order_by(sort_logic(db.func.coalesce(sub_query.c.total_hit_count, 0)))
        elif sort == 'created_at':
            query = query.order_by(sort_logic(Document.created_at))
        else:
            query = query.order_by(desc(Document.created_at))

        paginated_documents = query.paginate(
            page=page, per_page=limit, max_per_page=100, error_out=False)
        documents = paginated_documents.items
        if fetch:
            for document in documents:
                completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
                                                                  DocumentSegment.document_id == str(document.id),
                                                                  DocumentSegment.status != 're_segment').count()
                total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
                                                              DocumentSegment.status != 're_segment').count()
                document.completed_segments = completed_segments
                document.total_segments = total_segments
            data = marshal(documents, document_with_segments_fields)
        else:
            data = marshal(documents, document_fields)
        response = {
            'data': data,
            'has_more': len(documents) == limit,
            'limit': limit,
            'total': paginated_documents.total,
            'page': page
        }

        return response

    documents_and_batch_fields = {
        'documents': fields.List(fields.Nested(document_fields)),
        'batch': fields.String
    }

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(documents_and_batch_fields)
    def post(self, dataset_id):
        dataset_id = str(dataset_id)

        dataset = DatasetService.get_dataset(dataset_id)

        if not dataset:
            raise NotFound('Dataset not found.')

        # The role of the current user in the ta table must be admin or owner
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        parser = reqparse.RequestParser()
        parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False,
                            location='json')
        parser.add_argument('data_source', type=dict, required=False, location='json')
        parser.add_argument('process_rule', type=dict, required=False, location='json')
        parser.add_argument('duplicate', type=bool, nullable=False, location='json')
        parser.add_argument('original_document_id', type=str, required=False, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False, location='json')
        args = parser.parse_args()

        if not dataset.indexing_technique and not args['indexing_technique']:
            raise ValueError('indexing_technique is required.')

        # validate args
        DocumentService.document_create_args_validate(args)

        # check embedding model setting
        try:
            ModelFactory.get_embedding_model(
                tenant_id=current_user.current_tenant_id,
                model_provider_name=dataset.embedding_model_provider,
                model_name=dataset.embedding_model
            )
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                f"No Embedding Model available. Please configure a valid provider "
                f"in the Settings -> Model Provider.")
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(dataset, args, current_user)
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        return {
            'documents': documents,
            'batch': batch
        }
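
# Illustrative request body for the POST handler above. Only the top-level keys
# come from the reqparse arguments; the inner shapes of 'data_source' and
# 'process_rule' are assumptions and are checked by
# DocumentService.document_create_args_validate.
#
#   POST /datasets/<dataset_id>/documents
#   {
#       "indexing_technique": "high_quality",   # must be in Dataset.INDEXING_TECHNIQUE_LIST
#       "doc_form": "text_model",
#       "doc_language": "English",
#       "duplicate": true,
#       "data_source": {...},                   # upload_file / notion_import details (assumed shape)
#       "process_rule": {...}                   # segmentation / pre-processing rules (assumed shape)
#   }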

class DatasetInitApi(Resource):
    dataset_and_document_fields = {
        'dataset': fields.Nested(dataset_fields),
        'documents': fields.List(fields.Nested(document_fields)),
        'batch': fields.String
    }

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(dataset_and_document_fields)
    def post(self):
        # The role of the current user in the ta table must be admin or owner
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        parser = reqparse.RequestParser()
        parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, required=True,
                            nullable=False, location='json')
        parser.add_argument('data_source', type=dict, required=True, nullable=True, location='json')
        parser.add_argument('process_rule', type=dict, required=True, nullable=True, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False, location='json')
        args = parser.parse_args()

        try:
            ModelFactory.get_embedding_model(
                tenant_id=current_user.current_tenant_id
            )
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                f"No Embedding Model available. Please configure a valid provider "
                f"in the Settings -> Model Provider.")

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            dataset, documents, batch = DocumentService.save_document_without_dataset_id(
                tenant_id=current_user.current_tenant_id,
                document_data=args,
                account=current_user
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        response = {
            'dataset': dataset,
            'documents': documents,
            'batch': batch
        }

        return response

class DocumentIndexingEstimateApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        if document.indexing_status in ['completed', 'error']:
            raise DocumentAlreadyFinishedError()

        data_process_rule = document.dataset_process_rule
        data_process_rule_dict = data_process_rule.to_dict()

        response = {
            "tokens": 0,
            "total_price": 0,
            "currency": "USD",
            "total_segments": 0,
            "preview": []
        }

        if document.data_source_type == 'upload_file':
            data_source_info = document.data_source_info_dict
            if data_source_info and 'upload_file_id' in data_source_info:
                file_id = data_source_info['upload_file_id']

                file = db.session.query(UploadFile).filter(
                    UploadFile.tenant_id == document.tenant_id,
                    UploadFile.id == file_id
                ).first()

                # raise error if file not found
                if not file:
                    raise NotFound('File not found.')

                indexing_runner = IndexingRunner()

                try:
                    response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, [file],
                                                                      data_process_rule_dict, None, dataset_id)
                except LLMBadRequestError:
                    raise ProviderNotInitializeError(
                        f"No Embedding Model available. Please configure a valid provider "
                        f"in the Settings -> Model Provider.")
                except ProviderTokenNotInitError as ex:
                    raise ProviderNotInitializeError(ex.description)

        return response

class DocumentBatchIndexingEstimateApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        dataset = DatasetService.get_dataset(dataset_id)
        if dataset is None:
            raise NotFound("Dataset not found.")
        documents = self.get_batch_documents(dataset_id, batch)
        response = {
            "tokens": 0,
            "total_price": 0,
            "currency": "USD",
            "total_segments": 0,
            "preview": []
        }
        if not documents:
            return response
        data_process_rule = documents[0].dataset_process_rule
        data_process_rule_dict = data_process_rule.to_dict()
        info_list = []
        for document in documents:
            if document.indexing_status in ['completed', 'error']:
                raise DocumentAlreadyFinishedError()
            data_source_info = document.data_source_info_dict
            # format document files info
            if data_source_info and 'upload_file_id' in data_source_info:
                file_id = data_source_info['upload_file_id']
                info_list.append(file_id)
            # format document notion info
            elif data_source_info and 'notion_workspace_id' in data_source_info and 'notion_page_id' in data_source_info:
                pages = []
                page = {
                    'page_id': data_source_info['notion_page_id'],
                    'type': data_source_info['type']
                }
                pages.append(page)
                notion_info = {
                    'workspace_id': data_source_info['notion_workspace_id'],
                    'pages': pages
                }
                info_list.append(notion_info)

        if dataset.data_source_type == 'upload_file':
            file_details = db.session.query(UploadFile).filter(
                UploadFile.tenant_id == current_user.current_tenant_id,
                UploadFile.id.in_(info_list)
            ).all()

            if file_details is None:
                raise NotFound("File not found.")

            indexing_runner = IndexingRunner()
            try:
                response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, file_details,
                                                                  data_process_rule_dict, None, dataset_id)
            except LLMBadRequestError:
                raise ProviderNotInitializeError(
                    f"No Embedding Model available. Please configure a valid provider "
                    f"in the Settings -> Model Provider.")
            except ProviderTokenNotInitError as ex:
                raise ProviderNotInitializeError(ex.description)
        elif dataset.data_source_type == 'notion_import':
            indexing_runner = IndexingRunner()
            try:
                response = indexing_runner.notion_indexing_estimate(current_user.current_tenant_id,
                                                                    info_list,
                                                                    data_process_rule_dict,
                                                                    None, dataset_id)
            except LLMBadRequestError:
                raise ProviderNotInitializeError(
                    f"No Embedding Model available. Please configure a valid provider "
                    f"in the Settings -> Model Provider.")
            except ProviderTokenNotInitError as ex:
                raise ProviderNotInitializeError(ex.description)
        else:
            raise ValueError('Data source type not support')
        return response

class DocumentBatchIndexingStatusApi(DocumentResource):
    document_status_fields = {
        'id': fields.String,
        'indexing_status': fields.String,
        'processing_started_at': TimestampField,
        'parsing_completed_at': TimestampField,
        'cleaning_completed_at': TimestampField,
        'splitting_completed_at': TimestampField,
        'completed_at': TimestampField,
        'paused_at': TimestampField,
        'error': fields.String,
        'stopped_at': TimestampField,
        'completed_segments': fields.Integer,
        'total_segments': fields.Integer,
    }

    document_status_fields_list = {
        'data': fields.List(fields.Nested(document_status_fields))
    }

    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        documents = self.get_batch_documents(dataset_id, batch)
        documents_status = []
        for document in documents:
            completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
                                                              DocumentSegment.document_id == str(document.id),
                                                              DocumentSegment.status != 're_segment').count()
            total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
                                                          DocumentSegment.status != 're_segment').count()
            document.completed_segments = completed_segments
            document.total_segments = total_segments
            if document.is_paused:
                document.indexing_status = 'paused'
            documents_status.append(marshal(document, self.document_status_fields))
        data = {
            'data': documents_status
        }
        return data

class DocumentIndexingStatusApi(DocumentResource):
    document_status_fields = {
        'id': fields.String,
        'indexing_status': fields.String,
        'processing_started_at': TimestampField,
        'parsing_completed_at': TimestampField,
        'cleaning_completed_at': TimestampField,
        'splitting_completed_at': TimestampField,
        'completed_at': TimestampField,
        'paused_at': TimestampField,
        'error': fields.String,
        'stopped_at': TimestampField,
        'completed_segments': fields.Integer,
        'total_segments': fields.Integer,
    }

    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        completed_segments = DocumentSegment.query \
            .filter(DocumentSegment.completed_at.isnot(None),
                    DocumentSegment.document_id == str(document_id),
                    DocumentSegment.status != 're_segment') \
            .count()
        total_segments = DocumentSegment.query \
            .filter(DocumentSegment.document_id == str(document_id),
                    DocumentSegment.status != 're_segment') \
            .count()

        document.completed_segments = completed_segments
        document.total_segments = total_segments
        if document.is_paused:
            document.indexing_status = 'paused'
        return marshal(document, self.document_status_fields)

class DocumentDetailApi(DocumentResource):
    METADATA_CHOICES = {'all', 'only', 'without'}

    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        metadata = request.args.get('metadata', 'all')
        if metadata not in self.METADATA_CHOICES:
            raise InvalidMetadataError(f'Invalid metadata value: {metadata}')

        if metadata == 'only':
            response = {
                'id': document.id,
                'doc_type': document.doc_type,
                'doc_metadata': document.doc_metadata
            }
        elif metadata == 'without':
            process_rules = DatasetService.get_process_rules(dataset_id)
            data_source_info = document.data_source_detail_dict
            response = {
                'id': document.id,
                'position': document.position,
                'data_source_type': document.data_source_type,
                'data_source_info': data_source_info,
                'dataset_process_rule_id': document.dataset_process_rule_id,
                'dataset_process_rule': process_rules,
                'name': document.name,
                'created_from': document.created_from,
                'created_by': document.created_by,
                'created_at': document.created_at.timestamp(),
                'tokens': document.tokens,
                'indexing_status': document.indexing_status,
                'completed_at': int(document.completed_at.timestamp()) if document.completed_at else None,
                'updated_at': int(document.updated_at.timestamp()) if document.updated_at else None,
                'indexing_latency': document.indexing_latency,
                'error': document.error,
                'enabled': document.enabled,
                'disabled_at': int(document.disabled_at.timestamp()) if document.disabled_at else None,
                'disabled_by': document.disabled_by,
                'archived': document.archived,
                'segment_count': document.segment_count,
                'average_segment_length': document.average_segment_length,
                'hit_count': document.hit_count,
                'display_status': document.display_status,
                'doc_form': document.doc_form
            }
        else:
            process_rules = DatasetService.get_process_rules(dataset_id)
            data_source_info = document.data_source_detail_dict
            response = {
                'id': document.id,
                'position': document.position,
                'data_source_type': document.data_source_type,
                'data_source_info': data_source_info,
                'dataset_process_rule_id': document.dataset_process_rule_id,
                'dataset_process_rule': process_rules,
                'name': document.name,
                'created_from': document.created_from,
                'created_by': document.created_by,
                'created_at': document.created_at.timestamp(),
                'tokens': document.tokens,
                'indexing_status': document.indexing_status,
                'completed_at': int(document.completed_at.timestamp()) if document.completed_at else None,
                'updated_at': int(document.updated_at.timestamp()) if document.updated_at else None,
                'indexing_latency': document.indexing_latency,
                'error': document.error,
                'enabled': document.enabled,
                'disabled_at': int(document.disabled_at.timestamp()) if document.disabled_at else None,
                'disabled_by': document.disabled_by,
                'archived': document.archived,
                'doc_type': document.doc_type,
                'doc_metadata': document.doc_metadata,
                'segment_count': document.segment_count,
                'average_segment_length': document.average_segment_length,
                'hit_count': document.hit_count,
                'display_status': document.display_status,
                'doc_form': document.doc_form
            }

        return response, 200
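
# Illustrative calls for the detail endpoint above; 'metadata' must be one of
# METADATA_CHOICES ('all' is the default):
#   GET /datasets/<dataset_id>/documents/<document_id>                   -> full detail incl. doc metadata
#   GET /datasets/<dataset_id>/documents/<document_id>?metadata=only     -> id, doc_type, doc_metadata only
#   GET /datasets/<dataset_id>/documents/<document_id>?metadata=without  -> detail without doc_type/doc_metadata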

class DocumentProcessingApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id, document_id, action):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        # The role of the current user in the ta table must be admin or owner
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        if action == "pause":
            if document.indexing_status != "indexing":
                raise InvalidActionError('Document not in indexing state.')

            document.paused_by = current_user.id
            document.paused_at = datetime.utcnow()
            document.is_paused = True
            db.session.commit()

        elif action == "resume":
            if document.indexing_status not in ["paused", "error"]:
                raise InvalidActionError('Document not in paused or error state.')

            document.paused_by = None
            document.paused_at = None
            document.is_paused = False
            db.session.commit()
        else:
            raise InvalidActionError()

        return {'result': 'success'}, 200

class DocumentDeleteApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def delete(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        try:
            DocumentService.delete_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot delete document during indexing.')

        return {'result': 'success'}, 204

class DocumentMetadataApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def put(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        req_data = request.get_json()

        doc_type = req_data.get('doc_type')
        doc_metadata = req_data.get('doc_metadata')

        # The role of the current user in the ta table must be admin or owner
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        if doc_type is None or doc_metadata is None:
            raise ValueError('Both doc_type and doc_metadata must be provided.')

        if doc_type not in DocumentService.DOCUMENT_METADATA_SCHEMA:
            raise ValueError('Invalid doc_type.')

        if not isinstance(doc_metadata, dict):
            raise ValueError('doc_metadata must be a dictionary.')

        metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[doc_type]

        document.doc_metadata = {}

        if doc_type == 'others':
            document.doc_metadata = doc_metadata
        else:
            for key, value_type in metadata_schema.items():
                value = doc_metadata.get(key)
                if value is not None and isinstance(value, value_type):
                    document.doc_metadata[key] = value

        document.doc_type = doc_type
        document.updated_at = datetime.utcnow()
        db.session.commit()

        return {'result': 'success', 'message': 'Document metadata updated.'}, 200

class DocumentStatusApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id, document_id, action):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        # The role of the current user in the ta table must be admin or owner
        if current_user.current_tenant.current_role not in ['admin', 'owner']:
            raise Forbidden()

        indexing_cache_key = 'document_{}_indexing'.format(document.id)
        cache_result = redis_client.get(indexing_cache_key)
        if cache_result is not None:
            raise InvalidActionError("Document is being indexed, please try again later")

        if action == "enable":
            if document.enabled:
                raise InvalidActionError('Document already enabled.')

            document.enabled = True
            document.disabled_at = None
            document.disabled_by = None
            document.updated_at = datetime.utcnow()
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            add_document_to_index_task.delay(document_id)

            return {'result': 'success'}, 200

        elif action == "disable":
            if not document.completed_at or document.indexing_status != 'completed':
                raise InvalidActionError('Document is not completed.')
            if not document.enabled:
                raise InvalidActionError('Document already disabled.')

            document.enabled = False
            document.disabled_at = datetime.utcnow()
            document.disabled_by = current_user.id
            document.updated_at = datetime.utcnow()
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            remove_document_from_index_task.delay(document_id)

            return {'result': 'success'}, 200

        elif action == "archive":
            if document.archived:
                raise InvalidActionError('Document already archived.')

            document.archived = True
            document.archived_at = datetime.utcnow()
            document.archived_by = current_user.id
            document.updated_at = datetime.utcnow()
            db.session.commit()

            if document.enabled:
                # Set cache to prevent indexing the same document multiple times
                redis_client.setex(indexing_cache_key, 600, 1)

                remove_document_from_index_task.delay(document_id)

            return {'result': 'success'}, 200
        elif action == "un_archive":
            if not document.archived:
                raise InvalidActionError('Document is not archived.')

            document.archived = False
            document.archived_at = None
            document.archived_by = None
            document.updated_at = datetime.utcnow()
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            add_document_to_index_task.delay(document_id)

            return {'result': 'success'}, 200
        else:
            raise InvalidActionError()

class DocumentPauseApi(DocumentResource):
    def patch(self, dataset_id, document_id):
        """pause document."""
        dataset_id = str(dataset_id)
        document_id = str(document_id)

        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound("Document Not Exists.")

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # pause document
            DocumentService.pause_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot pause completed document.')

        return {'result': 'success'}, 204

class DocumentRecoverApi(DocumentResource):
    def patch(self, dataset_id, document_id):
        """recover document."""
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')
        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound("Document Not Exists.")

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # recover (resume) document
            DocumentService.recover_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Document is not in paused status.')

        return {'result': 'success'}, 204

api.add_resource(GetProcessRuleApi, '/datasets/process-rule')
api.add_resource(DatasetDocumentListApi,
                 '/datasets/<uuid:dataset_id>/documents')
api.add_resource(DatasetInitApi,
                 '/datasets/init')
api.add_resource(DocumentIndexingEstimateApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/indexing-estimate')
api.add_resource(DocumentBatchIndexingEstimateApi,
                 '/datasets/<uuid:dataset_id>/batch/<string:batch>/indexing-estimate')
api.add_resource(DocumentBatchIndexingStatusApi,
                 '/datasets/<uuid:dataset_id>/batch/<string:batch>/indexing-status')
api.add_resource(DocumentIndexingStatusApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/indexing-status')
api.add_resource(DocumentDetailApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>')
api.add_resource(DocumentProcessingApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/<string:action>')
api.add_resource(DocumentDeleteApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>')
api.add_resource(DocumentMetadataApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/metadata')
api.add_resource(DocumentStatusApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/status/<string:action>')
api.add_resource(DocumentPauseApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/pause')
api.add_resource(DocumentRecoverApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/resume')
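
# Illustrative client usage for the routes registered above, assuming the
# console API blueprint is mounted under /console/api (the prefix is defined
# elsewhere and is an assumption here):
#
#   PATCH /console/api/datasets/<dataset_id>/documents/<document_id>/processing/pause
#   PATCH /console/api/datasets/<dataset_id>/documents/<document_id>/processing/resume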