
FEAT: NEW WORKFLOW ENGINE (#3160)

Co-authored-by: Joel <iamjoel007@gmail.com>
Co-authored-by: Yeuoly <admin@srmxy.cn>
Co-authored-by: JzoNg <jzongcode@gmail.com>
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
Co-authored-by: jyong <jyong@dify.ai>
Co-authored-by: nite-knite <nkCoding@gmail.com>
Co-authored-by: jyong <718720800@qq.com>
takatost committed 1 year ago
commit 7753ba2d37
100 changed files with 4164 additions and 2268 deletions (per-file counts in the list below show additions, then deletions)
  1. 8 1
      .github/workflows/api-tests.yml
  2. 1 1
      .github/workflows/build-push.yml
  3. 0 26
      .github/workflows/tool-tests.yaml
  4. 12 1
      api/.env.example
  5. 63 1
      api/commands.py
  6. 14 1
      api/config.py
  7. 0 575
      api/constants/languages.py
  8. 52 30
      api/constants/model_template.py
  9. 534 0
      api/constants/recommended_apps.json
  10. 5 4
      api/controllers/console/__init__.py
  11. 0 21
      api/controllers/console/app/__init__.py
  12. 32 0
      api/controllers/console/app/agent.py
  13. 163 307
      api/controllers/console/app/app.py
  14. 14 15
      api/controllers/console/app/audio.py
  15. 22 53
      api/controllers/console/app/completion.py
  16. 26 38
      api/controllers/console/app/conversation.py
  17. 6 0
      api/controllers/console/app/error.py
  18. 1 1
      api/controllers/console/app/generator.py
  19. 25 106
      api/controllers/console/app/message.py
  20. 82 87
      api/controllers/console/app/model_config.py
  21. 5 9
      api/controllers/console/app/site.py
  22. 16 22
      api/controllers/console/app/statistic.py
  23. 324 0
      api/controllers/console/app/workflow.py
  24. 41 0
      api/controllers/console/app/workflow_app_log.py
  25. 109 0
      api/controllers/console/app/workflow_run.py
  26. 278 0
      api/controllers/console/app/workflow_statistic.py
  27. 55 0
      api/controllers/console/app/wraps.py
  28. 3 12
      api/controllers/console/explore/audio.py
  29. 16 29
      api/controllers/console/explore/completion.py
  30. 11 5
      api/controllers/console/explore/conversation.py
  31. 7 1
      api/controllers/console/explore/error.py
  32. 1 2
      api/controllers/console/explore/installed_app.py
  33. 13 23
      api/controllers/console/explore/message.py
  34. 39 49
      api/controllers/console/explore/parameter.py
  35. 15 98
      api/controllers/console/explore/recommended_app.py
  36. 85 0
      api/controllers/console/explore/workflow.py
  37. 17 0
      api/controllers/console/ping.py
  38. 1 14
      api/controllers/console/workspace/account.py
  39. 3 18
      api/controllers/console/workspace/members.py
  40. 54 9
      api/controllers/console/workspace/tool_providers.py
  41. 1 1
      api/controllers/files/tool_files.py
  42. 1 1
      api/controllers/service_api/__init__.py
  43. 35 45
      api/controllers/service_api/app/app.py
  44. 7 11
      api/controllers/service_api/app/audio.py
  45. 15 27
      api/controllers/service_api/app/completion.py
  46. 8 4
      api/controllers/service_api/app/conversation.py
  47. 7 1
      api/controllers/service_api/app/error.py
  48. 20 7
      api/controllers/service_api/app/message.py
  49. 87 0
      api/controllers/service_api/app/workflow.py
  50. 1 1
      api/controllers/web/__init__.py
  51. 35 45
      api/controllers/web/app.py
  52. 4 14
      api/controllers/web/audio.py
  53. 15 26
      api/controllers/web/completion.py
  54. 11 5
      api/controllers/web/conversation.py
  55. 7 1
      api/controllers/web/error.py
  56. 16 24
      api/controllers/web/message.py
  57. 0 4
      api/controllers/web/site.py
  58. 82 0
      api/controllers/web/workflow.py
  59. 0 0
      api/core/agent/__init__.py
  60. 50 183
      api/core/agent/base_agent_runner.py
  61. 110 90
      api/core/agent/cot_agent_runner.py
  62. 61 0
      api/core/agent/entities.py
  63. 69 82
      api/core/agent/fc_agent_runner.py
  64. 0 0
      api/core/app/__init__.py
  65. 0 0
      api/core/app/app_config/__init__.py
  66. 76 0
      api/core/app/app_config/base_app_config_manager.py
  67. 0 0
      api/core/app/app_config/common/__init__.py
  68. 0 0
      api/core/app/app_config/common/sensitive_word_avoidance/__init__.py
  69. 50 0
      api/core/app/app_config/common/sensitive_word_avoidance/manager.py
  70. 0 0
      api/core/app/app_config/easy_ui_based_app/__init__.py
  71. 0 0
      api/core/app/app_config/easy_ui_based_app/agent/__init__.py
  72. 78 0
      api/core/app/app_config/easy_ui_based_app/agent/manager.py
  73. 0 0
      api/core/app/app_config/easy_ui_based_app/dataset/__init__.py
  74. 224 0
      api/core/app/app_config/easy_ui_based_app/dataset/manager.py
  75. 0 0
      api/core/app/app_config/easy_ui_based_app/model_config/__init__.py
  76. 103 0
      api/core/app/app_config/easy_ui_based_app/model_config/converter.py
  77. 112 0
      api/core/app/app_config/easy_ui_based_app/model_config/manager.py
  78. 0 0
      api/core/app/app_config/easy_ui_based_app/prompt_template/__init__.py
  79. 140 0
      api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py
  80. 0 0
      api/core/app/app_config/easy_ui_based_app/variables/__init__.py
  81. 186 0
      api/core/app/app_config/easy_ui_based_app/variables/manager.py
  82. 70 137
      api/core/app/app_config/entities.py
  83. 0 0
      api/core/app/app_config/features/__init__.py
  84. 0 0
      api/core/app/app_config/features/file_upload/__init__.py
  85. 68 0
      api/core/app/app_config/features/file_upload/manager.py
  86. 0 0
      api/core/app/app_config/features/more_like_this/__init__.py
  87. 38 0
      api/core/app/app_config/features/more_like_this/manager.py
  88. 0 0
      api/core/app/app_config/features/opening_statement/__init__.py
  89. 43 0
      api/core/app/app_config/features/opening_statement/manager.py
  90. 0 0
      api/core/app/app_config/features/retrieval_resource/__init__.py
  91. 33 0
      api/core/app/app_config/features/retrieval_resource/manager.py
  92. 0 0
      api/core/app/app_config/features/speech_to_text/__init__.py
  93. 38 0
      api/core/app/app_config/features/speech_to_text/manager.py
  94. 0 0
      api/core/app/app_config/features/suggested_questions_after_answer/__init__.py
  95. 39 0
      api/core/app/app_config/features/suggested_questions_after_answer/manager.py
  96. 0 0
      api/core/app/app_config/features/text_to_speech/__init__.py
  97. 49 0
      api/core/app/app_config/features/text_to_speech/manager.py
  98. 0 0
      api/core/app/app_config/workflow_ui_based_app/__init__.py
  99. 0 0
      api/core/app/app_config/workflow_ui_based_app/variables/__init__.py
  100. 22 0
      api/core/app/app_config/workflow_ui_based_app/variables/manager.py

+ 8 - 1
.github/workflows/api-model-runtime-tests.yml → .github/workflows/api-tests.yml

@@ -26,6 +26,7 @@ jobs:
       HUGGINGFACE_TEXT2TEXT_GEN_ENDPOINT_URL: b
       HUGGINGFACE_EMBEDDINGS_ENDPOINT_URL: c
       MOCK_SWITCH: true
+      CODE_MAX_STRING_LENGTH: 80000
 
     steps:
       - name: Checkout code
@@ -41,5 +42,11 @@ jobs:
       - name: Install dependencies
         run: pip install -r ./api/requirements.txt
 
-      - name: Run pytest
+      - name: Run ModelRuntime
         run: pytest api/tests/integration_tests/model_runtime/anthropic api/tests/integration_tests/model_runtime/azure_openai api/tests/integration_tests/model_runtime/openai api/tests/integration_tests/model_runtime/chatglm api/tests/integration_tests/model_runtime/google api/tests/integration_tests/model_runtime/xinference api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py
+
+      - name: Run Tool
+        run: pytest api/tests/integration_tests/tools/test_all_provider.py
+
+      - name: Run Workflow
+        run: pytest api/tests/integration_tests/workflow
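
Note: the renamed workflow splits the single pytest step into separate ModelRuntime, Tool, and Workflow runs; the added suites can also be exercised locally with the same invocations, e.g. pytest api/tests/integration_tests/workflow.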

+ 1 - 1
.github/workflows/build-push.yml

@@ -46,7 +46,7 @@ jobs:
         with:
           images: ${{ env[matrix.image_name_env] }}
           tags: |
-            type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
+            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' && startsWith(github.ref, 'refs/tags/') }}
             type=ref,event=branch
             type=sha,enable=true,priority=100,prefix=,suffix=,format=long
             type=raw,value=${{ github.ref_name }},enable=${{ startsWith(github.ref, 'refs/tags/') }}

+ 0 - 26
.github/workflows/tool-tests.yaml

@@ -1,26 +0,0 @@
-name: Run Tool Pytest
-
-on:
-  pull_request:
-    branches:
-      - main
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-
-      - name: Set up Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: '3.10'
-          cache: 'pip'
-          cache-dependency-path: ./api/requirements.txt
-
-      - name: Install dependencies
-        run: pip install -r ./api/requirements.txt
-
-      - name: Run pytest
-        run: pytest ./api/tests/integration_tests/tools/test_all_provider.py

+ 12 - 1
api/.env.example

@@ -137,4 +137,15 @@ SSRF_PROXY_HTTP_URL=
 SSRF_PROXY_HTTPS_URL=
 
 BATCH_UPLOAD_LIMIT=10
-KEYWORD_DATA_SOURCE_TYPE=database
+KEYWORD_DATA_SOURCE_TYPE=database
+
+# CODE EXECUTION CONFIGURATION
+CODE_EXECUTION_ENDPOINT=http://127.0.0.1:8194
+CODE_EXECUTION_API_KEY=dify-sandbox
+CODE_MAX_NUMBER=9223372036854775807
+CODE_MIN_NUMBER=-9223372036854775808
+CODE_MAX_STRING_LENGTH=80000
+TEMPLATE_TRANSFORM_MAX_LENGTH=80000
+CODE_MAX_STRING_ARRAY_LENGTH=30
+CODE_MAX_OBJECT_ARRAY_LENGTH=30
+CODE_MAX_NUMBER_ARRAY_LENGTH=1000
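
Note: the CODE_* limits above are presumably consumed by the new workflow engine's code node when it validates sandbox output. A minimal sketch of such a check, under that assumption (check_string_output and check_string_array_output are illustrative names, not Dify's actual helpers):

    import os

    # Illustrative only: read the limits added above, falling back to the documented defaults.
    CODE_MAX_STRING_LENGTH = int(os.environ.get('CODE_MAX_STRING_LENGTH', 80000))
    CODE_MAX_STRING_ARRAY_LENGTH = int(os.environ.get('CODE_MAX_STRING_ARRAY_LENGTH', 30))

    def check_string_output(value: str, variable: str) -> str:
        # Reject a code-node string output that exceeds the configured limit.
        if len(value) > CODE_MAX_STRING_LENGTH:
            raise ValueError(f'{variable} exceeds CODE_MAX_STRING_LENGTH ({CODE_MAX_STRING_LENGTH})')
        return value

    def check_string_array_output(values: list[str], variable: str) -> list[str]:
        # String arrays are capped in element count, and each element in length.
        if len(values) > CODE_MAX_STRING_ARRAY_LENGTH:
            raise ValueError(f'{variable} exceeds CODE_MAX_STRING_ARRAY_LENGTH ({CODE_MAX_STRING_ARRAY_LENGTH})')
        return [check_string_output(v, f'{variable}[{i}]') for i, v in enumerate(values)]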

+ 63 - 1
api/commands.py

@@ -15,7 +15,7 @@ from libs.rsa import generate_key_pair
 from models.account import Tenant
 from models.dataset import Dataset, DatasetCollectionBinding, DocumentSegment
 from models.dataset import Document as DatasetDocument
-from models.model import Account, App, AppAnnotationSetting, MessageAnnotation
+from models.model import Account, App, AppAnnotationSetting, AppMode, Conversation, MessageAnnotation
 from models.provider import Provider, ProviderModel
 
 
@@ -371,8 +371,70 @@ def migrate_knowledge_vector_database():
                     fg='green'))
 
 
+@click.command('convert-to-agent-apps', help='Convert Agent Assistant to Agent App.')
+def convert_to_agent_apps():
+    """
+    Convert Agent Assistant to Agent App.
+    """
+    click.echo(click.style('Start convert to agent apps.', fg='green'))
+
+    proceeded_app_ids = []
+
+    while True:
+        # fetch first 1000 apps
+        sql_query = """SELECT a.id AS id FROM apps a
+            INNER JOIN app_model_configs am ON a.app_model_config_id=am.id
+            WHERE a.mode = 'chat' 
+            AND am.agent_mode is not null 
+            AND (
+				am.agent_mode like '%"strategy": "function_call"%' 
+                OR am.agent_mode  like '%"strategy": "react"%'
+			) 
+            AND (
+				am.agent_mode like '{"enabled": true%' 
+                OR am.agent_mode like '{"max_iteration": %'
+			) ORDER BY a.created_at DESC LIMIT 1000
+        """
+
+        with db.engine.begin() as conn:
+            rs = conn.execute(db.text(sql_query))
+
+            apps = []
+            for i in rs:
+                app_id = str(i.id)
+                if app_id not in proceeded_app_ids:
+                    proceeded_app_ids.append(app_id)
+                    app = db.session.query(App).filter(App.id == app_id).first()
+                    apps.append(app)
+
+            if len(apps) == 0:
+                break
+
+        for app in apps:
+            click.echo('Converting app: {}'.format(app.id))
+
+            try:
+                app.mode = AppMode.AGENT_CHAT.value
+                db.session.commit()
+
+                # update conversation mode to agent
+                db.session.query(Conversation).filter(Conversation.app_id == app.id).update(
+                    {Conversation.mode: AppMode.AGENT_CHAT.value}
+                )
+
+                db.session.commit()
+                click.echo(click.style('Converted app: {}'.format(app.id), fg='green'))
+            except Exception as e:
+                click.echo(
+                    click.style('Convert app error: {} {}'.format(e.__class__.__name__,
+                                                                  str(e)), fg='red'))
+
+    click.echo(click.style('Congratulations! Converted {} agent apps.'.format(len(proceeded_app_ids)), fg='green'))
+
+
 def register_commands(app):
     app.cli.add_command(reset_password)
     app.cli.add_command(reset_email)
     app.cli.add_command(reset_encrypt_key_pair)
     app.cli.add_command(vdb_migrate)
+    app.cli.add_command(convert_to_agent_apps)
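
Note: since convert-to-agent-apps is registered through app.cli.add_command, it becomes a regular Flask CLI command; once the API environment is configured it can be invoked from the api directory as flask convert-to-agent-apps, which migrates eligible chat apps (fetched 1000 at a time) and their conversations to the new agent-chat mode.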

+ 14 - 1
api/config.py

@@ -28,6 +28,7 @@ DEFAULTS = {
     'CHECK_UPDATE_URL': 'https://updates.dify.ai',
     'DEPLOY_ENV': 'PRODUCTION',
     'SQLALCHEMY_POOL_SIZE': 30,
+    'SQLALCHEMY_MAX_OVERFLOW': 10,
     'SQLALCHEMY_POOL_RECYCLE': 3600,
     'SQLALCHEMY_ECHO': 'False',
     'SENTRY_TRACES_SAMPLE_RATE': 1.0,
@@ -49,6 +50,8 @@ DEFAULTS = {
     'HOSTED_ANTHROPIC_PAID_ENABLED': 'False',
     'HOSTED_MODERATION_ENABLED': 'False',
     'HOSTED_MODERATION_PROVIDERS': '',
+    'HOSTED_FETCH_APP_TEMPLATES_MODE': 'remote',
+    'HOSTED_FETCH_APP_TEMPLATES_REMOTE_DOMAIN': 'https://tmpl.dify.ai',
     'CLEAN_DAY_SETTING': 30,
     'UPLOAD_FILE_SIZE_LIMIT': 15,
     'UPLOAD_FILE_BATCH_LIMIT': 5,
@@ -61,6 +64,8 @@ DEFAULTS = {
     'ETL_TYPE': 'dify',
     'KEYWORD_STORE': 'jieba',
     'BATCH_UPLOAD_LIMIT': 20,
+    'CODE_EXECUTION_ENDPOINT': '',
+    'CODE_EXECUTION_API_KEY': '',
     'TOOL_ICON_CACHE_MAX_AGE': 3600,
     'KEYWORD_DATA_SOURCE_TYPE': 'database',
 }
@@ -93,7 +98,7 @@ class Config:
         # ------------------------
         # General Configurations.
         # ------------------------
-        self.CURRENT_VERSION = "0.5.11-fix1"
+        self.CURRENT_VERSION = "0.6.0"
         self.COMMIT_SHA = get_env('COMMIT_SHA')
         self.EDITION = "SELF_HOSTED"
         self.DEPLOY_ENV = get_env('DEPLOY_ENV')
@@ -149,6 +154,7 @@ class Config:
         self.SQLALCHEMY_DATABASE_URI = f"postgresql://{db_credentials['DB_USERNAME']}:{db_credentials['DB_PASSWORD']}@{db_credentials['DB_HOST']}:{db_credentials['DB_PORT']}/{db_credentials['DB_DATABASE']}{db_extras}"
         self.SQLALCHEMY_ENGINE_OPTIONS = {
             'pool_size': int(get_env('SQLALCHEMY_POOL_SIZE')),
+            'max_overflow': int(get_env('SQLALCHEMY_MAX_OVERFLOW')),
             'pool_recycle': int(get_env('SQLALCHEMY_POOL_RECYCLE'))
         }
 
@@ -294,6 +300,10 @@ class Config:
         self.HOSTED_MODERATION_ENABLED = get_bool_env('HOSTED_MODERATION_ENABLED')
         self.HOSTED_MODERATION_PROVIDERS = get_env('HOSTED_MODERATION_PROVIDERS')
 
+        # fetch app templates mode, remote, builtin, db(only for dify SaaS), default: remote
+        self.HOSTED_FETCH_APP_TEMPLATES_MODE = get_env('HOSTED_FETCH_APP_TEMPLATES_MODE')
+        self.HOSTED_FETCH_APP_TEMPLATES_REMOTE_DOMAIN = get_env('HOSTED_FETCH_APP_TEMPLATES_REMOTE_DOMAIN')
+
         self.ETL_TYPE = get_env('ETL_TYPE')
         self.UNSTRUCTURED_API_URL = get_env('UNSTRUCTURED_API_URL')
         self.BILLING_ENABLED = get_bool_env('BILLING_ENABLED')
@@ -301,6 +311,9 @@ class Config:
 
         self.BATCH_UPLOAD_LIMIT = get_env('BATCH_UPLOAD_LIMIT')
 
+        self.CODE_EXECUTION_ENDPOINT = get_env('CODE_EXECUTION_ENDPOINT')
+        self.CODE_EXECUTION_API_KEY = get_env('CODE_EXECUTION_API_KEY')
+
         self.API_COMPRESSION_ENABLED = get_bool_env('API_COMPRESSION_ENABLED')
         self.TOOL_ICON_CACHE_MAX_AGE = get_env('TOOL_ICON_CACHE_MAX_AGE')
 
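
Note: with the defaults introduced here (SQLALCHEMY_POOL_SIZE=30 plus SQLALCHEMY_MAX_OVERFLOW=10), SQLAlchemy may open up to 40 database connections per process; max_overflow permits temporary connections beyond the fixed pool size when the pool is exhausted.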

+ 0 - 575
api/constants/languages.py

@@ -1,6 +1,4 @@
-import json
 
-from models.model import AppModelConfig
 
 languages = ['en-US', 'zh-Hans', 'pt-BR', 'es-ES', 'fr-FR', 'de-DE', 'ja-JP', 'ko-KR', 'ru-RU', 'it-IT', 'uk-UA', 'vi-VN']
 
@@ -27,576 +25,3 @@ def supported_language(lang):
     error = ('{lang} is not a valid language.'
              .format(lang=lang))
     raise ValueError(error)
-
-
-user_input_form_template = {
-    "en-US": [
-        {
-            "paragraph": {
-                "label": "Query",
-                "variable": "default_input",
-                "required": False,
-                "default": ""
-            }
-        }
-    ],
-    "zh-Hans": [
-        {
-            "paragraph": {
-                "label": "查询内容",
-                "variable": "default_input",
-                "required": False,
-                "default": ""
-            }
-        }
-    ],
-    "pt-BR": [
-        {
-            "paragraph": {
-                "label": "Consulta",
-                "variable": "default_input",
-                "required": False,
-                "default": ""
-            }
-        }
-    ],
-    "es-ES": [
-        {
-            "paragraph": {
-                "label": "Consulta",
-                "variable": "default_input",
-                "required": False,
-                "default": ""
-            }
-        }
-    ],
-    "ua-UK": [
-        {
-            "paragraph": {
-                "label": "Запит",
-                "variable": "default_input",
-                "required": False,
-                "default": ""
-            }
-        }
-    ],
-     "vi-VN": [
-        {
-            "paragraph": {
-                "label": "Nội dung truy vấn",
-                "variable": "default_input",
-                "required": False,
-                "default": ""
-            }
-        }
-    ],
-}
-
-demo_model_templates = {
-    'en-US': [
-        {
-            'name': 'Translation Assistant',
-            'icon': '',
-            'icon_background': '',
-            'description': 'A multilingual translator that provides translation capabilities in multiple languages, translating user input into the language they need.',
-            'mode': 'completion',
-            'model_config': AppModelConfig(
-                provider='openai',
-                model_id='gpt-3.5-turbo-instruct',
-                configs={
-                    'prompt_template': "Please translate the following text into {{target_language}}:\n",
-                    'prompt_variables': [
-                        {
-                            "key": "target_language",
-                            "name": "Target Language",
-                            "description": "The language you want to translate into.",
-                            "type": "select",
-                            "default": "Chinese",
-                            'options': [
-                                'Chinese',
-                                'English',
-                                'Japanese',
-                                'French',
-                                'Russian',
-                                'German',
-                                'Spanish',
-                                'Korean',
-                                'Italian',
-                            ]
-                        }
-                    ],
-                    'completion_params': {
-                        'max_token': 1000,
-                        'temperature': 0,
-                        'top_p': 0,
-                        'presence_penalty': 0.1,
-                        'frequency_penalty': 0.1,
-                    }
-                },
-                opening_statement='',
-                suggested_questions=None,
-                pre_prompt="Please translate the following text into {{target_language}}:\n{{query}}\ntranslate:",
-                model=json.dumps({
-                    "provider": "openai",
-                    "name": "gpt-3.5-turbo-instruct",
-                    "mode": "completion",
-                    "completion_params": {
-                        "max_tokens": 1000,
-                        "temperature": 0,
-                        "top_p": 0,
-                        "presence_penalty": 0.1,
-                        "frequency_penalty": 0.1
-                    }
-                }),
-                user_input_form=json.dumps([
-                    {
-                        "select": {
-                            "label": "Target Language",
-                            "variable": "target_language",
-                            "description": "The language you want to translate into.",
-                            "default": "Chinese",
-                            "required": True,
-                            'options': [
-                                'Chinese',
-                                'English',
-                                'Japanese',
-                                'French',
-                                'Russian',
-                                'German',
-                                'Spanish',
-                                'Korean',
-                                'Italian',
-                            ]
-                        }
-                    }, {
-                        "paragraph": {
-                            "label": "Query",
-                            "variable": "query",
-                            "required": True,
-                            "default": ""
-                        }
-                    }
-                ])
-            )
-        },
-        {
-            'name': 'AI Front-end Interviewer',
-            'icon': '',
-            'icon_background': '',
-            'description': 'A simulated front-end interviewer that tests the skill level of front-end development through questioning.',
-            'mode': 'chat',
-            'model_config': AppModelConfig(
-                provider='openai',
-                model_id='gpt-3.5-turbo',
-                configs={
-                    'introduction': 'Hi, welcome to our interview. I am the interviewer for this technology company, and I will test your web front-end development skills. Next, I will ask you some technical questions. Please answer them as thoroughly as possible. ',
-                    'prompt_template': "You will play the role of an interviewer for a technology company, examining the user's web front-end development skills and posing 5-10 sharp technical questions.\n\nPlease note:\n- Only ask one question at a time.\n- After the user answers a question, ask the next question directly, without trying to correct any mistakes made by the candidate.\n- If you think the user has not answered correctly for several consecutive questions, ask fewer questions.\n- After asking the last question, you can ask this question: Why did you leave your last job? After the user answers this question, please express your understanding and support.\n",
-                    'prompt_variables': [],
-                    'completion_params': {
-                        'max_token': 300,
-                        'temperature': 0.8,
-                        'top_p': 0.9,
-                        'presence_penalty': 0.1,
-                        'frequency_penalty': 0.1,
-                    }
-                },
-                opening_statement='Hi, welcome to our interview. I am the interviewer for this technology company, and I will test your web front-end development skills. Next, I will ask you some technical questions. Please answer them as thoroughly as possible. ',
-                suggested_questions=None,
-                pre_prompt="You will play the role of an interviewer for a technology company, examining the user's web front-end development skills and posing 5-10 sharp technical questions.\n\nPlease note:\n- Only ask one question at a time.\n- After the user answers a question, ask the next question directly, without trying to correct any mistakes made by the candidate.\n- If you think the user has not answered correctly for several consecutive questions, ask fewer questions.\n- After asking the last question, you can ask this question: Why did you leave your last job? After the user answers this question, please express your understanding and support.\n",
-                model=json.dumps({
-                    "provider": "openai",
-                    "name": "gpt-3.5-turbo",
-                    "mode": "chat",
-                    "completion_params": {
-                        "max_tokens": 300,
-                        "temperature": 0.8,
-                        "top_p": 0.9,
-                        "presence_penalty": 0.1,
-                        "frequency_penalty": 0.1
-                    }
-                }),
-                user_input_form=None
-            )
-        }
-    ],
-    'zh-Hans': [
-        {
-            'name': '翻译助手',
-            'icon': '',
-            'icon_background': '',
-            'description': '一个多语言翻译器,提供多种语言翻译能力,将用户输入的文本翻译成他们需要的语言。',
-            'mode': 'completion',
-            'model_config': AppModelConfig(
-                provider='openai',
-                model_id='gpt-3.5-turbo-instruct',
-                configs={
-                    'prompt_template': "请将以下文本翻译为{{target_language}}:\n",
-                    'prompt_variables': [
-                        {
-                            "key": "target_language",
-                            "name": "目标语言",
-                            "description": "翻译的目标语言",
-                            "type": "select",
-                            "default": "中文",
-                            "options": [
-                                "中文",
-                                "英文",
-                                "日语",
-                                "法语",
-                                "俄语",
-                                "德语",
-                                "西班牙语",
-                                "韩语",
-                                "意大利语",
-                            ]
-                        }
-                    ],
-                    'completion_params': {
-                        'max_token': 1000,
-                        'temperature': 0,
-                        'top_p': 0,
-                        'presence_penalty': 0.1,
-                        'frequency_penalty': 0.1,
-                    }
-                },
-                opening_statement='',
-                suggested_questions=None,
-                pre_prompt="请将以下文本翻译为{{target_language}}:\n{{query}}\n翻译:",
-                model=json.dumps({
-                    "provider": "openai",
-                    "name": "gpt-3.5-turbo-instruct",
-                    "mode": "completion",
-                    "completion_params": {
-                        "max_tokens": 1000,
-                        "temperature": 0,
-                        "top_p": 0,
-                        "presence_penalty": 0.1,
-                        "frequency_penalty": 0.1
-                    }
-                }),
-                user_input_form=json.dumps([
-                    {
-                        "select": {
-                            "label": "目标语言",
-                            "variable": "target_language",
-                            "description": "翻译的目标语言",
-                            "default": "中文",
-                            "required": True,
-                            'options': [
-                                "中文",
-                                "英文",
-                                "日语",
-                                "法语",
-                                "俄语",
-                                "德语",
-                                "西班牙语",
-                                "韩语",
-                                "意大利语",
-                            ]
-                        }
-                    }, {
-                        "paragraph": {
-                            "label": "文本内容",
-                            "variable": "query",
-                            "required": True,
-                            "default": ""
-                        }
-                    }
-                ])
-            )
-        },
-        {
-            'name': 'AI 前端面试官',
-            'icon': '',
-            'icon_background': '',
-            'description': '一个模拟的前端面试官,通过提问的方式对前端开发的技能水平进行检验。',
-            'mode': 'chat',
-            'model_config': AppModelConfig(
-                provider='openai',
-                model_id='gpt-3.5-turbo',
-                configs={
-                    'introduction': '你好,欢迎来参加我们的面试,我是这家科技公司的面试官,我将考察你的 Web 前端开发技能。接下来我会向您提出一些技术问题,请您尽可能详尽地回答。',
-                    'prompt_template': "你将扮演一个科技公司的面试官,考察用户作为候选人的 Web 前端开发水平,提出 5-10 个犀利的技术问题。\n\n请注意:\n- 每次只问一个问题\n- 用户回答问题后请直接问下一个问题,而不要试图纠正候选人的错误;\n- 如果你认为用户连续几次回答的都不对,就少问一点;\n- 问完最后一个问题后,你可以问这样一个问题:上一份工作为什么离职?用户回答该问题后,请表示理解与支持。\n",
-                    'prompt_variables': [],
-                    'completion_params': {
-                        'max_token': 300,
-                        'temperature': 0.8,
-                        'top_p': 0.9,
-                        'presence_penalty': 0.1,
-                        'frequency_penalty': 0.1,
-                    }
-                },
-                opening_statement='你好,欢迎来参加我们的面试,我是这家科技公司的面试官,我将考察你的 Web 前端开发技能。接下来我会向您提出一些技术问题,请您尽可能详尽地回答。',
-                suggested_questions=None,
-                pre_prompt="你将扮演一个科技公司的面试官,考察用户作为候选人的 Web 前端开发水平,提出 5-10 个犀利的技术问题。\n\n请注意:\n- 每次只问一个问题\n- 用户回答问题后请直接问下一个问题,而不要试图纠正候选人的错误;\n- 如果你认为用户连续几次回答的都不对,就少问一点;\n- 问完最后一个问题后,你可以问这样一个问题:上一份工作为什么离职?用户回答该问题后,请表示理解与支持。\n",
-                model=json.dumps({
-                    "provider": "openai",
-                    "name": "gpt-3.5-turbo",
-                    "mode": "chat",
-                    "completion_params": {
-                        "max_tokens": 300,
-                        "temperature": 0.8,
-                        "top_p": 0.9,
-                        "presence_penalty": 0.1,
-                        "frequency_penalty": 0.1
-                    }
-                }),
-                user_input_form=None
-            )
-        }
-    ],
-    'uk-UA': [
-        {
-            "name": "Помічник перекладу",
-            "icon": "",
-            "icon_background": "",
-            "description": "Багатомовний перекладач, який надає можливості перекладу різними мовами, перекладаючи введені користувачем дані на потрібну мову.",
-            "mode": "completion",
-            "model_config": AppModelConfig(
-                provider="openai",
-                model_id="gpt-3.5-turbo-instruct",
-                configs={
-                    "prompt_template": "Будь ласка, перекладіть наступний текст на {{target_language}}:\n",
-                    "prompt_variables": [
-                        {
-                            "key": "target_language",
-                            "name": "Цільова мова",
-                            "description": "Мова, на яку ви хочете перекласти.",
-                            "type": "select",
-                            "default": "Ukrainian",
-                            "options": [
-                                "Chinese",
-                                "English",
-                                "Japanese",
-                                "French",
-                                "Russian",
-                                "German",
-                                "Spanish",
-                                "Korean",
-                                "Italian",
-                            ],
-                        },
-                    ],
-                    "completion_params": {
-                        "max_token": 1000,
-                        "temperature": 0,
-                        "top_p": 0,
-                        "presence_penalty": 0.1,
-                        "frequency_penalty": 0.1,
-                    },
-                },
-                opening_statement="",
-                suggested_questions=None,
-                pre_prompt="Будь ласка, перекладіть наступний текст на {{target_language}}:\n{{query}}\ntranslate:",
-                model=json.dumps({
-                    "provider": "openai",
-                    "name": "gpt-3.5-turbo-instruct",
-                    "mode": "completion",
-                    "completion_params": {
-                        "max_tokens": 1000,
-                        "temperature": 0,
-                        "top_p": 0,
-                        "presence_penalty": 0.1,
-                        "frequency_penalty": 0.1,
-                    },
-                }),
-                user_input_form=json.dumps([
-                    {
-                        "select": {
-                            "label": "Цільова мова",
-                            "variable": "target_language",
-                            "description": "Мова, на яку ви хочете перекласти.",
-                            "default": "Chinese",
-                            "required": True,
-                            'options': [
-                                'Chinese',
-                                'English',
-                                'Japanese',
-                                'French',
-                                'Russian',
-                                'German',
-                                'Spanish',
-                                'Korean',
-                                'Italian',
-                            ]
-                        }
-                    }, {
-                        "paragraph": {
-                            "label": "Запит",
-                            "variable": "query",
-                            "required": True,
-                            "default": ""
-                        }
-                    }
-                ])
-            )
-        },
-        {
-            "name": "AI інтерв’юер фронтенду",
-            "icon": "",
-            "icon_background": "",
-            "description": "Симульований інтерв’юер фронтенду, який перевіряє рівень кваліфікації у розробці фронтенду через опитування.",
-            "mode": "chat",
-            "model_config": AppModelConfig(
-                provider="openai",
-                model_id="gpt-3.5-turbo",
-                configs={
-                    "introduction": "Привіт, ласкаво просимо на наше співбесіду. Я інтерв'юер цієї технологічної компанії, і я перевірю ваші навички веб-розробки фронтенду. Далі я поставлю вам декілька технічних запитань. Будь ласка, відповідайте якомога ретельніше. ",
-                    "prompt_template": "Ви будете грати роль інтерв'юера технологічної компанії, перевіряючи навички розробки фронтенду користувача та ставлячи 5-10 чітких технічних питань.\n\nЗверніть увагу:\n- Ставте лише одне запитання за раз.\n- Після того, як користувач відповість на запитання, ставте наступне запитання безпосередньо, не намагаючись виправити будь-які помилки, допущені кандидатом.\n- Якщо ви вважаєте, що користувач не відповів правильно на кілька питань поспіль, задайте менше запитань.\n- Після того, як ви задали останнє запитання, ви можете поставити таке запитання: Чому ви залишили свою попередню роботу? Після того, як користувач відповість на це питання, висловіть своє розуміння та підтримку.\n",
-                    "prompt_variables": [],
-                    "completion_params": {
-                        "max_token": 300,
-                        "temperature": 0.8,
-                        "top_p": 0.9,
-                        "presence_penalty": 0.1,
-                        "frequency_penalty": 0.1,
-                    },
-                },
-                opening_statement="Привіт, ласкаво просимо на наше співбесіду. Я інтерв'юер цієї технологічної компанії, і я перевірю ваші навички веб-розробки фронтенду. Далі я поставлю вам декілька технічних запитань. Будь ласка, відповідайте якомога ретельніше. ",
-                suggested_questions=None,
-                pre_prompt="Ви будете грати роль інтерв'юера технологічної компанії, перевіряючи навички розробки фронтенду користувача та ставлячи 5-10 чітких технічних питань.\n\nЗверніть увагу:\n- Ставте лише одне запитання за раз.\n- Після того, як користувач відповість на запитання, ставте наступне запитання безпосередньо, не намагаючись виправити будь-які помилки, допущені кандидатом.\n- Якщо ви вважаєте, що користувач не відповів правильно на кілька питань поспіль, задайте менше запитань.\n- Після того, як ви задали останнє запитання, ви можете поставити таке запитання: Чому ви залишили свою попередню роботу? Після того, як користувач відповість на це питання, висловіть своє розуміння та підтримку.\n",
-                model=json.dumps({
-                    "provider": "openai",
-                    "name": "gpt-3.5-turbo",
-                    "mode": "chat",
-                    "completion_params": {
-                        "max_tokens": 300,
-                        "temperature": 0.8,
-                        "top_p": 0.9,
-                        "presence_penalty": 0.1,
-                        "frequency_penalty": 0.1,
-                    },
-                }),
-                user_input_form=None
-            ),
-        }
-    ],
-    'vi-VN': [
-        {
-            'name': 'Trợ lý dịch thuật',
-            'icon': '',
-            'icon_background': '',
-            'description': 'Trình dịch đa ngôn ngữ cung cấp khả năng dịch bằng nhiều ngôn ngữ, dịch thông tin đầu vào của người dùng sang ngôn ngữ họ cần.',
-            'mode': 'completion',
-            'model_config': AppModelConfig(
-                provider='openai',
-                model_id='gpt-3.5-turbo-instruct',
-                configs={
-                    'prompt_template': "Hãy dịch đoạn văn bản sau sang ngôn ngữ {{target_language}}:\n",
-                    'prompt_variables': [
-                        {
-                            "key": "target_language",
-                            "name": "Ngôn ngữ đích",
-                            "description": "Ngôn ngữ bạn muốn dịch sang.",
-                            "type": "select",
-                            "default": "Vietnamese",
-                            'options': [
-                                'Chinese',
-                                'English',
-                                'Japanese',
-                                'French',
-                                'Russian',
-                                'German',
-                                'Spanish',
-                                'Korean',
-                                'Italian',
-                                'Vietnamese',
-                            ]
-                        }
-                    ],
-                    'completion_params': {
-                        'max_token': 1000,
-                        'temperature': 0,
-                        'top_p': 0,
-                        'presence_penalty': 0.1,
-                        'frequency_penalty': 0.1,
-                    }
-                },
-                opening_statement='',
-                suggested_questions=None,
-                pre_prompt="Hãy dịch đoạn văn bản sau sang {{target_language}}:\n{{query}}\ndịch:",
-                model=json.dumps({
-                    "provider": "openai",
-                    "name": "gpt-3.5-turbo-instruct",
-                    "mode": "completion",
-                    "completion_params": {
-                        "max_tokens": 1000,
-                        "temperature": 0,
-                        "top_p": 0,
-                        "presence_penalty": 0.1,
-                        "frequency_penalty": 0.1
-                    }
-                }),
-                user_input_form=json.dumps([
-                    {
-                        "select": {
-                            "label": "Ngôn ngữ đích",
-                            "variable": "target_language",
-                            "description": "Ngôn ngữ bạn muốn dịch sang.",
-                            "default": "Vietnamese",
-                            "required": True,
-                            'options': [
-                                'Chinese',
-                                'English',
-                                'Japanese',
-                                'French',
-                                'Russian',
-                                'German',
-                                'Spanish',
-                                'Korean',
-                                'Italian',
-                                'Vietnamese',
-                            ]
-                        }
-                    }, {
-                        "paragraph": {
-                            "label": "Query",
-                            "variable": "query",
-                            "required": True,
-                            "default": ""
-                        }
-                    }
-                ])
-            )
-        },
-        {
-            'name': 'Phỏng vấn front-end AI',
-            'icon': '',
-            'icon_background': '',
-            'description': 'Một người phỏng vấn front-end mô phỏng để kiểm tra mức độ kỹ năng phát triển front-end thông qua việc đặt câu hỏi.',
-            'mode': 'chat',
-            'model_config': AppModelConfig(
-                provider='openai',
-                model_id='gpt-3.5-turbo',
-                configs={
-                    'introduction': 'Xin chào, chào mừng đến với cuộc phỏng vấn của chúng tôi. Tôi là người phỏng vấn cho công ty công nghệ này và tôi sẽ kiểm tra kỹ năng phát triển web front-end của bạn. Tiếp theo, tôi sẽ hỏi bạn một số câu hỏi kỹ thuật. Hãy trả lời chúng càng kỹ lưỡng càng tốt. ',
-                    'prompt_template': "Bạn sẽ đóng vai người phỏng vấn cho một công ty công nghệ, kiểm tra kỹ năng phát triển web front-end của người dùng và đặt ra 5-10 câu hỏi kỹ thuật sắc bén.\n\nXin lưu ý:\n- Mỗi lần chỉ hỏi một câu hỏi.\n - Sau khi người dùng trả lời một câu hỏi, hãy hỏi trực tiếp câu hỏi tiếp theo mà không cố gắng sửa bất kỳ lỗi nào mà thí sinh mắc phải.\n- Nếu bạn cho rằng người dùng đã không trả lời đúng cho một số câu hỏi liên tiếp, hãy hỏi ít câu hỏi hơn.\n- Sau đặt câu hỏi cuối cùng, bạn có thể hỏi câu hỏi này: Tại sao bạn lại rời bỏ công việc cuối cùng của mình? Sau khi người dùng trả lời câu hỏi này, vui lòng bày tỏ sự hiểu biết và ủng hộ của bạn.\n",
-                    'prompt_variables': [],
-                    'completion_params': {
-                        'max_token': 300,
-                        'temperature': 0.8,
-                        'top_p': 0.9,
-                        'presence_penalty': 0.1,
-                        'frequency_penalty': 0.1,
-                    }
-                },
-                opening_statement='Xin chào, chào mừng đến với cuộc phỏng vấn của chúng tôi. Tôi là người phỏng vấn cho công ty công nghệ này và tôi sẽ kiểm tra kỹ năng phát triển web front-end của bạn. Tiếp theo, tôi sẽ hỏi bạn một số câu hỏi kỹ thuật. Hãy trả lời chúng càng kỹ lưỡng càng tốt. ',
-                suggested_questions=None,
-                pre_prompt="Bạn sẽ đóng vai người phỏng vấn cho một công ty công nghệ, kiểm tra kỹ năng phát triển web front-end của người dùng và đặt ra 5-10 câu hỏi kỹ thuật sắc bén.\n\nXin lưu ý:\n- Mỗi lần chỉ hỏi một câu hỏi.\n - Sau khi người dùng trả lời một câu hỏi, hãy hỏi trực tiếp câu hỏi tiếp theo mà không cố gắng sửa bất kỳ lỗi nào mà thí sinh mắc phải.\n- Nếu bạn cho rằng người dùng đã không trả lời đúng cho một số câu hỏi liên tiếp, hãy hỏi ít câu hỏi hơn.\n- Sau đặt câu hỏi cuối cùng, bạn có thể hỏi câu hỏi này: Tại sao bạn lại rời bỏ công việc cuối cùng của mình? Sau khi người dùng trả lời câu hỏi này, vui lòng bày tỏ sự hiểu biết và ủng hộ của bạn.\n",
-                model=json.dumps({
-                    "provider": "openai",
-                    "name": "gpt-3.5-turbo",
-                    "mode": "chat",
-                    "completion_params": {
-                        "max_tokens": 300,
-                        "temperature": 0.8,
-                        "top_p": 0.9,
-                        "presence_penalty": 0.1,
-                        "frequency_penalty": 0.1
-                    }
-                }),
-                user_input_form=None
-            )
-        }
-    ],
-}

+ 52 - 30
api/constants/model_template.py

@@ -1,27 +1,31 @@
 import json
 
-model_templates = {
+from models.model import AppMode
+
+default_app_templates = {
+    # workflow default mode
+    AppMode.WORKFLOW: {
+        'app': {
+            'mode': AppMode.WORKFLOW.value,
+            'enable_site': True,
+            'enable_api': True
+        }
+    },
+
     # completion default mode
-    'completion_default': {
+    AppMode.COMPLETION: {
         'app': {
-            'mode': 'completion',
+            'mode': AppMode.COMPLETION.value,
             'enable_site': True,
-            'enable_api': True,
-            'is_demo': False,
-            'api_rpm': 0,
-            'api_rph': 0,
-            'status': 'normal'
+            'enable_api': True
         },
         'model_config': {
-            'provider': '',
-            'model_id': '',
-            'configs': {},
-            'model': json.dumps({
+            'model': {
                 "provider": "openai",
-                "name": "gpt-3.5-turbo-instruct",
-                "mode": "completion",
+                "name": "gpt-4",
+                "mode": "chat",
                 "completion_params": {}
-            }),
+            },
             'user_input_form': json.dumps([
                 {
                     "paragraph": {
@@ -33,32 +37,50 @@ model_templates = {
                 }
             ]),
             'pre_prompt': '{{query}}'
-        }
+        },
+
     },
 
     # chat default mode
-    'chat_default': {
+    AppMode.CHAT: {
         'app': {
-            'mode': 'chat',
+            'mode': AppMode.CHAT.value,
             'enable_site': True,
-            'enable_api': True,
-            'is_demo': False,
-            'api_rpm': 0,
-            'api_rph': 0,
-            'status': 'normal'
+            'enable_api': True
         },
         'model_config': {
-            'provider': '',
-            'model_id': '',
-            'configs': {},
-            'model': json.dumps({
+            'model': {
                 "provider": "openai",
-                "name": "gpt-3.5-turbo",
+                "name": "gpt-4",
                 "mode": "chat",
                 "completion_params": {}
-            })
+            }
         }
     },
-}
 
+    # advanced-chat default mode
+    AppMode.ADVANCED_CHAT: {
+        'app': {
+            'mode': AppMode.ADVANCED_CHAT.value,
+            'enable_site': True,
+            'enable_api': True
+        }
+    },
 
+    # agent-chat default mode
+    AppMode.AGENT_CHAT: {
+        'app': {
+            'mode': AppMode.AGENT_CHAT.value,
+            'enable_site': True,
+            'enable_api': True
+        },
+        'model_config': {
+            'model': {
+                "provider": "openai",
+                "name": "gpt-4",
+                "mode": "chat",
+                "completion_params": {}
+            }
+        }
+    }
+}
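
Note: a minimal usage sketch of the restructured table (the caller code below is hypothetical; only the dictionary shape comes from the diff above):

    from constants.model_template import default_app_templates
    from models.model import AppMode

    # Templates are now keyed by the AppMode enum instead of '<mode>_default' strings.
    template = default_app_templates[AppMode.AGENT_CHAT]
    app_defaults = template['app']                      # {'mode': 'agent-chat', 'enable_site': True, 'enable_api': True}
    default_model = template['model_config']['model']   # provider 'openai', model 'gpt-4'

    # Workflow and advanced-chat entries intentionally ship without a 'model_config' key.
    has_model_config = 'model_config' in default_app_templates[AppMode.WORKFLOW]   # False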

File diff suppressed because it is too large
+ 534 - 0
api/constants/recommended_apps.json


+ 5 - 4
api/controllers/console/__init__.py

@@ -5,10 +5,10 @@ bp = Blueprint('console', __name__, url_prefix='/console/api')
 api = ExternalApi(bp)
 
 # Import other controllers
-from . import admin, apikey, extension, feature, setup, version
+from . import admin, apikey, extension, feature, setup, version, ping
 # Import app controllers
 from .app import (advanced_prompt_template, annotation, app, audio, completion, conversation, generator, message,
-                  model_config, site, statistic)
+                  model_config, site, statistic, workflow, workflow_run, workflow_app_log, workflow_statistic, agent)
 # Import auth controllers
 from .auth import activate, data_source_oauth, login, oauth
 # Import billing controllers
@@ -16,6 +16,7 @@ from .billing import billing
 # Import datasets controllers
 from .datasets import data_source, datasets, datasets_document, datasets_segments, file, hit_testing
 # Import explore controllers
-from .explore import audio, completion, conversation, installed_app, message, parameter, recommended_app, saved_message
+from .explore import (audio, completion, conversation, installed_app, message, parameter, recommended_app,
+                      saved_message, workflow)
 # Import workspace controllers
-from .workspace import account, members, model_providers, models, tool_providers, workspace
+from .workspace import account, members, model_providers, models, tool_providers, workspace

+ 0 - 21
api/controllers/console/app/__init__.py

@@ -1,21 +0,0 @@
-from controllers.console.app.error import AppUnavailableError
-from extensions.ext_database import db
-from flask_login import current_user
-from models.model import App
-from werkzeug.exceptions import NotFound
-
-
-def _get_app(app_id, mode=None):
-    app = db.session.query(App).filter(
-        App.id == app_id,
-        App.tenant_id == current_user.current_tenant_id,
-        App.status == 'normal'
-    ).first()
-
-    if not app:
-        raise NotFound("App not found")
-
-    if mode and app.mode != mode:
-        raise NotFound("The {} app not found".format(mode))
-
-    return app

+ 32 - 0
api/controllers/console/app/agent.py

@@ -0,0 +1,32 @@
+from flask_restful import Resource, reqparse
+
+from controllers.console import api
+from controllers.console.app.wraps import get_app_model
+from controllers.console.setup import setup_required
+from controllers.console.wraps import account_initialization_required
+from libs.helper import uuid_value
+from libs.login import login_required
+from models.model import AppMode
+from services.agent_service import AgentService
+
+
+class AgentLogApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.AGENT_CHAT])
+    def get(self, app_model):
+        """Get agent logs"""
+        parser = reqparse.RequestParser()
+        parser.add_argument('message_id', type=uuid_value, required=True, location='args')
+        parser.add_argument('conversation_id', type=uuid_value, required=True, location='args')
+
+        args = parser.parse_args()
+
+        return AgentService.get_agent_logs(
+            app_model,
+            args['conversation_id'],
+            args['message_id']
+        )
+    
+api.add_resource(AgentLogApi, '/apps/<uuid:app_id>/agent/logs')
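
Note: given the console blueprint's /console/api prefix, the new resource is served at GET /console/api/apps/<app_id>/agent/logs and expects message_id and conversation_id as UUID query parameters, delegating to AgentService.get_agent_logs.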

+ 163 - 307
api/controllers/console/app/app.py

@@ -1,41 +1,28 @@
 import json
-import logging
-from datetime import datetime
 
 from flask_login import current_user
-from flask_restful import Resource, abort, inputs, marshal_with, reqparse
-from werkzeug.exceptions import Forbidden
+from flask_restful import Resource, inputs, marshal_with, reqparse
+from werkzeug.exceptions import Forbidden, BadRequest
 
-from constants.languages import demo_model_templates, languages
-from constants.model_template import model_templates
 from controllers.console import api
-from controllers.console.app.error import AppNotFoundError, ProviderNotInitializeError
+from controllers.console.app.wraps import get_app_model
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
-from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
-from core.model_manager import ModelManager
-from core.model_runtime.entities.model_entities import ModelType
-from core.provider_manager import ProviderManager
-from events.app_event import app_was_created, app_was_deleted
+from core.agent.entities import AgentToolEntity
 from extensions.ext_database import db
 from fields.app_fields import (
     app_detail_fields,
     app_detail_fields_with_site,
     app_pagination_fields,
-    template_list_fields,
 )
 from libs.login import login_required
-from models.model import App, AppModelConfig, Site
-from services.app_model_config_service import AppModelConfigService
+from services.app_service import AppService
+from models.model import App, AppModelConfig, AppMode
 from core.tools.utils.configuration import ToolParameterConfigurationManager
 from core.tools.tool_manager import ToolManager
-from core.entities.application_entities import AgentToolEntity
 
-def _get_app(app_id, tenant_id):
-    app = db.session.query(App).filter(App.id == app_id, App.tenant_id == tenant_id).first()
-    if not app:
-        raise AppNotFoundError
-    return app
+
+ALLOW_CREATE_APP_MODES = ['chat', 'agent-chat', 'advanced-chat', 'workflow', 'completion']
 
 
 class AppListApi(Resource):
@@ -49,33 +36,15 @@ class AppListApi(Resource):
         parser = reqparse.RequestParser()
         parser.add_argument('page', type=inputs.int_range(1, 99999), required=False, default=1, location='args')
         parser.add_argument('limit', type=inputs.int_range(1, 100), required=False, default=20, location='args')
-        parser.add_argument('mode', type=str, choices=['chat', 'completion', 'all'], default='all', location='args', required=False)
+        parser.add_argument('mode', type=str, choices=['chat', 'workflow', 'agent-chat', 'channel', 'all'], default='all', location='args', required=False)
         parser.add_argument('name', type=str, location='args', required=False)
         args = parser.parse_args()
 
-        filters = [
-            App.tenant_id == current_user.current_tenant_id,
-            App.is_universal == False
-        ]
-
-        if args['mode'] == 'completion':
-            filters.append(App.mode == 'completion')
-        elif args['mode'] == 'chat':
-            filters.append(App.mode == 'chat')
-        else:
-            pass
-
-        if 'name' in args and args['name']:
-            filters.append(App.name.ilike(f'%{args["name"]}%'))
+        # get app list
+        app_service = AppService()
+        app_pagination = app_service.get_paginate_apps(current_user.current_tenant_id, args)
 
-        app_models = db.paginate(
-            db.select(App).where(*filters).order_by(App.created_at.desc()),
-            page=args['page'],
-            per_page=args['limit'],
-            error_out=False
-        )
-
-        return app_models
+        return app_pagination
 
     @setup_required
     @login_required
@@ -86,147 +55,49 @@ class AppListApi(Resource):
         """Create app"""
         parser = reqparse.RequestParser()
         parser.add_argument('name', type=str, required=True, location='json')
-        parser.add_argument('mode', type=str, choices=['completion', 'chat', 'assistant'], location='json')
+        parser.add_argument('description', type=str, location='json')
+        parser.add_argument('mode', type=str, choices=ALLOW_CREATE_APP_MODES, location='json')
         parser.add_argument('icon', type=str, location='json')
         parser.add_argument('icon_background', type=str, location='json')
-        parser.add_argument('model_config', type=dict, location='json')
         args = parser.parse_args()
 
         # The role of the current user in the ta table must be admin or owner
         if not current_user.is_admin_or_owner:
             raise Forbidden()
 
-        try:
-            provider_manager = ProviderManager()
-            default_model_entity = provider_manager.get_default_model(
-                tenant_id=current_user.current_tenant_id,
-                model_type=ModelType.LLM
-            )
-        except (ProviderTokenNotInitError, LLMBadRequestError):
-            default_model_entity = None
-        except Exception as e:
-            logging.exception(e)
-            default_model_entity = None
-
-        if args['model_config'] is not None:
-            # validate config
-            model_config_dict = args['model_config']
-
-            # Get provider configurations
-            provider_manager = ProviderManager()
-            provider_configurations = provider_manager.get_configurations(current_user.current_tenant_id)
-
-            # get available models from provider_configurations
-            available_models = provider_configurations.get_models(
-                model_type=ModelType.LLM,
-                only_active=True
-            )
-
-            # check if model is available
-            available_models_names = [f'{model.provider.provider}.{model.model}' for model in available_models]
-            provider_model = f"{model_config_dict['model']['provider']}.{model_config_dict['model']['name']}"
-            if provider_model not in available_models_names:
-                if not default_model_entity:
-                    raise ProviderNotInitializeError(
-                        "No Default System Reasoning Model available. Please configure "
-                        "in the Settings -> Model Provider.")
-                else:
-                    model_config_dict["model"]["provider"] = default_model_entity.provider.provider
-                    model_config_dict["model"]["name"] = default_model_entity.model
-
-            model_configuration = AppModelConfigService.validate_configuration(
-                tenant_id=current_user.current_tenant_id,
-                account=current_user,
-                config=model_config_dict,
-                app_mode=args['mode']
-            )
-
-            app = App(
-                enable_site=True,
-                enable_api=True,
-                is_demo=False,
-                api_rpm=0,
-                api_rph=0,
-                status='normal'
-            )
-
-            app_model_config = AppModelConfig()
-            app_model_config = app_model_config.from_model_config_dict(model_configuration)
-        else:
-            if 'mode' not in args or args['mode'] is None:
-                abort(400, message="mode is required")
-
-            model_config_template = model_templates[args['mode'] + '_default']
-
-            app = App(**model_config_template['app'])
-            app_model_config = AppModelConfig(**model_config_template['model_config'])
-
-            # get model provider
-            model_manager = ModelManager()
-
-            try:
-                model_instance = model_manager.get_default_model_instance(
-                    tenant_id=current_user.current_tenant_id,
-                    model_type=ModelType.LLM
-                )
-            except ProviderTokenNotInitError:
-                model_instance = None
-
-            if model_instance:
-                model_dict = app_model_config.model_dict
-                model_dict['provider'] = model_instance.provider
-                model_dict['name'] = model_instance.model
-                app_model_config.model = json.dumps(model_dict)
-
-        app.name = args['name']
-        app.mode = args['mode']
-        app.icon = args['icon']
-        app.icon_background = args['icon_background']
-        app.tenant_id = current_user.current_tenant_id
-
-        db.session.add(app)
-        db.session.flush()
-
-        app_model_config.app_id = app.id
-        db.session.add(app_model_config)
-        db.session.flush()
-
-        app.app_model_config_id = app_model_config.id
-
-        account = current_user
-
-        site = Site(
-            app_id=app.id,
-            title=app.name,
-            default_language=account.interface_language,
-            customize_token_strategy='not_allow',
-            code=Site.generate_code(16)
-        )
-
-        db.session.add(site)
-        db.session.commit()
-
-        app_was_created.send(app)
+        if 'mode' not in args or args['mode'] is None:
+            raise BadRequest("mode is required")
+
+        app_service = AppService()
+        app = app_service.create_app(current_user.current_tenant_id, args, current_user)
 
         return app, 201
-    
 
-class AppTemplateApi(Resource):
 
+class AppImportApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    @marshal_with(template_list_fields)
-    def get(self):
-        """Get app demo templates"""
-        account = current_user
-        interface_language = account.interface_language
+    @marshal_with(app_detail_fields_with_site)
+    @cloud_edition_billing_resource_check('apps')
+    def post(self):
+        """Import app"""
+        # The role of the current user in the ta table must be admin or owner
+        if not current_user.is_admin_or_owner:
+            raise Forbidden()
 
-        templates = demo_model_templates.get(interface_language)
-        if not templates:
-            templates = demo_model_templates.get(languages[0])
+        parser = reqparse.RequestParser()
+        parser.add_argument('data', type=str, required=True, nullable=False, location='json')
+        parser.add_argument('name', type=str, location='json')
+        parser.add_argument('description', type=str, location='json')
+        parser.add_argument('icon', type=str, location='json')
+        parser.add_argument('icon_background', type=str, location='json')
+        args = parser.parse_args()
 
-        return {'data': templates}
+        app_service = AppService()
+        app = app_service.import_app(current_user.current_tenant_id, args['data'], args, current_user)
+
+        return app, 201
 
 
 class AppApi(Resource):
@@ -234,213 +105,198 @@ class AppApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
+    @get_app_model
     @marshal_with(app_detail_fields_with_site)
-    def get(self, app_id):
+    def get(self, app_model):
         """Get app detail"""
-        app_id = str(app_id)
-        app: App = _get_app(app_id, current_user.current_tenant_id)
-
         # get original app model config
-        model_config: AppModelConfig = app.app_model_config
-        agent_mode = model_config.agent_mode_dict
-        # decrypt agent tool parameters if it's secret-input
-        for tool in agent_mode.get('tools') or []:
-            if not isinstance(tool, dict) or len(tool.keys()) <= 3:
-                continue
-            agent_tool_entity = AgentToolEntity(**tool)
-            # get tool
-            try:
-                tool_runtime = ToolManager.get_agent_tool_runtime(
-                    tenant_id=current_user.current_tenant_id,
-                    agent_tool=agent_tool_entity,
-                    agent_callback=None
-                )
-                manager = ToolParameterConfigurationManager(
-                    tenant_id=current_user.current_tenant_id,
-                    tool_runtime=tool_runtime,
-                    provider_name=agent_tool_entity.provider_id,
-                    provider_type=agent_tool_entity.provider_type,
-                )
-
-                # get decrypted parameters
-                if agent_tool_entity.tool_parameters:
-                    parameters = manager.decrypt_tool_parameters(agent_tool_entity.tool_parameters or {})
-                    masked_parameter = manager.mask_tool_parameters(parameters or {})
-                else:
-                    masked_parameter = {}
-
-                # override tool parameters
-                tool['tool_parameters'] = masked_parameter
-            except Exception as e:
-                pass
-
-        # override agent mode
-        model_config.agent_mode = json.dumps(agent_mode)
-
-        return app
+        if app_model.mode == AppMode.AGENT_CHAT.value or app_model.is_agent:
+            model_config: AppModelConfig = app_model.app_model_config
+            agent_mode = model_config.agent_mode_dict
+            # decrypt agent tool parameters if it's secret-input
+            for tool in agent_mode.get('tools') or []:
+                if not isinstance(tool, dict) or len(tool.keys()) <= 3:
+                    continue
+                agent_tool_entity = AgentToolEntity(**tool)
+                # get tool
+                try:
+                    tool_runtime = ToolManager.get_agent_tool_runtime(
+                        tenant_id=current_user.current_tenant_id,
+                        agent_tool=agent_tool_entity,
+                    )
+                    manager = ToolParameterConfigurationManager(
+                        tenant_id=current_user.current_tenant_id,
+                        tool_runtime=tool_runtime,
+                        provider_name=agent_tool_entity.provider_id,
+                        provider_type=agent_tool_entity.provider_type,
+                    )
+
+                    # get decrypted parameters
+                    if agent_tool_entity.tool_parameters:
+                        parameters = manager.decrypt_tool_parameters(agent_tool_entity.tool_parameters or {})
+                        masked_parameter = manager.mask_tool_parameters(parameters or {})
+                    else:
+                        masked_parameter = {}
+
+                    # override tool parameters
+                    tool['tool_parameters'] = masked_parameter
+                except Exception as e:
+                    pass
+
+            # override agent mode
+            model_config.agent_mode = json.dumps(agent_mode)
+            db.session.commit()
+
+        return app_model
+
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model
+    @marshal_with(app_detail_fields_with_site)
+    def put(self, app_model):
+        """Update app"""
+        parser = reqparse.RequestParser()
+        parser.add_argument('name', type=str, required=True, nullable=False, location='json')
+        parser.add_argument('description', type=str, location='json')
+        parser.add_argument('icon', type=str, location='json')
+        parser.add_argument('icon_background', type=str, location='json')
+        args = parser.parse_args()
+
+        app_service = AppService()
+        app_model = app_service.update_app(app_model, args)
+
+        return app_model
 
     @setup_required
     @login_required
     @account_initialization_required
-    def delete(self, app_id):
+    @get_app_model
+    def delete(self, app_model):
         """Delete app"""
-        app_id = str(app_id)
+        if not current_user.is_admin_or_owner:
+            raise Forbidden()
+
+        app_service = AppService()
+        app_service.delete_app(app_model)
+
+        return {'result': 'success'}, 204
 
+
+class AppCopyApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model
+    @marshal_with(app_detail_fields_with_site)
+    def post(self, app_model):
+        """Copy app"""
+        # The role of the current user in the ta table must be admin or owner
         if not current_user.is_admin_or_owner:
             raise Forbidden()
 
-        app = _get_app(app_id, current_user.current_tenant_id)
+        parser = reqparse.RequestParser()
+        parser.add_argument('name', type=str, location='json')
+        parser.add_argument('description', type=str, location='json')
+        parser.add_argument('icon', type=str, location='json')
+        parser.add_argument('icon_background', type=str, location='json')
+        args = parser.parse_args()
 
-        db.session.delete(app)
-        db.session.commit()
+        app_service = AppService()
+        data = app_service.export_app(app_model)
+        app = app_service.import_app(current_user.current_tenant_id, data, args, current_user)
 
-        # todo delete related data??
-        # model_config, site, api_token, conversation, message, message_feedback, message_annotation
+        return app, 201
 
-        app_was_deleted.send(app)
 
-        return {'result': 'success'}, 204
+class AppExportApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model
+    def get(self, app_model):
+        """Export app"""
+        app_service = AppService()
+
+        return {
+            "data": app_service.export_app(app_model)
+        }
 
 
 class AppNameApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
+    @get_app_model
     @marshal_with(app_detail_fields)
-    def post(self, app_id):
-        app_id = str(app_id)
-        app = _get_app(app_id, current_user.current_tenant_id)
-
+    def post(self, app_model):
         parser = reqparse.RequestParser()
         parser.add_argument('name', type=str, required=True, location='json')
         args = parser.parse_args()
 
-        app.name = args.get('name')
-        app.updated_at = datetime.utcnow()
-        db.session.commit()
-        return app
+        app_service = AppService()
+        app_model = app_service.update_app_name(app_model, args.get('name'))
+
+        return app_model
 
 
 class AppIconApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
+    @get_app_model
     @marshal_with(app_detail_fields)
-    def post(self, app_id):
-        app_id = str(app_id)
-        app = _get_app(app_id, current_user.current_tenant_id)
-
+    def post(self, app_model):
         parser = reqparse.RequestParser()
         parser.add_argument('icon', type=str, location='json')
         parser.add_argument('icon_background', type=str, location='json')
         args = parser.parse_args()
 
-        app.icon = args.get('icon')
-        app.icon_background = args.get('icon_background')
-        app.updated_at = datetime.utcnow()
-        db.session.commit()
+        app_service = AppService()
+        app_model = app_service.update_app_icon(app_model, args.get('icon'), args.get('icon_background'))
 
-        return app
+        return app_model
 
 
 class AppSiteStatus(Resource):
     @setup_required
     @login_required
     @account_initialization_required
+    @get_app_model
     @marshal_with(app_detail_fields)
-    def post(self, app_id):
+    def post(self, app_model):
         parser = reqparse.RequestParser()
         parser.add_argument('enable_site', type=bool, required=True, location='json')
         args = parser.parse_args()
-        app_id = str(app_id)
-        app = db.session.query(App).filter(App.id == app_id, App.tenant_id == current_user.current_tenant_id).first()
-        if not app:
-            raise AppNotFoundError
 
-        if args.get('enable_site') == app.enable_site:
-            return app
+        app_service = AppService()
+        app_model = app_service.update_app_site_status(app_model, args.get('enable_site'))
 
-        app.enable_site = args.get('enable_site')
-        app.updated_at = datetime.utcnow()
-        db.session.commit()
-        return app
+        return app_model
 
 
 class AppApiStatus(Resource):
     @setup_required
     @login_required
     @account_initialization_required
+    @get_app_model
     @marshal_with(app_detail_fields)
-    def post(self, app_id):
+    def post(self, app_model):
         parser = reqparse.RequestParser()
         parser.add_argument('enable_api', type=bool, required=True, location='json')
         args = parser.parse_args()
 
-        app_id = str(app_id)
-        app = _get_app(app_id, current_user.current_tenant_id)
-
-        if args.get('enable_api') == app.enable_api:
-            return app
-
-        app.enable_api = args.get('enable_api')
-        app.updated_at = datetime.utcnow()
-        db.session.commit()
-        return app
-
-
-class AppCopy(Resource):
-    @staticmethod
-    def create_app_copy(app):
-        copy_app = App(
-            name=app.name + ' copy',
-            icon=app.icon,
-            icon_background=app.icon_background,
-            tenant_id=app.tenant_id,
-            mode=app.mode,
-            app_model_config_id=app.app_model_config_id,
-            enable_site=app.enable_site,
-            enable_api=app.enable_api,
-            api_rpm=app.api_rpm,
-            api_rph=app.api_rph
-        )
-        return copy_app
-
-    @staticmethod
-    def create_app_model_config_copy(app_config, copy_app_id):
-        copy_app_model_config = app_config.copy()
-        copy_app_model_config.app_id = copy_app_id
-
-        return copy_app_model_config
-
-    @setup_required
-    @login_required
-    @account_initialization_required
-    @marshal_with(app_detail_fields)
-    def post(self, app_id):
-        app_id = str(app_id)
-        app = _get_app(app_id, current_user.current_tenant_id)
-
-        copy_app = self.create_app_copy(app)
-        db.session.add(copy_app)
-
-        app_config = db.session.query(AppModelConfig). \
-            filter(AppModelConfig.app_id == app_id). \
-            one_or_none()
-
-        if app_config:
-            copy_app_model_config = self.create_app_model_config_copy(app_config, copy_app.id)
-            db.session.add(copy_app_model_config)
-            db.session.commit()
-            copy_app.app_model_config_id = copy_app_model_config.id
-        db.session.commit()
+        app_service = AppService()
+        app_model = app_service.update_app_api_status(app_model, args.get('enable_api'))
 
-        return copy_app, 201
+        return app_model
 
 
 api.add_resource(AppListApi, '/apps')
-api.add_resource(AppTemplateApi, '/app-templates')
+api.add_resource(AppImportApi, '/apps/import')
 api.add_resource(AppApi, '/apps/<uuid:app_id>')
-api.add_resource(AppCopy, '/apps/<uuid:app_id>/copy')
+api.add_resource(AppCopyApi, '/apps/<uuid:app_id>/copy')
+api.add_resource(AppExportApi, '/apps/<uuid:app_id>/export')
 api.add_resource(AppNameApi, '/apps/<uuid:app_id>/name')
 api.add_resource(AppIconApi, '/apps/<uuid:app_id>/icon')
 api.add_resource(AppSiteStatus, '/apps/<uuid:app_id>/site-enable')
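
Editor's note: the controller above now delegates creation, listing, import/export and updates to an AppService instead of building queries and App/Site rows inline. Below is a minimal, self-contained sketch of that controller/service split; the method names mirror the calls made in app.py, but the in-memory storage and argument handling are assumptions, not the real services/app_service.py. Note also that AppCopyApi is implemented above as export_app followed by import_app, so copying and DSL import share one code path.

import uuid
from dataclasses import dataclass, field
from typing import Optional

_APPS: dict = {}  # in-memory stand-in for the apps table, purely for illustration


@dataclass
class App:
    tenant_id: str
    name: str
    mode: str
    description: str = ''
    icon: Optional[str] = None
    icon_background: Optional[str] = None
    id: str = field(default_factory=lambda: str(uuid.uuid4()))


class AppService:
    """Keeps creation and listing logic out of the HTTP resources."""

    def create_app(self, tenant_id: str, args: dict, account=None) -> App:
        app = App(tenant_id=tenant_id, name=args['name'], mode=args['mode'],
                  description=args.get('description') or '',
                  icon=args.get('icon'), icon_background=args.get('icon_background'))
        _APPS[app.id] = app
        return app

    def get_paginate_apps(self, tenant_id: str, args: dict) -> list:
        # naive page/limit slicing; the real service paginates at the database level
        page, limit = args.get('page', 1), args.get('limit', 20)
        apps = [a for a in _APPS.values() if a.tenant_id == tenant_id]
        return apps[(page - 1) * limit: page * limit]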

+ 14 - 15
api/controllers/console/app/audio.py

@@ -6,7 +6,6 @@ from werkzeug.exceptions import InternalServerError
 
 import services
 from controllers.console import api
-from controllers.console.app import _get_app
 from controllers.console.app.error import (
     AppUnavailableError,
     AudioTooLargeError,
@@ -18,11 +17,13 @@ from controllers.console.app.error import (
     ProviderQuotaExceededError,
     UnsupportedAudioTypeError,
 )
+from controllers.console.app.wraps import get_app_model
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
 from libs.login import login_required
+from models.model import AppMode
 from services.audio_service import AudioService
 from services.errors.audio import (
     AudioTooLargeServiceError,
@@ -36,15 +37,13 @@ class ChatMessageAudioApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def post(self, app_id):
-        app_id = str(app_id)
-        app_model = _get_app(app_id, 'chat')
-
+    @get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
+    def post(self, app_model):
         file = request.files['file']
 
         try:
             response = AudioService.transcript_asr(
-                tenant_id=app_model.tenant_id,
+                app_model=app_model,
                 file=file,
                 end_user=None,
             )
@@ -80,15 +79,13 @@ class ChatMessageTextApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def post(self, app_id):
-        app_id = str(app_id)
-        app_model = _get_app(app_id, None)
-
+    @get_app_model
+    def post(self, app_model):
         try:
             response = AudioService.transcript_tts(
-                tenant_id=app_model.tenant_id,
+                app_model=app_model,
                 text=request.form['text'],
-                voice=request.form['voice'] if request.form['voice'] else app_model.app_model_config.text_to_speech_dict.get('voice'),
+                voice=request.form.get('voice'),
                 streaming=False
             )
 
@@ -120,9 +117,11 @@ class ChatMessageTextApi(Resource):
 
 
 class TextModesApi(Resource):
-    def get(self, app_id: str):
-        app_model = _get_app(str(app_id))
-
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model
+    def get(self, app_model):
         try:
             parser = reqparse.RequestParser()
             parser.add_argument('language', type=str, required=True, location='args')
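
Editor's note: the repeated `app_id = str(app_id); app_model = _get_app(...)` boilerplate is replaced across these controllers by the `get_app_model` decorator imported from controllers/console/app/wraps.py, usable both bare and with a `mode=` restriction. A rough sketch of such a dual-use decorator follows; the lookup, the error handling, and the AppMode values are simplified stand-ins, not the actual wraps.py.

from enum import Enum
from functools import wraps


class AppMode(Enum):
    CHAT = 'chat'
    AGENT_CHAT = 'agent-chat'
    ADVANCED_CHAT = 'advanced-chat'
    COMPLETION = 'completion'
    WORKFLOW = 'workflow'


def _load_app(app_id: str):
    """Stand-in for the tenant-scoped database lookup."""
    raise NotImplementedError


def get_app_model(view=None, *, mode=None):
    def decorator(view_func):
        @wraps(view_func)
        def wrapper(*args, app_id=None, **kwargs):
            app_model = _load_app(str(app_id))
            allowed = mode if isinstance(mode, list) else ([mode] if mode else None)
            if allowed and AppMode(app_model.mode) not in allowed:
                raise ValueError(f'{app_model.mode} app is not supported here')
            # the app_id URL parameter is swapped for the loaded model
            return view_func(*args, app_model=app_model, **kwargs)
        return wrapper

    # supports both @get_app_model and @get_app_model(mode=...)
    return decorator(view) if view is not None else decorator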

+ 22 - 53
api/controllers/console/app/completion.py

@@ -1,16 +1,11 @@
-import json
 import logging
-from collections.abc import Generator
-from typing import Union
 
 import flask_login
-from flask import Response, stream_with_context
 from flask_restful import Resource, reqparse
 from werkzeug.exceptions import InternalServerError, NotFound
 
 import services
 from controllers.console import api
-from controllers.console.app import _get_app
 from controllers.console.app.error import (
     AppUnavailableError,
     CompletionRequestError,
@@ -19,15 +14,18 @@ from controllers.console.app.error import (
     ProviderNotInitializeError,
     ProviderQuotaExceededError,
 )
+from controllers.console.app.wraps import get_app_model
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
-from core.application_queue_manager import ApplicationQueueManager
-from core.entities.application_entities import InvokeFrom
+from core.app.apps.base_app_queue_manager import AppQueueManager
+from core.app.entities.app_invoke_entities import InvokeFrom
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
+from libs import helper
 from libs.helper import uuid_value
 from libs.login import login_required
-from services.completion_service import CompletionService
+from models.model import AppMode
+from services.app_generate_service import AppGenerateService
 
 
 # define completion message api for user
@@ -36,12 +34,8 @@ class CompletionMessageApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def post(self, app_id):
-        app_id = str(app_id)
-
-        # get app info
-        app_model = _get_app(app_id, 'completion')
-
+    @get_app_model(mode=AppMode.COMPLETION)
+    def post(self, app_model):
         parser = reqparse.RequestParser()
         parser.add_argument('inputs', type=dict, required=True, location='json')
         parser.add_argument('query', type=str, location='json', default='')
@@ -57,16 +51,15 @@ class CompletionMessageApi(Resource):
         account = flask_login.current_user
 
         try:
-            response = CompletionService.completion(
+            response = AppGenerateService.generate(
                 app_model=app_model,
                 user=account,
                 args=args,
                 invoke_from=InvokeFrom.DEBUGGER,
-                streaming=streaming,
-                is_model_config_override=True
+                streaming=streaming
             )
 
-            return compact_response(response)
+            return helper.compact_generate_response(response)
         except services.errors.conversation.ConversationNotExistsError:
             raise NotFound("Conversation Not Exists.")
         except services.errors.conversation.ConversationCompletedError:
@@ -93,15 +86,11 @@ class CompletionMessageStopApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def post(self, app_id, task_id):
-        app_id = str(app_id)
-
-        # get app info
-        _get_app(app_id, 'completion')
-
+    @get_app_model(mode=AppMode.COMPLETION)
+    def post(self, app_model, task_id):
         account = flask_login.current_user
 
-        ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, account.id)
+        AppQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, account.id)
 
         return {'result': 'success'}, 200
 
@@ -110,12 +99,8 @@ class ChatMessageApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def post(self, app_id):
-        app_id = str(app_id)
-
-        # get app info
-        app_model = _get_app(app_id, 'chat')
-
+    @get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT])
+    def post(self, app_model):
         parser = reqparse.RequestParser()
         parser.add_argument('inputs', type=dict, required=True, location='json')
         parser.add_argument('query', type=str, required=True, location='json')
@@ -132,16 +117,15 @@ class ChatMessageApi(Resource):
         account = flask_login.current_user
 
         try:
-            response = CompletionService.completion(
+            response = AppGenerateService.generate(
                 app_model=app_model,
                 user=account,
                 args=args,
                 invoke_from=InvokeFrom.DEBUGGER,
-                streaming=streaming,
-                is_model_config_override=True
+                streaming=streaming
             )
 
-            return compact_response(response)
+            return helper.compact_generate_response(response)
         except services.errors.conversation.ConversationNotExistsError:
             raise NotFound("Conversation Not Exists.")
         except services.errors.conversation.ConversationCompletedError:
@@ -164,30 +148,15 @@ class ChatMessageApi(Resource):
             raise InternalServerError()
 
 
-def compact_response(response: Union[dict, Generator]) -> Response:
-    if isinstance(response, dict):
-        return Response(response=json.dumps(response), status=200, mimetype='application/json')
-    else:
-        def generate() -> Generator:
-            yield from response
-
-        return Response(stream_with_context(generate()), status=200,
-                        mimetype='text/event-stream')
-
-
 class ChatMessageStopApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def post(self, app_id, task_id):
-        app_id = str(app_id)
-
-        # get app info
-        _get_app(app_id, 'chat')
-
+    @get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
+    def post(self, app_model, task_id):
         account = flask_login.current_user
 
-        ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, account.id)
+        AppQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, account.id)
 
         return {'result': 'success'}, 200
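
Editor's note: each console controller previously carried its own compact_response helper (deleted above); blocking and streaming responses now go through helper.compact_generate_response instead. The deleted helper shows the intended behaviour, reproduced here as a sketch; the centralized version in libs/helper.py may differ in detail.

import json
from collections.abc import Generator
from typing import Union

from flask import Response, stream_with_context


def compact_generate_response(response: Union[dict, Generator]) -> Response:
    # blocking mode: a plain dict becomes a JSON body
    if isinstance(response, dict):
        return Response(response=json.dumps(response), status=200,
                        mimetype='application/json')

    # streaming mode: a generator is wrapped as a server-sent-event stream
    def generate() -> Generator:
        yield from response

    return Response(stream_with_context(generate()), status=200,
                    mimetype='text/event-stream')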
 

+ 26 - 38
api/controllers/console/app/conversation.py

@@ -9,9 +9,10 @@ from sqlalchemy.orm import joinedload
 from werkzeug.exceptions import NotFound
 
 from controllers.console import api
-from controllers.console.app import _get_app
+from controllers.console.app.wraps import get_app_model
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
+from core.app.entities.app_invoke_entities import InvokeFrom
 from extensions.ext_database import db
 from fields.conversation_fields import (
     conversation_detail_fields,
@@ -21,7 +22,7 @@ from fields.conversation_fields import (
 )
 from libs.helper import datetime_string
 from libs.login import login_required
-from models.model import Conversation, Message, MessageAnnotation
+from models.model import AppMode, Conversation, Message, MessageAnnotation
 
 
 class CompletionConversationApi(Resource):
@@ -29,10 +30,9 @@ class CompletionConversationApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
+    @get_app_model(mode=AppMode.COMPLETION)
     @marshal_with(conversation_pagination_fields)
-    def get(self, app_id):
-        app_id = str(app_id)
-
+    def get(self, app_model):
         parser = reqparse.RequestParser()
         parser.add_argument('keyword', type=str, location='args')
         parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@@ -43,10 +43,7 @@ class CompletionConversationApi(Resource):
         parser.add_argument('limit', type=int_range(1, 100), default=20, location='args')
         args = parser.parse_args()
 
-        # get app info
-        app = _get_app(app_id, 'completion')
-
-        query = db.select(Conversation).where(Conversation.app_id == app.id, Conversation.mode == 'completion')
+        query = db.select(Conversation).where(Conversation.app_id == app_model.id, Conversation.mode == 'completion')
 
         if args['keyword']:
             query = query.join(
@@ -106,24 +103,22 @@ class CompletionConversationDetailApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
+    @get_app_model(mode=AppMode.COMPLETION)
     @marshal_with(conversation_message_detail_fields)
-    def get(self, app_id, conversation_id):
-        app_id = str(app_id)
+    def get(self, app_model, conversation_id):
         conversation_id = str(conversation_id)
 
-        return _get_conversation(app_id, conversation_id, 'completion')
+        return _get_conversation(app_model, conversation_id)
 
     @setup_required
     @login_required
     @account_initialization_required
-    def delete(self, app_id, conversation_id):
-        app_id = str(app_id)
+    @get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
+    def delete(self, app_model, conversation_id):
         conversation_id = str(conversation_id)
 
-        app = _get_app(app_id, 'chat')
-
         conversation = db.session.query(Conversation) \
-            .filter(Conversation.id == conversation_id, Conversation.app_id == app.id).first()
+            .filter(Conversation.id == conversation_id, Conversation.app_id == app_model.id).first()
 
         if not conversation:
             raise NotFound("Conversation Not Exists.")
@@ -139,10 +134,9 @@ class ChatConversationApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
+    @get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
     @marshal_with(conversation_with_summary_pagination_fields)
-    def get(self, app_id):
-        app_id = str(app_id)
-
+    def get(self, app_model):
         parser = reqparse.RequestParser()
         parser.add_argument('keyword', type=str, location='args')
         parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@@ -154,10 +148,7 @@ class ChatConversationApi(Resource):
         parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
         args = parser.parse_args()
 
-        # get app info
-        app = _get_app(app_id, 'chat')
-
-        query = db.select(Conversation).where(Conversation.app_id == app.id, Conversation.mode == 'chat')
+        query = db.select(Conversation).where(Conversation.app_id == app_model.id)
 
         if args['keyword']:
             query = query.join(
@@ -211,6 +202,9 @@ class ChatConversationApi(Resource):
                 .having(func.count(Message.id) >= args['message_count_gte'])
             )
 
+        if app_model.mode == AppMode.ADVANCED_CHAT.value:
+            query = query.where(Conversation.invoke_from != InvokeFrom.DEBUGGER.value)
+
         query = query.order_by(Conversation.created_at.desc())
 
         conversations = db.paginate(
@@ -228,25 +222,22 @@ class ChatConversationDetailApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
+    @get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
     @marshal_with(conversation_detail_fields)
-    def get(self, app_id, conversation_id):
-        app_id = str(app_id)
+    def get(self, app_model, conversation_id):
         conversation_id = str(conversation_id)
 
-        return _get_conversation(app_id, conversation_id, 'chat')
+        return _get_conversation(app_model, conversation_id)
 
     @setup_required
     @login_required
+    @get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
     @account_initialization_required
-    def delete(self, app_id, conversation_id):
-        app_id = str(app_id)
+    def delete(self, app_model, conversation_id):
         conversation_id = str(conversation_id)
 
-        # get app info
-        app = _get_app(app_id, 'chat')
-
         conversation = db.session.query(Conversation) \
-            .filter(Conversation.id == conversation_id, Conversation.app_id == app.id).first()
+            .filter(Conversation.id == conversation_id, Conversation.app_id == app_model.id).first()
 
         if not conversation:
             raise NotFound("Conversation Not Exists.")
@@ -263,12 +254,9 @@ api.add_resource(ChatConversationApi, '/apps/<uuid:app_id>/chat-conversations')
 api.add_resource(ChatConversationDetailApi, '/apps/<uuid:app_id>/chat-conversations/<uuid:conversation_id>')
 
 
-def _get_conversation(app_id, conversation_id, mode):
-    # get app info
-    app = _get_app(app_id, mode)
-
+def _get_conversation(app_model, conversation_id):
     conversation = db.session.query(Conversation) \
-        .filter(Conversation.id == conversation_id, Conversation.app_id == app.id).first()
+        .filter(Conversation.id == conversation_id, Conversation.app_id == app_model.id).first()
 
     if not conversation:
         raise NotFound("Conversation Not Exists.")
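
Editor's note: besides the decorator swap, the one behavioural addition in this file is the debugger filter: for advanced-chat apps, conversations created from the workflow debugger are hidden from the console list by comparing Conversation.invoke_from against InvokeFrom.DEBUGGER. A toy illustration of that filter; the enum values and in-memory data are illustrative, not the real models.

from enum import Enum


class InvokeFrom(Enum):
    DEBUGGER = 'debugger'
    WEB_APP = 'web-app'


conversations = [
    {'id': 'c1', 'invoke_from': InvokeFrom.DEBUGGER.value},
    {'id': 'c2', 'invoke_from': InvokeFrom.WEB_APP.value},
]

# mirrors: query.where(Conversation.invoke_from != InvokeFrom.DEBUGGER.value)
visible = [c for c in conversations if c['invoke_from'] != InvokeFrom.DEBUGGER.value]
print([c['id'] for c in visible])  # ['c2']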

+ 6 - 0
api/controllers/console/app/error.py

@@ -85,3 +85,9 @@ class TooManyFilesError(BaseHTTPException):
     error_code = 'too_many_files'
     description = "Only one file is allowed."
     code = 400
+
+
+class DraftWorkflowNotExist(BaseHTTPException):
+    error_code = 'draft_workflow_not_exist'
+    description = "Draft workflow needs to be initialized."
+    code = 400

+ 1 - 1
api/controllers/console/app/generator.py

@@ -11,7 +11,7 @@ from controllers.console.app.error import (
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
-from core.generator.llm_generator import LLMGenerator
+from core.llm_generator.llm_generator import LLMGenerator
 from core.model_runtime.errors.invoke import InvokeError
 from libs.login import login_required
 

+ 25 - 106
api/controllers/console/app/message.py

@@ -1,26 +1,22 @@
-import json
 import logging
-from collections.abc import Generator
-from typing import Union
 
-from flask import Response, stream_with_context
 from flask_login import current_user
 from flask_restful import Resource, fields, marshal_with, reqparse
 from flask_restful.inputs import int_range
 from werkzeug.exceptions import Forbidden, InternalServerError, NotFound
 
 from controllers.console import api
-from controllers.console.app import _get_app
 from controllers.console.app.error import (
-    AppMoreLikeThisDisabledError,
     CompletionRequestError,
     ProviderModelCurrentlyNotSupportError,
     ProviderNotInitializeError,
     ProviderQuotaExceededError,
 )
+from controllers.console.app.wraps import get_app_model
+from controllers.console.explore.error import AppSuggestedQuestionsAfterAnswerDisabledError
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
-from core.entities.application_entities import InvokeFrom
+from core.app.entities.app_invoke_entities import InvokeFrom
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
 from extensions.ext_database import db
@@ -28,12 +24,10 @@ from fields.conversation_fields import annotation_fields, message_detail_fields
 from libs.helper import uuid_value
 from libs.infinite_scroll_pagination import InfiniteScrollPagination
 from libs.login import login_required
-from models.model import Conversation, Message, MessageAnnotation, MessageFeedback
+from models.model import AppMode, Conversation, Message, MessageAnnotation, MessageFeedback
 from services.annotation_service import AppAnnotationService
-from services.completion_service import CompletionService
-from services.errors.app import MoreLikeThisDisabledError
 from services.errors.conversation import ConversationNotExistsError
-from services.errors.message import MessageNotExistsError
+from services.errors.message import MessageNotExistsError, SuggestedQuestionsAfterAnswerDisabledError
 from services.message_service import MessageService
 
 
@@ -46,14 +40,10 @@ class ChatMessageListApi(Resource):
 
     @setup_required
     @login_required
+    @get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
     @account_initialization_required
     @marshal_with(message_infinite_scroll_pagination_fields)
-    def get(self, app_id):
-        app_id = str(app_id)
-
-        # get app info
-        app = _get_app(app_id, 'chat')
-
+    def get(self, app_model):
         parser = reqparse.RequestParser()
         parser.add_argument('conversation_id', required=True, type=uuid_value, location='args')
         parser.add_argument('first_id', type=uuid_value, location='args')
@@ -62,7 +52,7 @@ class ChatMessageListApi(Resource):
 
         conversation = db.session.query(Conversation).filter(
             Conversation.id == args['conversation_id'],
-            Conversation.app_id == app.id
+            Conversation.app_id == app_model.id
         ).first()
 
         if not conversation:
@@ -110,12 +100,8 @@ class MessageFeedbackApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def post(self, app_id):
-        app_id = str(app_id)
-
-        # get app info
-        app = _get_app(app_id)
-
+    @get_app_model
+    def post(self, app_model):
         parser = reqparse.RequestParser()
         parser.add_argument('message_id', required=True, type=uuid_value, location='json')
         parser.add_argument('rating', type=str, choices=['like', 'dislike', None], location='json')
@@ -125,7 +111,7 @@ class MessageFeedbackApi(Resource):
 
         message = db.session.query(Message).filter(
             Message.id == message_id,
-            Message.app_id == app.id
+            Message.app_id == app_model.id
         ).first()
 
         if not message:
@@ -141,7 +127,7 @@ class MessageFeedbackApi(Resource):
             raise ValueError('rating cannot be None when feedback not exists')
         else:
             feedback = MessageFeedback(
-                app_id=app.id,
+                app_id=app_model.id,
                 conversation_id=message.conversation_id,
                 message_id=message.id,
                 rating=args['rating'],
@@ -160,21 +146,20 @@ class MessageAnnotationApi(Resource):
     @login_required
     @account_initialization_required
     @cloud_edition_billing_resource_check('annotation')
+    @get_app_model
     @marshal_with(annotation_fields)
-    def post(self, app_id):
+    def post(self, app_model):
         # The role of the current user in the ta table must be admin or owner
         if not current_user.is_admin_or_owner:
             raise Forbidden()
 
-        app_id = str(app_id)
-
         parser = reqparse.RequestParser()
         parser.add_argument('message_id', required=False, type=uuid_value, location='json')
         parser.add_argument('question', required=True, type=str, location='json')
         parser.add_argument('answer', required=True, type=str, location='json')
         parser.add_argument('annotation_reply', required=False, type=dict, location='json')
         args = parser.parse_args()
-        annotation = AppAnnotationService.up_insert_app_annotation_from_message(args, app_id)
+        annotation = AppAnnotationService.up_insert_app_annotation_from_message(args, app_model.id)
 
         return annotation
 
@@ -183,93 +168,29 @@ class MessageAnnotationCountApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def get(self, app_id):
-        app_id = str(app_id)
-
-        # get app info
-        app = _get_app(app_id)
-
+    @get_app_model
+    def get(self, app_model):
         count = db.session.query(MessageAnnotation).filter(
-            MessageAnnotation.app_id == app.id
+            MessageAnnotation.app_id == app_model.id
         ).count()
 
         return {'count': count}
 
 
-class MessageMoreLikeThisApi(Resource):
-    @setup_required
-    @login_required
-    @account_initialization_required
-    def get(self, app_id, message_id):
-        app_id = str(app_id)
-        message_id = str(message_id)
-
-        parser = reqparse.RequestParser()
-        parser.add_argument('response_mode', type=str, required=True, choices=['blocking', 'streaming'],
-                            location='args')
-        args = parser.parse_args()
-
-        streaming = args['response_mode'] == 'streaming'
-
-        # get app info
-        app_model = _get_app(app_id, 'completion')
-
-        try:
-            response = CompletionService.generate_more_like_this(
-                app_model=app_model,
-                user=current_user,
-                message_id=message_id,
-                invoke_from=InvokeFrom.DEBUGGER,
-                streaming=streaming
-            )
-            return compact_response(response)
-        except MessageNotExistsError:
-            raise NotFound("Message Not Exists.")
-        except MoreLikeThisDisabledError:
-            raise AppMoreLikeThisDisabledError()
-        except ProviderTokenNotInitError as ex:
-            raise ProviderNotInitializeError(ex.description)
-        except QuotaExceededError:
-            raise ProviderQuotaExceededError()
-        except ModelCurrentlyNotSupportError:
-            raise ProviderModelCurrentlyNotSupportError()
-        except InvokeError as e:
-            raise CompletionRequestError(e.description)
-        except ValueError as e:
-            raise e
-        except Exception as e:
-            logging.exception("internal server error.")
-            raise InternalServerError()
-
-
-def compact_response(response: Union[dict, Generator]) -> Response:
-    if isinstance(response, dict):
-        return Response(response=json.dumps(response), status=200, mimetype='application/json')
-    else:
-        def generate() -> Generator:
-            yield from response
-
-        return Response(stream_with_context(generate()), status=200,
-                        mimetype='text/event-stream')
-
-
 class MessageSuggestedQuestionApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def get(self, app_id, message_id):
-        app_id = str(app_id)
+    @get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
+    def get(self, app_model, message_id):
         message_id = str(message_id)
 
-        # get app info
-        app_model = _get_app(app_id, 'chat')
-
         try:
             questions = MessageService.get_suggested_questions_after_answer(
                 app_model=app_model,
                 message_id=message_id,
                 user=current_user,
-                check_enabled=False
+                invoke_from=InvokeFrom.DEBUGGER
             )
         except MessageNotExistsError:
             raise NotFound("Message not found")
@@ -283,6 +204,8 @@ class MessageSuggestedQuestionApi(Resource):
             raise ProviderModelCurrentlyNotSupportError()
         except InvokeError as e:
             raise CompletionRequestError(e.description)
+        except SuggestedQuestionsAfterAnswerDisabledError:
+            raise AppSuggestedQuestionsAfterAnswerDisabledError()
         except Exception:
             logging.exception("internal server error.")
             raise InternalServerError()
@@ -294,14 +217,11 @@ class MessageApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
+    @get_app_model
     @marshal_with(message_detail_fields)
-    def get(self, app_id, message_id):
-        app_id = str(app_id)
+    def get(self, app_model, message_id):
         message_id = str(message_id)
 
-        # get app info
-        app_model = _get_app(app_id)
-
         message = db.session.query(Message).filter(
             Message.id == message_id,
             Message.app_id == app_model.id
@@ -313,7 +233,6 @@ class MessageApi(Resource):
         return message
 
 
-api.add_resource(MessageMoreLikeThisApi, '/apps/<uuid:app_id>/completion-messages/<uuid:message_id>/more-like-this')
 api.add_resource(MessageSuggestedQuestionApi, '/apps/<uuid:app_id>/chat-messages/<uuid:message_id>/suggested-questions')
 api.add_resource(ChatMessageListApi, '/apps/<uuid:app_id>/chat-messages', endpoint='console_chat_messages')
 api.add_resource(MessageFeedbackApi, '/apps/<uuid:app_id>/feedbacks')
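
Editor's note: MessageFeedbackApi keeps a per-message rating; the hunk above shows the create branch and the guard against clearing a rating that does not exist. A small sketch of the overall upsert rule follows; the update/delete branches are assumed from context, only the create branch appears in this diff.

from typing import Optional


def apply_feedback(existing: Optional[str], new: Optional[str]) -> Optional[str]:
    """Return the rating to store after the request, or None to clear it."""
    if existing is None and new is None:
        # mirrors the controller: rating cannot be None when feedback not exists
        raise ValueError('rating cannot be None when feedback not exists')
    # a None rating clears existing feedback; otherwise the new rating replaces it
    return new


print(apply_feedback(None, 'like'))       # create -> 'like'
print(apply_feedback('like', 'dislike'))  # update -> 'dislike'
print(apply_feedback('like', None))       # delete -> None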

+ 82 - 87
api/controllers/console/app/model_config.py

@@ -5,16 +5,16 @@ from flask_login import current_user
 from flask_restful import Resource
 
 from controllers.console import api
-from controllers.console.app import _get_app
+from controllers.console.app.wraps import get_app_model
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
-from core.entities.application_entities import AgentToolEntity
+from core.agent.entities import AgentToolEntity
 from core.tools.tool_manager import ToolManager
 from core.tools.utils.configuration import ToolParameterConfigurationManager
 from events.app_event import app_model_config_was_updated
 from extensions.ext_database import db
 from libs.login import login_required
-from models.model import AppModelConfig
+from models.model import AppMode, AppModelConfig
 from services.app_model_config_service import AppModelConfigService
 
 
@@ -23,118 +23,113 @@ class ModelConfigResource(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def post(self, app_id):
+    @get_app_model(mode=[AppMode.AGENT_CHAT, AppMode.CHAT, AppMode.COMPLETION])
+    def post(self, app_model):
         """Modify app model config"""
-        app_id = str(app_id)
-
-        app = _get_app(app_id)
-
         # validate config
         model_configuration = AppModelConfigService.validate_configuration(
             tenant_id=current_user.current_tenant_id,
-            account=current_user,
             config=request.json,
-            app_mode=app.mode
+            app_mode=AppMode.value_of(app_model.mode)
         )
 
         new_app_model_config = AppModelConfig(
-            app_id=app.id,
+            app_id=app_model.id,
         )
         new_app_model_config = new_app_model_config.from_model_config_dict(model_configuration)
 
-        # get original app model config
-        original_app_model_config: AppModelConfig = db.session.query(AppModelConfig).filter(
-            AppModelConfig.id == app.app_model_config_id
-        ).first()
-        agent_mode = original_app_model_config.agent_mode_dict
-        # decrypt agent tool parameters if it's secret-input
-        parameter_map = {}
-        masked_parameter_map = {}
-        tool_map = {}
-        for tool in agent_mode.get('tools') or []:
-            if not isinstance(tool, dict) or len(tool.keys()) <= 3:
-                continue
-            
-            agent_tool_entity = AgentToolEntity(**tool)
-            # get tool
-            try:
-                tool_runtime = ToolManager.get_agent_tool_runtime(
-                    tenant_id=current_user.current_tenant_id,
-                    agent_tool=agent_tool_entity,
-                    agent_callback=None
-                )
-                manager = ToolParameterConfigurationManager(
-                    tenant_id=current_user.current_tenant_id,
-                    tool_runtime=tool_runtime,
-                    provider_name=agent_tool_entity.provider_id,
-                    provider_type=agent_tool_entity.provider_type,
-                )
-            except Exception as e:
-                continue
-
-            # get decrypted parameters
-            if agent_tool_entity.tool_parameters:
-                parameters = manager.decrypt_tool_parameters(agent_tool_entity.tool_parameters or {})
-                masked_parameter = manager.mask_tool_parameters(parameters or {})
-            else:
-                parameters = {}
-                masked_parameter = {}
-
-            key = f'{agent_tool_entity.provider_id}.{agent_tool_entity.provider_type}.{agent_tool_entity.tool_name}'
-            masked_parameter_map[key] = masked_parameter
-            parameter_map[key] = parameters
-            tool_map[key] = tool_runtime
-
-        # encrypt agent tool parameters if it's secret-input
-        agent_mode = new_app_model_config.agent_mode_dict
-        for tool in agent_mode.get('tools') or []:
-            agent_tool_entity = AgentToolEntity(**tool)
-            
-            # get tool
-            key = f'{agent_tool_entity.provider_id}.{agent_tool_entity.provider_type}.{agent_tool_entity.tool_name}'
-            if key in tool_map:
-                tool_runtime = tool_map[key]
-            else:
+        if app_model.mode == AppMode.AGENT_CHAT.value or app_model.is_agent:
+            # get original app model config
+            original_app_model_config: AppModelConfig = db.session.query(AppModelConfig).filter(
+                AppModelConfig.id == app_model.app_model_config_id
+            ).first()
+            agent_mode = original_app_model_config.agent_mode_dict
+            # decrypt agent tool parameters if it's secret-input
+            parameter_map = {}
+            masked_parameter_map = {}
+            tool_map = {}
+            for tool in agent_mode.get('tools') or []:
+                if not isinstance(tool, dict) or len(tool.keys()) <= 3:
+                    continue
+
+                agent_tool_entity = AgentToolEntity(**tool)
+                # get tool
                 try:
                     tool_runtime = ToolManager.get_agent_tool_runtime(
                         tenant_id=current_user.current_tenant_id,
                         agent_tool=agent_tool_entity,
-                        agent_callback=None
+                    )
+                    manager = ToolParameterConfigurationManager(
+                        tenant_id=current_user.current_tenant_id,
+                        tool_runtime=tool_runtime,
+                        provider_name=agent_tool_entity.provider_id,
+                        provider_type=agent_tool_entity.provider_type,
                     )
                 except Exception as e:
                     continue
-            
-            manager = ToolParameterConfigurationManager(
-                tenant_id=current_user.current_tenant_id,
-                tool_runtime=tool_runtime,
-                provider_name=agent_tool_entity.provider_id,
-                provider_type=agent_tool_entity.provider_type,
-            )
-            manager.delete_tool_parameters_cache()
-
-            # override parameters if it equals to masked parameters
-            if agent_tool_entity.tool_parameters:
-                if key not in masked_parameter_map:
-                    continue
 
-                if agent_tool_entity.tool_parameters == masked_parameter_map[key]:
-                    agent_tool_entity.tool_parameters = parameter_map[key]
+                # get decrypted parameters
+                if agent_tool_entity.tool_parameters:
+                    parameters = manager.decrypt_tool_parameters(agent_tool_entity.tool_parameters or {})
+                    masked_parameter = manager.mask_tool_parameters(parameters or {})
+                else:
+                    parameters = {}
+                    masked_parameter = {}
+
+                key = f'{agent_tool_entity.provider_id}.{agent_tool_entity.provider_type}.{agent_tool_entity.tool_name}'
+                masked_parameter_map[key] = masked_parameter
+                parameter_map[key] = parameters
+                tool_map[key] = tool_runtime
+
+            # encrypt agent tool parameters if it's secret-input
+            agent_mode = new_app_model_config.agent_mode_dict
+            for tool in agent_mode.get('tools') or []:
+                agent_tool_entity = AgentToolEntity(**tool)
+
+                # get tool
+                key = f'{agent_tool_entity.provider_id}.{agent_tool_entity.provider_type}.{agent_tool_entity.tool_name}'
+                if key in tool_map:
+                    tool_runtime = tool_map[key]
+                else:
+                    try:
+                        tool_runtime = ToolManager.get_agent_tool_runtime(
+                            tenant_id=current_user.current_tenant_id,
+                            agent_tool=agent_tool_entity,
+                        )
+                    except Exception as e:
+                        continue
+
+                manager = ToolParameterConfigurationManager(
+                    tenant_id=current_user.current_tenant_id,
+                    tool_runtime=tool_runtime,
+                    provider_name=agent_tool_entity.provider_id,
+                    provider_type=agent_tool_entity.provider_type,
+                )
+                manager.delete_tool_parameters_cache()
+
+                # override parameters if it equals to masked parameters
+                if agent_tool_entity.tool_parameters:
+                    if key not in masked_parameter_map:
+                        continue
+
+                    if agent_tool_entity.tool_parameters == masked_parameter_map[key]:
+                        agent_tool_entity.tool_parameters = parameter_map[key]
 
-            # encrypt parameters
-            if agent_tool_entity.tool_parameters:
-                tool['tool_parameters'] = manager.encrypt_tool_parameters(agent_tool_entity.tool_parameters or {})
+                # encrypt parameters
+                if agent_tool_entity.tool_parameters:
+                    tool['tool_parameters'] = manager.encrypt_tool_parameters(agent_tool_entity.tool_parameters or {})
 
-        # update app model config
-        new_app_model_config.agent_mode = json.dumps(agent_mode)
+            # update app model config
+            new_app_model_config.agent_mode = json.dumps(agent_mode)
 
         db.session.add(new_app_model_config)
         db.session.flush()
 
-        app.app_model_config_id = new_app_model_config.id
+        app_model.app_model_config_id = new_app_model_config.id
         db.session.commit()
 
         app_model_config_was_updated.send(
-            app,
+            app_model,
             app_model_config=new_app_model_config
         )
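
Editor's note: this hunk mainly moves the secret-parameter handling under an agent-mode guard; the round-trip itself is unchanged. Stored tool secrets are decrypted, masked for display, and on save a submission that still equals its masked form is swapped back to the original plaintext before re-encryption, so secrets are never echoed back to the browser. A toy version of that comparison; the real code works per agent tool via ToolParameterConfigurationManager.

def mask(value: str) -> str:
    """Show only the first two characters, as a UI-safe placeholder."""
    return value[:2] + '*' * max(len(value) - 2, 0)


def resolve_tool_parameters(submitted: dict, stored_plain: dict) -> dict:
    """Return the plaintext parameters that should be re-encrypted and saved."""
    masked = {k: mask(v) for k, v in stored_plain.items()}
    if submitted == masked:
        # the form was saved without touching the secrets: keep the originals
        return stored_plain
    return submitted


stored = {'api_key': 'sk-123456'}
assert resolve_tool_parameters({'api_key': mask('sk-123456')}, stored) == stored
assert resolve_tool_parameters({'api_key': 'sk-new'}, stored) == {'api_key': 'sk-new'}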
 

+ 5 - 9
api/controllers/console/app/site.py

@@ -4,7 +4,7 @@ from werkzeug.exceptions import Forbidden, NotFound
 
 from constants.languages import supported_language
 from controllers.console import api
-from controllers.console.app import _get_app
+from controllers.console.app.wraps import get_app_model
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
 from extensions.ext_database import db
@@ -34,13 +34,11 @@ class AppSite(Resource):
     @setup_required
     @login_required
     @account_initialization_required
+    @get_app_model
     @marshal_with(app_site_fields)
-    def post(self, app_id):
+    def post(self, app_model):
         args = parse_app_site_args()
 
-        app_id = str(app_id)
-        app_model = _get_app(app_id)
-
         # The role of the current user in the ta table must be admin or owner
         if not current_user.is_admin_or_owner:
             raise Forbidden()
@@ -82,11 +80,9 @@ class AppSiteAccessTokenReset(Resource):
     @setup_required
     @login_required
     @account_initialization_required
+    @get_app_model
     @marshal_with(app_site_fields)
-    def post(self, app_id):
-        app_id = str(app_id)
-        app_model = _get_app(app_id)
-
+    def post(self, app_model):
         # The role of the current user in the ta table must be admin or owner
         if not current_user.is_admin_or_owner:
             raise Forbidden()

+ 16 - 22
api/controllers/console/app/statistic.py

@@ -7,12 +7,13 @@ from flask_login import current_user
 from flask_restful import Resource, reqparse
 
 from controllers.console import api
-from controllers.console.app import _get_app
+from controllers.console.app.wraps import get_app_model
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
 from extensions.ext_database import db
 from libs.helper import datetime_string
 from libs.login import login_required
+from models.model import AppMode
 
 
 class DailyConversationStatistic(Resource):
@@ -20,10 +21,9 @@ class DailyConversationStatistic(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def get(self, app_id):
+    @get_app_model
+    def get(self, app_model):
         account = current_user
-        app_id = str(app_id)
-        app_model = _get_app(app_id)
 
         parser = reqparse.RequestParser()
         parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@@ -81,10 +81,9 @@ class DailyTerminalsStatistic(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def get(self, app_id):
+    @get_app_model
+    def get(self, app_model):
         account = current_user
-        app_id = str(app_id)
-        app_model = _get_app(app_id)
 
         parser = reqparse.RequestParser()
         parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@@ -141,10 +140,9 @@ class DailyTokenCostStatistic(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def get(self, app_id):
+    @get_app_model
+    def get(self, app_model):
         account = current_user
-        app_id = str(app_id)
-        app_model = _get_app(app_id)
 
         parser = reqparse.RequestParser()
         parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@@ -205,10 +203,9 @@ class AverageSessionInteractionStatistic(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def get(self, app_id):
+    @get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
+    def get(self, app_model):
         account = current_user
-        app_id = str(app_id)
-        app_model = _get_app(app_id, 'chat')
 
         parser = reqparse.RequestParser()
         parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@@ -271,10 +268,9 @@ class UserSatisfactionRateStatistic(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def get(self, app_id):
+    @get_app_model
+    def get(self, app_model):
         account = current_user
-        app_id = str(app_id)
-        app_model = _get_app(app_id)
 
         parser = reqparse.RequestParser()
         parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@@ -334,10 +330,9 @@ class AverageResponseTimeStatistic(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def get(self, app_id):
+    @get_app_model(mode=AppMode.COMPLETION)
+    def get(self, app_model):
         account = current_user
-        app_id = str(app_id)
-        app_model = _get_app(app_id, 'completion')
 
         parser = reqparse.RequestParser()
         parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@@ -396,10 +391,9 @@ class TokensPerSecondStatistic(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    def get(self, app_id):
+    @get_app_model
+    def get(self, app_model):
         account = current_user
-        app_id = str(app_id)
-        app_model = _get_app(app_id)
 
         parser = reqparse.RequestParser()
         parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')

+ 324 - 0
api/controllers/console/app/workflow.py

@@ -0,0 +1,324 @@
+import json
+import logging
+
+from flask import abort, request
+from flask_restful import Resource, marshal_with, reqparse
+from werkzeug.exceptions import InternalServerError, NotFound
+
+import services
+from controllers.console import api
+from controllers.console.app.error import ConversationCompletedError, DraftWorkflowNotExist
+from controllers.console.app.wraps import get_app_model
+from controllers.console.setup import setup_required
+from controllers.console.wraps import account_initialization_required
+from core.app.apps.base_app_queue_manager import AppQueueManager
+from core.app.entities.app_invoke_entities import InvokeFrom
+from fields.workflow_fields import workflow_fields
+from fields.workflow_run_fields import workflow_run_node_execution_fields
+from libs import helper
+from libs.helper import TimestampField, uuid_value
+from libs.login import current_user, login_required
+from models.model import App, AppMode
+from services.app_generate_service import AppGenerateService
+from services.workflow_service import WorkflowService
+
+logger = logging.getLogger(__name__)
+
+
+class DraftWorkflowApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
+    @marshal_with(workflow_fields)
+    def get(self, app_model: App):
+        """
+        Get draft workflow
+        """
+        # fetch draft workflow by app_model
+        workflow_service = WorkflowService()
+        workflow = workflow_service.get_draft_workflow(app_model=app_model)
+
+        if not workflow:
+            raise DraftWorkflowNotExist()
+
+        # return the draft workflow (a missing draft raises DraftWorkflowNotExist above)
+        return workflow
+
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
+    def post(self, app_model: App):
+        """
+        Sync draft workflow
+        """
+        content_type = request.headers.get('Content-Type', '')  # default to '' so the checks below never see None
+
+        if 'application/json' in content_type:
+            parser = reqparse.RequestParser()
+            parser.add_argument('graph', type=dict, required=True, nullable=False, location='json')
+            parser.add_argument('features', type=dict, required=True, nullable=False, location='json')
+            args = parser.parse_args()
+        elif 'text/plain' in content_type:
+            try:
+                data = json.loads(request.data.decode('utf-8'))
+                if 'graph' not in data or 'features' not in data:
+                    raise ValueError('graph or features not found in data')
+
+                if not isinstance(data.get('graph'), dict) or not isinstance(data.get('features'), dict):
+                    raise ValueError('graph or features is not a dict')
+
+                args = {
+                    'graph': data.get('graph'),
+                    'features': data.get('features')
+                }
+            except json.JSONDecodeError:
+                return {'message': 'Invalid JSON data'}, 400
+        else:
+            abort(415)
+
+        workflow_service = WorkflowService()
+        workflow = workflow_service.sync_draft_workflow(
+            app_model=app_model,
+            graph=args.get('graph'),
+            features=args.get('features'),
+            account=current_user
+        )
+
+        return {
+            "result": "success",
+            "updated_at": TimestampField().format(workflow.updated_at or workflow.created_at)
+        }
+
+
+class AdvancedChatDraftWorkflowRunApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.ADVANCED_CHAT])
+    def post(self, app_model: App):
+        """
+        Run draft workflow
+        """
+        parser = reqparse.RequestParser()
+        parser.add_argument('inputs', type=dict, location='json')
+        parser.add_argument('query', type=str, required=True, location='json', default='')
+        parser.add_argument('files', type=list, location='json')
+        parser.add_argument('conversation_id', type=uuid_value, location='json')
+        args = parser.parse_args()
+
+        try:
+            response = AppGenerateService.generate(
+                app_model=app_model,
+                user=current_user,
+                args=args,
+                invoke_from=InvokeFrom.DEBUGGER,
+                streaming=True
+            )
+
+            return helper.compact_generate_response(response)
+        except services.errors.conversation.ConversationNotExistsError:
+            raise NotFound("Conversation Not Exists.")
+        except services.errors.conversation.ConversationCompletedError:
+            raise ConversationCompletedError()
+        except ValueError as e:
+            raise e
+        except Exception as e:
+            logger.exception("internal server error.")
+            raise InternalServerError()
+
+
+class DraftWorkflowRunApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.WORKFLOW])
+    def post(self, app_model: App):
+        """
+        Run draft workflow
+        """
+        parser = reqparse.RequestParser()
+        parser.add_argument('inputs', type=dict, required=True, nullable=False, location='json')
+        parser.add_argument('files', type=list, required=False, location='json')
+        args = parser.parse_args()
+
+        try:
+            response = AppGenerateService.generate(
+                app_model=app_model,
+                user=current_user,
+                args=args,
+                invoke_from=InvokeFrom.DEBUGGER,
+                streaming=True
+            )
+
+            return helper.compact_generate_response(response)
+        except ValueError as e:
+            raise e
+        except Exception as e:
+            logger.exception("internal server error.")
+            raise InternalServerError()
+
+
+class WorkflowTaskStopApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
+    def post(self, app_model: App, task_id: str):
+        """
+        Stop workflow task
+        """
+        AppQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, current_user.id)
+
+        return {
+            "result": "success"
+        }
+
+
+class DraftWorkflowNodeRunApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
+    @marshal_with(workflow_run_node_execution_fields)
+    def post(self, app_model: App, node_id: str):
+        """
+        Run draft workflow node
+        """
+        parser = reqparse.RequestParser()
+        parser.add_argument('inputs', type=dict, required=True, nullable=False, location='json')
+        args = parser.parse_args()
+
+        workflow_service = WorkflowService()
+        workflow_node_execution = workflow_service.run_draft_workflow_node(
+            app_model=app_model,
+            node_id=node_id,
+            user_inputs=args.get('inputs'),
+            account=current_user
+        )
+
+        return workflow_node_execution
+
+
+class PublishedWorkflowApi(Resource):
+
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
+    @marshal_with(workflow_fields)
+    def get(self, app_model: App):
+        """
+        Get published workflow
+        """
+        # fetch published workflow by app_model
+        workflow_service = WorkflowService()
+        workflow = workflow_service.get_published_workflow(app_model=app_model)
+
+        # return workflow, if not found, return None
+        return workflow
+
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
+    def post(self, app_model: App):
+        """
+        Publish workflow
+        """
+        workflow_service = WorkflowService()
+        workflow = workflow_service.publish_workflow(app_model=app_model, account=current_user)
+
+        return {
+            "result": "success",
+            "created_at": TimestampField().format(workflow.created_at)
+        }
+
+
+class DefaultBlockConfigsApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
+    def get(self, app_model: App):
+        """
+        Get default block configs
+        """
+        # Get default block configs
+        workflow_service = WorkflowService()
+        return workflow_service.get_default_block_configs()
+
+
+class DefaultBlockConfigApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
+    def get(self, app_model: App, block_type: str):
+        """
+        Get default block config
+        """
+        parser = reqparse.RequestParser()
+        parser.add_argument('q', type=str, location='args')
+        args = parser.parse_args()
+
+        filters = None
+        if args.get('q'):
+            try:
+                filters = json.loads(args.get('q'))
+            except json.JSONDecodeError:
+                raise ValueError('Invalid filters')
+
+        # Get default block configs
+        workflow_service = WorkflowService()
+        return workflow_service.get_default_block_config(
+            node_type=block_type,
+            filters=filters
+        )
+
+
+class ConvertToWorkflowApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.CHAT, AppMode.COMPLETION])
+    def post(self, app_model: App):
+        """
+        Convert basic mode of chatbot app to workflow mode
+        Convert expert mode of chatbot app to workflow mode
+        Convert completion app to workflow mode
+        """
+        if request.data:
+            parser = reqparse.RequestParser()
+            parser.add_argument('name', type=str, required=False, nullable=True, location='json')
+            parser.add_argument('icon', type=str, required=False, nullable=True, location='json')
+            parser.add_argument('icon_background', type=str, required=False, nullable=True, location='json')
+            args = parser.parse_args()
+        else:
+            args = {}
+
+        # convert to workflow mode
+        workflow_service = WorkflowService()
+        new_app_model = workflow_service.convert_to_workflow(
+            app_model=app_model,
+            account=current_user,
+            args=args
+        )
+
+        # return app id
+        return {
+            'new_app_id': new_app_model.id,
+        }
+
+
+api.add_resource(DraftWorkflowApi, '/apps/<uuid:app_id>/workflows/draft')
+api.add_resource(AdvancedChatDraftWorkflowRunApi, '/apps/<uuid:app_id>/advanced-chat/workflows/draft/run')
+api.add_resource(DraftWorkflowRunApi, '/apps/<uuid:app_id>/workflows/draft/run')
+api.add_resource(WorkflowTaskStopApi, '/apps/<uuid:app_id>/workflow-runs/tasks/<string:task_id>/stop')
+api.add_resource(DraftWorkflowNodeRunApi, '/apps/<uuid:app_id>/workflows/draft/nodes/<string:node_id>/run')
+api.add_resource(PublishedWorkflowApi, '/apps/<uuid:app_id>/workflows/publish')
+api.add_resource(DefaultBlockConfigsApi, '/apps/<uuid:app_id>/workflows/default-workflow-block-configs')
+api.add_resource(DefaultBlockConfigApi, '/apps/<uuid:app_id>/workflows/default-workflow-block-configs'
+                                        '/<string:block_type>')
+api.add_resource(ConvertToWorkflowApi, '/apps/<uuid:app_id>/convert-to-workflow')
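
For orientation, a minimal client-side sketch of the draft-sync endpoint registered above. The base URL, bearer token and app id are placeholders (not part of this change), and the console API is assumed to be mounted under /console/api as elsewhere in this PR:

    import requests

    CONSOLE_API = 'https://dify.example.com/console/api'    # placeholder base URL
    APP_ID = '00000000-0000-0000-0000-000000000000'          # placeholder app id

    session = requests.Session()
    session.headers['Authorization'] = 'Bearer <console-token>'   # placeholder auth

    payload = {
        'graph': {'nodes': [], 'edges': []},   # minimal graph; real graphs come from the visual editor
        'features': {}                         # feature toggles (opening statement, TTS, ...)
    }

    resp = session.post(f'{CONSOLE_API}/apps/{APP_ID}/workflows/draft', json=payload)
    resp.raise_for_status()
    print(resp.json())   # expected: {'result': 'success', 'updated_at': <timestamp>}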

+ 41 - 0
api/controllers/console/app/workflow_app_log.py

@@ -0,0 +1,41 @@
+from flask_restful import Resource, marshal_with, reqparse
+from flask_restful.inputs import int_range
+
+from controllers.console import api
+from controllers.console.app.wraps import get_app_model
+from controllers.console.setup import setup_required
+from controllers.console.wraps import account_initialization_required
+from fields.workflow_app_log_fields import workflow_app_log_pagination_fields
+from libs.login import login_required
+from models.model import App, AppMode
+from services.workflow_app_service import WorkflowAppService
+
+
+class WorkflowAppLogApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.WORKFLOW])
+    @marshal_with(workflow_app_log_pagination_fields)
+    def get(self, app_model: App):
+        """
+        Get workflow app logs
+        """
+        parser = reqparse.RequestParser()
+        parser.add_argument('keyword', type=str, location='args')
+        parser.add_argument('status', type=str, choices=['succeeded', 'failed', 'stopped'], location='args')
+        parser.add_argument('page', type=int_range(1, 99999), default=1, location='args')
+        parser.add_argument('limit', type=int_range(1, 100), default=20, location='args')
+        args = parser.parse_args()
+
+        # get paginate workflow app logs
+        workflow_app_service = WorkflowAppService()
+        workflow_app_log_pagination = workflow_app_service.get_paginate_workflow_app_logs(
+            app_model=app_model,
+            args=args
+        )
+
+        return workflow_app_log_pagination
+
+
+api.add_resource(WorkflowAppLogApi, '/apps/<uuid:app_id>/workflow-app-logs')
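
A hedged sketch of querying the new log endpoint with the filters it parses (status, keyword, page, limit); the 'data' key is assumed from the marshalled pagination fields, and session/CONSOLE_API/APP_ID are the same placeholders as in the sketch above:

    params = {'status': 'failed', 'keyword': 'timeout', 'page': 1, 'limit': 20}
    logs = session.get(f'{CONSOLE_API}/apps/{APP_ID}/workflow-app-logs', params=params).json()
    for entry in logs.get('data', []):
        print(entry)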

+ 109 - 0
api/controllers/console/app/workflow_run.py

@@ -0,0 +1,109 @@
+from flask_restful import Resource, marshal_with, reqparse
+from flask_restful.inputs import int_range
+
+from controllers.console import api
+from controllers.console.app.wraps import get_app_model
+from controllers.console.setup import setup_required
+from controllers.console.wraps import account_initialization_required
+from fields.workflow_run_fields import (
+    advanced_chat_workflow_run_pagination_fields,
+    workflow_run_detail_fields,
+    workflow_run_node_execution_list_fields,
+    workflow_run_pagination_fields,
+)
+from libs.helper import uuid_value
+from libs.login import login_required
+from models.model import App, AppMode
+from services.workflow_run_service import WorkflowRunService
+
+
+class AdvancedChatAppWorkflowRunListApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.ADVANCED_CHAT])
+    @marshal_with(advanced_chat_workflow_run_pagination_fields)
+    def get(self, app_model: App):
+        """
+        Get advanced chat app workflow run list
+        """
+        parser = reqparse.RequestParser()
+        parser.add_argument('last_id', type=uuid_value, location='args')
+        parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
+        args = parser.parse_args()
+
+        workflow_run_service = WorkflowRunService()
+        result = workflow_run_service.get_paginate_advanced_chat_workflow_runs(
+            app_model=app_model,
+            args=args
+        )
+
+        return result
+
+
+class WorkflowRunListApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
+    @marshal_with(workflow_run_pagination_fields)
+    def get(self, app_model: App):
+        """
+        Get workflow run list
+        """
+        parser = reqparse.RequestParser()
+        parser.add_argument('last_id', type=uuid_value, location='args')
+        parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
+        args = parser.parse_args()
+
+        workflow_run_service = WorkflowRunService()
+        result = workflow_run_service.get_paginate_workflow_runs(
+            app_model=app_model,
+            args=args
+        )
+
+        return result
+
+
+class WorkflowRunDetailApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
+    @marshal_with(workflow_run_detail_fields)
+    def get(self, app_model: App, run_id):
+        """
+        Get workflow run detail
+        """
+        run_id = str(run_id)
+
+        workflow_run_service = WorkflowRunService()
+        workflow_run = workflow_run_service.get_workflow_run(app_model=app_model, run_id=run_id)
+
+        return workflow_run
+
+
+class WorkflowRunNodeExecutionListApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
+    @marshal_with(workflow_run_node_execution_list_fields)
+    def get(self, app_model: App, run_id):
+        """
+        Get workflow run node execution list
+        """
+        run_id = str(run_id)
+
+        workflow_run_service = WorkflowRunService()
+        node_executions = workflow_run_service.get_workflow_run_node_executions(app_model=app_model, run_id=run_id)
+
+        return {
+            'data': node_executions
+        }
+
+
+api.add_resource(AdvancedChatAppWorkflowRunListApi, '/apps/<uuid:app_id>/advanced-chat/workflow-runs')
+api.add_resource(WorkflowRunListApi, '/apps/<uuid:app_id>/workflow-runs')
+api.add_resource(WorkflowRunDetailApi, '/apps/<uuid:app_id>/workflow-runs/<uuid:run_id>')
+api.add_resource(WorkflowRunNodeExecutionListApi, '/apps/<uuid:app_id>/workflow-runs/<uuid:run_id>/node-executions')
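
The run-list endpoints above are cursor-paginated via last_id/limit. A sketch of walking the pages, assuming the usual infinite-scroll envelope (data plus has_more) used by the other console list endpoints:

    def iter_workflow_runs(session, console_api, app_id, limit=20):
        """Yield workflow runs page by page using the last_id cursor."""
        last_id = None
        while True:
            params = {'limit': limit}
            if last_id:
                params['last_id'] = last_id
            page = session.get(f'{console_api}/apps/{app_id}/workflow-runs', params=params).json()
            runs = page.get('data', [])
            yield from runs
            if not runs or not page.get('has_more'):
                break
            last_id = runs[-1]['id']   # cursor for the next page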

+ 278 - 0
api/controllers/console/app/workflow_statistic.py

@@ -0,0 +1,278 @@
+from datetime import datetime
+from decimal import Decimal
+
+import pytz
+from flask import jsonify
+from flask_login import current_user
+from flask_restful import Resource, reqparse
+
+from controllers.console import api
+from controllers.console.app.wraps import get_app_model
+from controllers.console.setup import setup_required
+from controllers.console.wraps import account_initialization_required
+from extensions.ext_database import db
+from libs.helper import datetime_string
+from libs.login import login_required
+from models.model import AppMode
+from models.workflow import WorkflowRunTriggeredFrom
+
+
+class WorkflowDailyRunsStatistic(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model
+    def get(self, app_model):
+        account = current_user
+
+        parser = reqparse.RequestParser()
+        parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
+        parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
+        args = parser.parse_args()
+
+        sql_query = '''
+        SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(id) AS runs
+            FROM workflow_runs 
+            WHERE app_id = :app_id 
+                AND triggered_from = :triggered_from
+        '''
+        arg_dict = {'tz': account.timezone, 'app_id': app_model.id, 'triggered_from': WorkflowRunTriggeredFrom.APP_RUN.value}
+
+        timezone = pytz.timezone(account.timezone)
+        utc_timezone = pytz.utc
+
+        if args['start']:
+            start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
+            start_datetime = start_datetime.replace(second=0)
+
+            start_datetime_timezone = timezone.localize(start_datetime)
+            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
+
+            sql_query += ' and created_at >= :start'
+            arg_dict['start'] = start_datetime_utc
+
+        if args['end']:
+            end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
+            end_datetime = end_datetime.replace(second=0)
+
+            end_datetime_timezone = timezone.localize(end_datetime)
+            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
+
+            sql_query += ' and created_at < :end'
+            arg_dict['end'] = end_datetime_utc
+
+        sql_query += ' GROUP BY date order by date'
+
+        response_data = []
+
+        with db.engine.begin() as conn:
+            rs = conn.execute(db.text(sql_query), arg_dict)
+            for i in rs:
+                response_data.append({
+                    'date': str(i.date),
+                    'runs': i.runs
+                })
+
+        return jsonify({
+            'data': response_data
+        })
+
+class WorkflowDailyTerminalsStatistic(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model
+    def get(self, app_model):
+        account = current_user
+
+        parser = reqparse.RequestParser()
+        parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
+        parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
+        args = parser.parse_args()
+
+        sql_query = '''
+                SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct workflow_runs.created_by) AS terminal_count
+                    FROM workflow_runs 
+                    WHERE app_id = :app_id 
+                        AND triggered_from = :triggered_from
+                '''
+        arg_dict = {'tz': account.timezone, 'app_id': app_model.id, 'triggered_from': WorkflowRunTriggeredFrom.APP_RUN.value}
+
+        timezone = pytz.timezone(account.timezone)
+        utc_timezone = pytz.utc
+
+        if args['start']:
+            start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
+            start_datetime = start_datetime.replace(second=0)
+
+            start_datetime_timezone = timezone.localize(start_datetime)
+            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
+
+            sql_query += ' and created_at >= :start'
+            arg_dict['start'] = start_datetime_utc
+
+        if args['end']:
+            end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
+            end_datetime = end_datetime.replace(second=0)
+
+            end_datetime_timezone = timezone.localize(end_datetime)
+            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
+
+            sql_query += ' and created_at < :end'
+            arg_dict['end'] = end_datetime_utc
+
+        sql_query += ' GROUP BY date order by date'
+
+        response_data = []
+
+        with db.engine.begin() as conn:
+            rs = conn.execute(db.text(sql_query), arg_dict)            
+            for i in rs:
+                response_data.append({
+                    'date': str(i.date),
+                    'terminal_count': i.terminal_count
+                })
+
+        return jsonify({
+            'data': response_data
+        })
+
+class WorkflowDailyTokenCostStatistic(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model
+    def get(self, app_model):
+        account = current_user
+
+        parser = reqparse.RequestParser()
+        parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
+        parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
+        args = parser.parse_args()
+
+        sql_query = '''
+                SELECT 
+                    date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, 
+                    SUM(workflow_runs.total_tokens) as token_count
+                FROM workflow_runs 
+                WHERE app_id = :app_id 
+                    AND triggered_from = :triggered_from
+                '''
+        arg_dict = {'tz': account.timezone, 'app_id': app_model.id, 'triggered_from': WorkflowRunTriggeredFrom.APP_RUN.value}
+
+        timezone = pytz.timezone(account.timezone)
+        utc_timezone = pytz.utc
+
+        if args['start']:
+            start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
+            start_datetime = start_datetime.replace(second=0)
+
+            start_datetime_timezone = timezone.localize(start_datetime)
+            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
+
+            sql_query += ' and created_at >= :start'
+            arg_dict['start'] = start_datetime_utc
+
+        if args['end']:
+            end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
+            end_datetime = end_datetime.replace(second=0)
+
+            end_datetime_timezone = timezone.localize(end_datetime)
+            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
+
+            sql_query += ' and created_at < :end'
+            arg_dict['end'] = end_datetime_utc
+
+        sql_query += ' GROUP BY date order by date'
+
+        response_data = []
+
+        with db.engine.begin() as conn:
+            rs = conn.execute(db.text(sql_query), arg_dict)
+            for i in rs:
+                response_data.append({
+                    'date': str(i.date),
+                    'token_count': i.token_count,
+                })
+
+        return jsonify({
+            'data': response_data
+        })
+
+class WorkflowAverageAppInteractionStatistic(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    @get_app_model(mode=[AppMode.WORKFLOW])
+    def get(self, app_model):
+        account = current_user
+
+        parser = reqparse.RequestParser()
+        parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
+        parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
+        args = parser.parse_args()
+
+        sql_query = """
+            SELECT 
+                AVG(sub.interactions) as interactions,
+                sub.date
+            FROM
+                (SELECT 
+                    date(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, 
+                    c.created_by,
+                    COUNT(c.id) AS interactions
+                FROM workflow_runs c
+                WHERE c.app_id = :app_id
+                    AND c.triggered_from = :triggered_from
+                    {{start}}
+                    {{end}}
+                GROUP BY date, c.created_by) sub
+            GROUP BY sub.date
+            """
+        arg_dict = {'tz': account.timezone, 'app_id': app_model.id, 'triggered_from': WorkflowRunTriggeredFrom.APP_RUN.value}
+
+        timezone = pytz.timezone(account.timezone)
+        utc_timezone = pytz.utc
+
+        if args['start']:
+            start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
+            start_datetime = start_datetime.replace(second=0)
+
+            start_datetime_timezone = timezone.localize(start_datetime)
+            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
+
+            sql_query = sql_query.replace('{{start}}', ' AND c.created_at >= :start')
+            arg_dict['start'] = start_datetime_utc
+        else:
+            sql_query = sql_query.replace('{{start}}', '')
+
+        if args['end']:
+            end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
+            end_datetime = end_datetime.replace(second=0)
+
+            end_datetime_timezone = timezone.localize(end_datetime)
+            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
+
+            sql_query = sql_query.replace('{{end}}', ' and c.created_at < :end')
+            arg_dict['end'] = end_datetime_utc
+        else:
+            sql_query = sql_query.replace('{{end}}', '')
+
+        response_data = []
+        
+        with db.engine.begin() as conn:
+            rs = conn.execute(db.text(sql_query), arg_dict)
+            for i in rs:
+                response_data.append({
+                    'date': str(i.date),
+                    'interactions': float(i.interactions.quantize(Decimal('0.01')))
+                })
+
+        return jsonify({
+            'data': response_data
+        })
+
+api.add_resource(WorkflowDailyRunsStatistic, '/apps/<uuid:app_id>/workflow/statistics/daily-conversations')
+api.add_resource(WorkflowDailyTerminalsStatistic, '/apps/<uuid:app_id>/workflow/statistics/daily-terminals')
+api.add_resource(WorkflowDailyTokenCostStatistic, '/apps/<uuid:app_id>/workflow/statistics/token-costs')
+api.add_resource(WorkflowAverageAppInteractionStatistic, '/apps/<uuid:app_id>/workflow/statistics/average-app-interactions')
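
Each statistic endpoint repeats the same start/end handling: parse the '%Y-%m-%d %H:%M' argument in the workspace timezone, zero the seconds, and convert it to UTC before binding it into the SQL. A stand-alone sketch of that conversion (the helper name is illustrative, not part of the change):

    from datetime import datetime

    import pytz

    def local_range_bound_to_utc(value: str, tz_name: str) -> datetime:
        # e.g. ('2024-03-01 00:00', 'Asia/Shanghai') -> 2024-02-29 16:00:00+00:00
        local_tz = pytz.timezone(tz_name)
        naive = datetime.strptime(value, '%Y-%m-%d %H:%M').replace(second=0)
        return local_tz.localize(naive).astimezone(pytz.utc)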

+ 55 - 0
api/controllers/console/app/wraps.py

@@ -0,0 +1,55 @@
+from collections.abc import Callable
+from functools import wraps
+from typing import Optional, Union
+
+from controllers.console.app.error import AppNotFoundError
+from extensions.ext_database import db
+from libs.login import current_user
+from models.model import App, AppMode
+
+
+def get_app_model(view: Optional[Callable] = None, *,
+                  mode: Optional[Union[AppMode, list[AppMode]]] = None):
+    def decorator(view_func):
+        @wraps(view_func)
+        def decorated_view(*args, **kwargs):
+            if not kwargs.get('app_id'):
+                raise ValueError('missing app_id in path parameters')
+
+            app_id = kwargs.get('app_id')
+            app_id = str(app_id)
+
+            del kwargs['app_id']
+
+            app_model = db.session.query(App).filter(
+                App.id == app_id,
+                App.tenant_id == current_user.current_tenant_id,
+                App.status == 'normal'
+            ).first()
+
+            if not app_model:
+                raise AppNotFoundError()
+
+            app_mode = AppMode.value_of(app_model.mode)
+            if app_mode == AppMode.CHANNEL:
+                raise AppNotFoundError()
+
+            if mode is not None:
+                if isinstance(mode, list):
+                    modes = mode
+                else:
+                    modes = [mode]
+
+                if app_mode not in modes:
+                    mode_values = {m.value for m in modes}
+                    raise AppNotFoundError(f"App mode is not in the supported list: {mode_values}")
+
+            kwargs['app_model'] = app_model
+
+            return view_func(*args, **kwargs)
+        return decorated_view
+
+    if view is None:
+        return decorator
+    else:
+        return decorator(view)
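
get_app_model is written so it can be applied either bare or with a mode filter, which is how the new controllers above use it. A hypothetical resource showing both forms (ExampleApi itself is not part of this change):

    from flask_restful import Resource

    class ExampleApi(Resource):
        @get_app_model                               # bare: any non-channel app mode is accepted
        def get(self, app_model: App):
            return {'id': app_model.id, 'mode': app_model.mode}

        @get_app_model(mode=[AppMode.WORKFLOW])      # parameterized: only workflow apps reach the view
        def post(self, app_model: App):
            return {'result': 'success'}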

+ 3 - 12
api/controllers/console/explore/audio.py

@@ -19,7 +19,6 @@ from controllers.console.app.error import (
 from controllers.console.explore.wraps import InstalledAppResource
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
-from models.model import AppModelConfig
 from services.audio_service import AudioService
 from services.errors.audio import (
     AudioTooLargeServiceError,
@@ -32,16 +31,12 @@ from services.errors.audio import (
 class ChatAudioApi(InstalledAppResource):
     def post(self, installed_app):
         app_model = installed_app.app
-        app_model_config: AppModelConfig = app_model.app_model_config
-
-        if not app_model_config.speech_to_text_dict['enabled']:
-            raise AppUnavailableError()
 
         file = request.files['file']
 
         try:
             response = AudioService.transcript_asr(
-                tenant_id=app_model.tenant_id,
+                app_model=app_model,
                 file=file,
                 end_user=None
             )
@@ -76,16 +71,12 @@ class ChatAudioApi(InstalledAppResource):
 class ChatTextApi(InstalledAppResource):
     def post(self, installed_app):
         app_model = installed_app.app
-        app_model_config: AppModelConfig = app_model.app_model_config
-
-        if not app_model_config.text_to_speech_dict['enabled']:
-            raise AppUnavailableError()
 
         try:
             response = AudioService.transcript_tts(
-                tenant_id=app_model.tenant_id,
+                app_model=app_model,
                 text=request.form['text'],
-                voice=request.form['voice'] if request.form['voice'] else app_model.app_model_config.text_to_speech_dict.get('voice'),
+                voice=request.form.get('voice'),
                 streaming=False
             )
             return {'data': response.data.decode('latin1')}

+ 16 - 29
api/controllers/console/explore/completion.py

@@ -1,10 +1,6 @@
-import json
 import logging
-from collections.abc import Generator
 from datetime import datetime
-from typing import Union
 
-from flask import Response, stream_with_context
 from flask_login import current_user
 from flask_restful import reqparse
 from werkzeug.exceptions import InternalServerError, NotFound
@@ -21,13 +17,15 @@ from controllers.console.app.error import (
 )
 from controllers.console.explore.error import NotChatAppError, NotCompletionAppError
 from controllers.console.explore.wraps import InstalledAppResource
-from core.application_queue_manager import ApplicationQueueManager
-from core.entities.application_entities import InvokeFrom
+from core.app.apps.base_app_queue_manager import AppQueueManager
+from core.app.entities.app_invoke_entities import InvokeFrom
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
 from extensions.ext_database import db
+from libs import helper
 from libs.helper import uuid_value
-from services.completion_service import CompletionService
+from models.model import AppMode
+from services.app_generate_service import AppGenerateService
 
 
 # define completion api for user
@@ -53,7 +51,7 @@ class CompletionApi(InstalledAppResource):
         db.session.commit()
 
         try:
-            response = CompletionService.completion(
+            response = AppGenerateService.generate(
                 app_model=app_model,
                 user=current_user,
                 args=args,
@@ -61,7 +59,7 @@ class CompletionApi(InstalledAppResource):
                 streaming=streaming
             )
 
-            return compact_response(response)
+            return helper.compact_generate_response(response)
         except services.errors.conversation.ConversationNotExistsError:
             raise NotFound("Conversation Not Exists.")
         except services.errors.conversation.ConversationCompletedError:
@@ -90,7 +88,7 @@ class CompletionStopApi(InstalledAppResource):
         if app_model.mode != 'completion':
             raise NotCompletionAppError()
 
-        ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)
+        AppQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)
 
         return {'result': 'success'}, 200
 
@@ -98,34 +96,33 @@ class CompletionStopApi(InstalledAppResource):
 class ChatApi(InstalledAppResource):
     def post(self, installed_app):
         app_model = installed_app.app
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         parser = reqparse.RequestParser()
         parser.add_argument('inputs', type=dict, required=True, location='json')
         parser.add_argument('query', type=str, required=True, location='json')
         parser.add_argument('files', type=list, required=False, location='json')
-        parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
         parser.add_argument('conversation_id', type=uuid_value, location='json')
         parser.add_argument('retriever_from', type=str, required=False, default='explore_app', location='json')
         args = parser.parse_args()
 
-        streaming = args['response_mode'] == 'streaming'
         args['auto_generate_name'] = False
 
         installed_app.last_used_at = datetime.utcnow()
         db.session.commit()
 
         try:
-            response = CompletionService.completion(
+            response = AppGenerateService.generate(
                 app_model=app_model,
                 user=current_user,
                 args=args,
                 invoke_from=InvokeFrom.EXPLORE,
-                streaming=streaming
+                streaming=True
             )
 
-            return compact_response(response)
+            return helper.compact_generate_response(response)
         except services.errors.conversation.ConversationNotExistsError:
             raise NotFound("Conversation Not Exists.")
         except services.errors.conversation.ConversationCompletedError:
@@ -151,25 +148,15 @@ class ChatApi(InstalledAppResource):
 class ChatStopApi(InstalledAppResource):
     def post(self, installed_app, task_id):
         app_model = installed_app.app
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
-        ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)
+        AppQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)
 
         return {'result': 'success'}, 200
 
 
-def compact_response(response: Union[dict, Generator]) -> Response:
-    if isinstance(response, dict):
-        return Response(response=json.dumps(response), status=200, mimetype='application/json')
-    else:
-        def generate() -> Generator:
-            yield from response
-
-        return Response(stream_with_context(generate()), status=200,
-                        mimetype='text/event-stream')
-
-
 api.add_resource(CompletionApi, '/installed-apps/<uuid:installed_app_id>/completion-messages', endpoint='installed_app_completion')
 api.add_resource(CompletionStopApi, '/installed-apps/<uuid:installed_app_id>/completion-messages/<string:task_id>/stop', endpoint='installed_app_stop_completion')
 api.add_resource(ChatApi, '/installed-apps/<uuid:installed_app_id>/chat-messages', endpoint='installed_app_chat_completion')

+ 11 - 5
api/controllers/console/explore/conversation.py

@@ -8,6 +8,7 @@ from controllers.console.explore.error import NotChatAppError
 from controllers.console.explore.wraps import InstalledAppResource
 from fields.conversation_fields import conversation_infinite_scroll_pagination_fields, simple_conversation_fields
 from libs.helper import uuid_value
+from models.model import AppMode
 from services.conversation_service import ConversationService
 from services.errors.conversation import ConversationNotExistsError, LastConversationNotExistsError
 from services.web_conversation_service import WebConversationService
@@ -18,7 +19,8 @@ class ConversationListApi(InstalledAppResource):
     @marshal_with(conversation_infinite_scroll_pagination_fields)
     def get(self, installed_app):
         app_model = installed_app.app
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         parser = reqparse.RequestParser()
@@ -47,7 +49,8 @@ class ConversationListApi(InstalledAppResource):
 class ConversationApi(InstalledAppResource):
     def delete(self, installed_app, c_id):
         app_model = installed_app.app
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         conversation_id = str(c_id)
@@ -65,7 +68,8 @@ class ConversationRenameApi(InstalledAppResource):
     @marshal_with(simple_conversation_fields)
     def post(self, installed_app, c_id):
         app_model = installed_app.app
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         conversation_id = str(c_id)
@@ -91,7 +95,8 @@ class ConversationPinApi(InstalledAppResource):
 
     def patch(self, installed_app, c_id):
         app_model = installed_app.app
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         conversation_id = str(c_id)
@@ -107,7 +112,8 @@ class ConversationPinApi(InstalledAppResource):
 class ConversationUnPinApi(InstalledAppResource):
     def patch(self, installed_app, c_id):
         app_model = installed_app.app
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         conversation_id = str(c_id)

+ 7 - 1
api/controllers/console/explore/error.py

@@ -9,7 +9,13 @@ class NotCompletionAppError(BaseHTTPException):
 
 class NotChatAppError(BaseHTTPException):
     error_code = 'not_chat_app'
-    description = "Not Chat App"
+    description = "App mode is invalid."
+    code = 400
+
+
+class NotWorkflowAppError(BaseHTTPException):
+    error_code = 'not_workflow_app'
+    description = "Only support workflow app."
     code = 400
 
 

+ 1 - 2
api/controllers/console/explore/installed_app.py

@@ -34,8 +34,7 @@ class InstalledAppsListApi(Resource):
                 'is_pinned': installed_app.is_pinned,
                 'last_used_at': installed_app.last_used_at,
                 'editable': current_user.role in ["owner", "admin"],
-                'uninstallable': current_tenant_id == installed_app.app_owner_tenant_id,
-                'is_agent': installed_app.is_agent
+                'uninstallable': current_tenant_id == installed_app.app_owner_tenant_id
             }
             for installed_app in installed_apps
         ]

+ 13 - 23
api/controllers/console/explore/message.py

@@ -1,9 +1,5 @@
-import json
 import logging
-from collections.abc import Generator
-from typing import Union
 
-from flask import Response, stream_with_context
 from flask_login import current_user
 from flask_restful import marshal_with, reqparse
 from flask_restful.inputs import int_range
@@ -24,12 +20,14 @@ from controllers.console.explore.error import (
     NotCompletionAppError,
 )
 from controllers.console.explore.wraps import InstalledAppResource
-from core.entities.application_entities import InvokeFrom
+from core.app.entities.app_invoke_entities import InvokeFrom
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
 from fields.message_fields import message_infinite_scroll_pagination_fields
+from libs import helper
 from libs.helper import uuid_value
-from services.completion_service import CompletionService
+from models.model import AppMode
+from services.app_generate_service import AppGenerateService
 from services.errors.app import MoreLikeThisDisabledError
 from services.errors.conversation import ConversationNotExistsError
 from services.errors.message import MessageNotExistsError, SuggestedQuestionsAfterAnswerDisabledError
@@ -41,7 +39,8 @@ class MessageListApi(InstalledAppResource):
     def get(self, installed_app):
         app_model = installed_app.app
 
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         parser = reqparse.RequestParser()
@@ -91,14 +90,14 @@ class MessageMoreLikeThisApi(InstalledAppResource):
         streaming = args['response_mode'] == 'streaming'
 
         try:
-            response = CompletionService.generate_more_like_this(
+            response = AppGenerateService.generate_more_like_this(
                 app_model=app_model,
                 user=current_user,
                 message_id=message_id,
                 invoke_from=InvokeFrom.EXPLORE,
                 streaming=streaming
             )
-            return compact_response(response)
+            return helper.compact_generate_response(response)
         except MessageNotExistsError:
             raise NotFound("Message Not Exists.")
         except MoreLikeThisDisabledError:
@@ -118,22 +117,12 @@ class MessageMoreLikeThisApi(InstalledAppResource):
             raise InternalServerError()
 
 
-def compact_response(response: Union[dict, Generator]) -> Response:
-    if isinstance(response, dict):
-        return Response(response=json.dumps(response), status=200, mimetype='application/json')
-    else:
-        def generate() -> Generator:
-            yield from response
-
-        return Response(stream_with_context(generate()), status=200,
-                        mimetype='text/event-stream')
-
-
 class MessageSuggestedQuestionApi(InstalledAppResource):
     def get(self, installed_app, message_id):
         app_model = installed_app.app
-        if app_model.mode != 'chat':
-            raise NotCompletionAppError()
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
+            raise NotChatAppError()
 
         message_id = str(message_id)
 
@@ -141,7 +130,8 @@ class MessageSuggestedQuestionApi(InstalledAppResource):
             questions = MessageService.get_suggested_questions_after_answer(
                 app_model=app_model,
                 user=current_user,
-                message_id=message_id
+                message_id=message_id,
+                invoke_from=InvokeFrom.EXPLORE
             )
         except MessageNotExistsError:
             raise NotFound("Message not found")

+ 39 - 49
api/controllers/console/explore/parameter.py

@@ -1,13 +1,12 @@
-import json
 
 from flask import current_app
 from flask_restful import fields, marshal_with
 
 from controllers.console import api
+from controllers.console.app.error import AppUnavailableError
 from controllers.console.explore.wraps import InstalledAppResource
-from extensions.ext_database import db
-from models.model import AppModelConfig, InstalledApp
-from models.tools import ApiToolProvider
+from models.model import AppMode, InstalledApp
+from services.app_service import AppService
 
 
 class AppParameterApi(InstalledAppResource):
@@ -45,61 +44,52 @@ class AppParameterApi(InstalledAppResource):
     def get(self, installed_app: InstalledApp):
         """Retrieve app parameters."""
         app_model = installed_app.app
-        app_model_config = app_model.app_model_config
+
+        if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
+            workflow = app_model.workflow
+            if workflow is None:
+                raise AppUnavailableError()
+
+            features_dict = workflow.features_dict
+            user_input_form = workflow.user_input_form(to_old_structure=True)
+        else:
+            app_model_config = app_model.app_model_config
+            features_dict = app_model_config.to_dict()
+
+            user_input_form = features_dict.get('user_input_form', [])
 
         return {
-            'opening_statement': app_model_config.opening_statement,
-            'suggested_questions': app_model_config.suggested_questions_list,
-            'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
-            'speech_to_text': app_model_config.speech_to_text_dict,
-            'text_to_speech': app_model_config.text_to_speech_dict,
-            'retriever_resource': app_model_config.retriever_resource_dict,
-            'annotation_reply': app_model_config.annotation_reply_dict,
-            'more_like_this': app_model_config.more_like_this_dict,
-            'user_input_form': app_model_config.user_input_form_list,
-            'sensitive_word_avoidance': app_model_config.sensitive_word_avoidance_dict,
-            'file_upload': app_model_config.file_upload_dict,
+            'opening_statement': features_dict.get('opening_statement'),
+            'suggested_questions': features_dict.get('suggested_questions', []),
+            'suggested_questions_after_answer': features_dict.get('suggested_questions_after_answer',
+                                                                  {"enabled": False}),
+            'speech_to_text': features_dict.get('speech_to_text', {"enabled": False}),
+            'text_to_speech': features_dict.get('text_to_speech', {"enabled": False}),
+            'retriever_resource': features_dict.get('retriever_resource', {"enabled": False}),
+            'annotation_reply': features_dict.get('annotation_reply', {"enabled": False}),
+            'more_like_this': features_dict.get('more_like_this', {"enabled": False}),
+            'user_input_form': user_input_form,
+            'sensitive_word_avoidance': features_dict.get('sensitive_word_avoidance',
+                                                          {"enabled": False, "type": "", "configs": []}),
+            'file_upload': features_dict.get('file_upload', {"image": {
+                                                     "enabled": False,
+                                                     "number_limits": 3,
+                                                     "detail": "high",
+                                                     "transfer_methods": ["remote_url", "local_file"]
+                                                 }}),
             'system_parameters': {
                 'image_file_size_limit': current_app.config.get('UPLOAD_IMAGE_FILE_SIZE_LIMIT')
             }
         }
 
+
 class ExploreAppMetaApi(InstalledAppResource):
     def get(self, installed_app: InstalledApp):
         """Get app meta"""
-        app_model_config: AppModelConfig = installed_app.app.app_model_config
-
-        agent_config = app_model_config.agent_mode_dict or {}
-        meta = {
-            'tool_icons': {}
-        }
-
-        # get all tools
-        tools = agent_config.get('tools', [])
-        url_prefix = (current_app.config.get("CONSOLE_API_URL")
-                  + "/console/api/workspaces/current/tool-provider/builtin/")
-        for tool in tools:
-            keys = list(tool.keys())
-            if len(keys) >= 4:
-                # current tool standard
-                provider_type = tool.get('provider_type')
-                provider_id = tool.get('provider_id')
-                tool_name = tool.get('tool_name')
-                if provider_type == 'builtin':
-                    meta['tool_icons'][tool_name] = url_prefix + provider_id + '/icon'
-                elif provider_type == 'api':
-                    try:
-                        provider: ApiToolProvider = db.session.query(ApiToolProvider).filter(
-                            ApiToolProvider.id == provider_id
-                        )
-                        meta['tool_icons'][tool_name] = json.loads(provider.icon)
-                    except:
-                        meta['tool_icons'][tool_name] =  {
-                            "background": "#252525",
-                            "content": "\ud83d\ude01"
-                        }
+        app_model = installed_app.app
+        return AppService().get_app_meta(app_model)
 
-        return meta
 
-api.add_resource(AppParameterApi, '/installed-apps/<uuid:installed_app_id>/parameters', endpoint='installed_app_parameters')
+api.add_resource(AppParameterApi, '/installed-apps/<uuid:installed_app_id>/parameters',
+                 endpoint='installed_app_parameters')
 api.add_resource(ExploreAppMetaApi, '/installed-apps/<uuid:installed_app_id>/meta', endpoint='installed_app_meta')

+ 15 - 98
api/controllers/console/explore/recommended_app.py

@@ -1,15 +1,11 @@
 from flask_login import current_user
-from flask_restful import Resource, fields, marshal_with
-from sqlalchemy import and_
+from flask_restful import Resource, fields, marshal_with, reqparse
 
 from constants.languages import languages
 from controllers.console import api
-from controllers.console.app.error import AppNotFoundError
 from controllers.console.wraps import account_initialization_required
-from extensions.ext_database import db
 from libs.login import login_required
-from models.model import App, InstalledApp, RecommendedApp
-from services.account_service import TenantService
+from services.recommended_app_service import RecommendedAppService
 
 app_fields = {
     'id': fields.String,
@@ -27,11 +23,7 @@ recommended_app_fields = {
     'privacy_policy': fields.String,
     'category': fields.String,
     'position': fields.Integer,
-    'is_listed': fields.Boolean,
-    'install_count': fields.Integer,
-    'installed': fields.Boolean,
-    'editable': fields.Boolean,
-    'is_agent': fields.Boolean
+    'is_listed': fields.Boolean
 }
 
 recommended_app_list_fields = {
@@ -45,102 +37,27 @@ class RecommendedAppListApi(Resource):
     @account_initialization_required
     @marshal_with(recommended_app_list_fields)
     def get(self):
-        language_prefix = current_user.interface_language if current_user.interface_language else languages[0]
+        # language args
+        parser = reqparse.RequestParser()
+        parser.add_argument('language', type=str, location='args')
+        args = parser.parse_args()
 
-        recommended_apps = db.session.query(RecommendedApp).filter(
-            RecommendedApp.is_listed == True,
-            RecommendedApp.language == language_prefix
-        ).all()
+        if args.get('language') and args.get('language') in languages:
+            language_prefix = args.get('language')
+        elif current_user and current_user.interface_language:
+            language_prefix = current_user.interface_language
+        else:
+            language_prefix = languages[0]
 
-        if len(recommended_apps) == 0:
-            recommended_apps = db.session.query(RecommendedApp).filter(
-                RecommendedApp.is_listed == True,
-                RecommendedApp.language == languages[0]
-            ).all()
-
-        categories = set()
-        current_user.role = TenantService.get_user_role(current_user, current_user.current_tenant)
-        recommended_apps_result = []
-        for recommended_app in recommended_apps:
-            installed = db.session.query(InstalledApp).filter(
-                and_(
-                    InstalledApp.app_id == recommended_app.app_id,
-                    InstalledApp.tenant_id == current_user.current_tenant_id
-                )
-            ).first() is not None
-
-            app = recommended_app.app
-            if not app or not app.is_public:
-                continue
-
-            site = app.site
-            if not site:
-                continue
-
-            recommended_app_result = {
-                'id': recommended_app.id,
-                'app': app,
-                'app_id': recommended_app.app_id,
-                'description': site.description,
-                'copyright': site.copyright,
-                'privacy_policy': site.privacy_policy,
-                'category': recommended_app.category,
-                'position': recommended_app.position,
-                'is_listed': recommended_app.is_listed,
-                'install_count': recommended_app.install_count,
-                'installed': installed,
-                'editable': current_user.role in ['owner', 'admin'],
-                "is_agent": app.is_agent
-            }
-            recommended_apps_result.append(recommended_app_result)
-
-            categories.add(recommended_app.category)  # add category to categories
-
-        return {'recommended_apps': recommended_apps_result, 'categories': list(categories)}
+        return RecommendedAppService.get_recommended_apps_and_categories(language_prefix)
 
 
 class RecommendedAppApi(Resource):
-    model_config_fields = {
-        'opening_statement': fields.String,
-        'suggested_questions': fields.Raw(attribute='suggested_questions_list'),
-        'suggested_questions_after_answer': fields.Raw(attribute='suggested_questions_after_answer_dict'),
-        'more_like_this': fields.Raw(attribute='more_like_this_dict'),
-        'model': fields.Raw(attribute='model_dict'),
-        'user_input_form': fields.Raw(attribute='user_input_form_list'),
-        'pre_prompt': fields.String,
-        'agent_mode': fields.Raw(attribute='agent_mode_dict'),
-    }
-
-    app_simple_detail_fields = {
-        'id': fields.String,
-        'name': fields.String,
-        'icon': fields.String,
-        'icon_background': fields.String,
-        'mode': fields.String,
-        'app_model_config': fields.Nested(model_config_fields),
-    }
-
     @login_required
     @account_initialization_required
-    @marshal_with(app_simple_detail_fields)
     def get(self, app_id):
         app_id = str(app_id)
-
-        # is in public recommended list
-        recommended_app = db.session.query(RecommendedApp).filter(
-            RecommendedApp.is_listed == True,
-            RecommendedApp.app_id == app_id
-        ).first()
-
-        if not recommended_app:
-            raise AppNotFoundError
-
-        # get app detail
-        app = db.session.query(App).filter(App.id == app_id).first()
-        if not app or not app.is_public:
-            raise AppNotFoundError
-
-        return app
+        return RecommendedAppService.get_recommend_app_detail(app_id)
 
 
 api.add_resource(RecommendedAppListApi, '/explore/apps')
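
The list endpoint now takes an optional `language` query argument and falls back to the current user's interface language, then to the first entry in `languages`. A request sketch against the console API, with a hypothetical host and access token; the response keys follow `recommended_app_list_fields` above.

```python
import requests

# Hypothetical console host and token; adjust for your deployment.
CONSOLE_API = "http://localhost:5001/console/api"

resp = requests.get(
    f"{CONSOLE_API}/explore/apps",
    params={"language": "en-US"},  # optional; must be one of constants.languages
    headers={"Authorization": "Bearer <console-access-token>"},
)
data = resp.json()

print(data["categories"])
for item in data["recommended_apps"]:
    # each item is marshalled with recommended_app_fields
    print(item["app"]["name"], item["category"], item["position"])
```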

+ 85 - 0
api/controllers/console/explore/workflow.py

@@ -0,0 +1,85 @@
+import logging
+
+from flask_restful import reqparse
+from werkzeug.exceptions import InternalServerError
+
+from controllers.console import api
+from controllers.console.app.error import (
+    CompletionRequestError,
+    ProviderModelCurrentlyNotSupportError,
+    ProviderNotInitializeError,
+    ProviderQuotaExceededError,
+)
+from controllers.console.explore.error import NotWorkflowAppError
+from controllers.console.explore.wraps import InstalledAppResource
+from core.app.apps.base_app_queue_manager import AppQueueManager
+from core.app.entities.app_invoke_entities import InvokeFrom
+from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
+from core.model_runtime.errors.invoke import InvokeError
+from libs import helper
+from libs.login import current_user
+from models.model import AppMode, InstalledApp
+from services.app_generate_service import AppGenerateService
+
+logger = logging.getLogger(__name__)
+
+
+class InstalledAppWorkflowRunApi(InstalledAppResource):
+    def post(self, installed_app: InstalledApp):
+        """
+        Run workflow
+        """
+        app_model = installed_app.app
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode != AppMode.WORKFLOW:
+            raise NotWorkflowAppError()
+
+        parser = reqparse.RequestParser()
+        parser.add_argument('inputs', type=dict, required=True, nullable=False, location='json')
+        parser.add_argument('files', type=list, required=False, location='json')
+        args = parser.parse_args()
+
+        try:
+            response = AppGenerateService.generate(
+                app_model=app_model,
+                user=current_user,
+                args=args,
+                invoke_from=InvokeFrom.EXPLORE,
+                streaming=True
+            )
+
+            return helper.compact_generate_response(response)
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
+        except QuotaExceededError:
+            raise ProviderQuotaExceededError()
+        except ModelCurrentlyNotSupportError:
+            raise ProviderModelCurrentlyNotSupportError()
+        except InvokeError as e:
+            raise CompletionRequestError(e.description)
+        except ValueError as e:
+            raise e
+        except Exception as e:
+            logging.exception("internal server error.")
+            raise InternalServerError()
+
+
+class InstalledAppWorkflowTaskStopApi(InstalledAppResource):
+    def post(self, installed_app: InstalledApp, task_id: str):
+        """
+        Stop workflow task
+        """
+        app_model = installed_app.app
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode != AppMode.WORKFLOW:
+            raise NotWorkflowAppError()
+
+        AppQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)
+
+        return {
+            "result": "success"
+        }
+
+
+api.add_resource(InstalledAppWorkflowRunApi, '/installed-apps/<uuid:installed_app_id>/workflows/run')
+api.add_resource(InstalledAppWorkflowTaskStopApi, '/installed-apps/<uuid:installed_app_id>/workflows/tasks/<string:task_id>/stop')
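
A usage sketch for the new explore endpoint: running an installed workflow app from the console. Host, token, installed-app id, and the input variable name are hypothetical; the endpoint always streams (`streaming=True` above), so the response is consumed as server-sent events.

```python
import requests

CONSOLE_API = "http://localhost:5001/console/api"           # hypothetical base URL
INSTALLED_APP_ID = "0fce3f4e-0000-0000-0000-000000000000"   # hypothetical installed app id

resp = requests.post(
    f"{CONSOLE_API}/installed-apps/{INSTALLED_APP_ID}/workflows/run",
    headers={"Authorization": "Bearer <console-access-token>"},
    json={
        "inputs": {"query": "Summarize this text ..."},  # keys must match the workflow's input variables
        "files": [],                                     # optional
    },
    stream=True,
)
for line in resp.iter_lines():
    if line:
        print(line.decode())  # server-sent event chunks

# A running task can be cancelled with:
# POST /console/api/installed-apps/<installed_app_id>/workflows/tasks/<task_id>/stop
```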

+ 17 - 0
api/controllers/console/ping.py

@@ -0,0 +1,17 @@
+from flask_restful import Resource
+
+from controllers.console import api
+
+
+class PingApi(Resource):
+
+    def get(self):
+        """
+        For connection health check
+        """
+        return {
+            "result": "pong"
+        }
+
+
+api.add_resource(PingApi, '/ping')
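
The ping endpoint takes no authentication and simply echoes back, so it can back a load-balancer or uptime probe. A quick check, assuming the console blueprint is mounted at `/console/api`:

```python
import requests

# Hypothetical host; the /console/api prefix is assumed from the console blueprint.
resp = requests.get("http://localhost:5001/console/api/ping")
assert resp.json() == {"result": "pong"}
```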

+ 1 - 14
api/controllers/console/workspace/account.py

@@ -16,26 +16,13 @@ from controllers.console.workspace.error import (
 )
 from controllers.console.wraps import account_initialization_required
 from extensions.ext_database import db
+from fields.member_fields import account_fields
 from libs.helper import TimestampField, timezone
 from libs.login import login_required
 from models.account import AccountIntegrate, InvitationCode
 from services.account_service import AccountService
 from services.errors.account import CurrentPasswordIncorrectError as ServiceCurrentPasswordIncorrectError
 
-account_fields = {
-    'id': fields.String,
-    'name': fields.String,
-    'avatar': fields.String,
-    'email': fields.String,
-    'is_password_set': fields.Boolean,
-    'interface_language': fields.String,
-    'interface_theme': fields.String,
-    'timezone': fields.String,
-    'last_login_at': TimestampField,
-    'last_login_ip': fields.String,
-    'created_at': TimestampField
-}
-
 
 class AccountInitApi(Resource):
 

+ 3 - 18
api/controllers/console/workspace/members.py

@@ -1,33 +1,18 @@
 from flask import current_app
 from flask_login import current_user
-from flask_restful import Resource, abort, fields, marshal_with, reqparse
+from flask_restful import Resource, abort, marshal_with, reqparse
 
 import services
 from controllers.console import api
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
 from extensions.ext_database import db
-from libs.helper import TimestampField
+from fields.member_fields import account_with_role_list_fields
 from libs.login import login_required
 from models.account import Account
 from services.account_service import RegisterService, TenantService
 from services.errors.account import AccountAlreadyInTenantError
 
-account_fields = {
-    'id': fields.String,
-    'name': fields.String,
-    'avatar': fields.String,
-    'email': fields.String,
-    'last_login_at': TimestampField,
-    'created_at': TimestampField,
-    'role': fields.String,
-    'status': fields.String,
-}
-
-account_list_fields = {
-    'accounts': fields.List(fields.Nested(account_fields))
-}
-
 
 class MemberListApi(Resource):
     """List all members of current tenant."""
@@ -35,7 +20,7 @@ class MemberListApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
-    @marshal_with(account_list_fields)
+    @marshal_with(account_with_role_list_fields)
     def get(self):
         members = TenantService.get_tenant_members(current_user.current_tenant)
         return {'result': 'success', 'accounts': members}, 200
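
Both `account.py` and `members.py` now import their marshalling schemas from a shared `fields/member_fields.py` instead of defining them inline. A plausible sketch of that module, reconstructed from the two deleted dicts; the intermediate name `account_with_role_fields` is an assumption, since only `account_fields` and `account_with_role_list_fields` are referenced in this diff.

```python
from flask_restful import fields

from libs.helper import TimestampField

account_fields = {
    'id': fields.String,
    'name': fields.String,
    'avatar': fields.String,
    'email': fields.String,
    'is_password_set': fields.Boolean,
    'interface_language': fields.String,
    'interface_theme': fields.String,
    'timezone': fields.String,
    'last_login_at': TimestampField,
    'last_login_ip': fields.String,
    'created_at': TimestampField
}

account_with_role_fields = {
    'id': fields.String,
    'name': fields.String,
    'avatar': fields.String,
    'email': fields.String,
    'last_login_at': TimestampField,
    'created_at': TimestampField,
    'role': fields.String,
    'status': fields.String,
}

account_with_role_list_fields = {
    'accounts': fields.List(fields.Nested(account_with_role_fields))
}
```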

+ 54 - 9
api/controllers/console/workspace/tool_providers.py

@@ -8,6 +8,7 @@ from werkzeug.exceptions import Forbidden
 from controllers.console import api
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
+from core.model_runtime.utils.encoders import jsonable_encoder
 from libs.login import login_required
 from services.tools_manage_service import ToolManageService
 
@@ -30,11 +31,11 @@ class ToolBuiltinProviderListToolsApi(Resource):
         user_id = current_user.id
         tenant_id = current_user.current_tenant_id
 
-        return ToolManageService.list_builtin_tool_provider_tools(
+        return jsonable_encoder(ToolManageService.list_builtin_tool_provider_tools(
             user_id,
             tenant_id,
             provider,
-        )
+        ))
 
 class ToolBuiltinProviderDeleteApi(Resource):
     @setup_required
@@ -75,13 +76,27 @@ class ToolBuiltinProviderUpdateApi(Resource):
             provider,
             args['credentials'],
         )
+    
+class ToolBuiltinProviderGetCredentialsApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    def get(self, provider):
+        user_id = current_user.id
+        tenant_id = current_user.current_tenant_id
+
+        return ToolManageService.get_builtin_tool_provider_credentials(
+            user_id,
+            tenant_id,
+            provider,
+        )
 
 class ToolBuiltinProviderIconApi(Resource):
     @setup_required
     def get(self, provider):
-        icon_bytes, minetype = ToolManageService.get_builtin_tool_provider_icon(provider)
+        icon_bytes, mimetype = ToolManageService.get_builtin_tool_provider_icon(provider)
         icon_cache_max_age = int(current_app.config.get('TOOL_ICON_CACHE_MAX_AGE'))
-        return send_file(io.BytesIO(icon_bytes), mimetype=minetype, max_age=icon_cache_max_age)
+        return send_file(io.BytesIO(icon_bytes), mimetype=mimetype, max_age=icon_cache_max_age)
 
 class ToolModelProviderIconApi(Resource):
     @setup_required
@@ -102,11 +117,11 @@ class ToolModelProviderListToolsApi(Resource):
 
         args = parser.parse_args()
 
-        return ToolManageService.list_model_tool_provider_tools(
+        return jsonable_encoder(ToolManageService.list_model_tool_provider_tools(
             user_id,
             tenant_id,
             args['provider'],
-        )
+        ))
 
 class ToolApiProviderAddApi(Resource):
     @setup_required
@@ -171,11 +186,11 @@ class ToolApiProviderListToolsApi(Resource):
 
         args = parser.parse_args()
 
-        return ToolManageService.list_api_tool_provider_tools(
+        return jsonable_encoder(ToolManageService.list_api_tool_provider_tools(
             user_id,
             tenant_id,
             args['provider'],
-        )
+        ))
 
 class ToolApiProviderUpdateApi(Resource):
     @setup_required
@@ -302,10 +317,37 @@ class ToolApiProviderPreviousTestApi(Resource):
             args['schema'],
         )
 
+class ToolBuiltinListApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    def get(self):
+        user_id = current_user.id
+        tenant_id = current_user.current_tenant_id
+
+        return jsonable_encoder([provider.to_dict() for provider in ToolManageService.list_builtin_tools(
+            user_id,
+            tenant_id,
+        )])
+    
+class ToolApiListApi(Resource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    def get(self):
+        user_id = current_user.id
+        tenant_id = current_user.current_tenant_id
+
+        return jsonable_encoder([provider.to_dict() for provider in ToolManageService.list_api_tools(
+            user_id,
+            tenant_id,
+        )])
+
 api.add_resource(ToolProviderListApi, '/workspaces/current/tool-providers')
 api.add_resource(ToolBuiltinProviderListToolsApi, '/workspaces/current/tool-provider/builtin/<provider>/tools')
 api.add_resource(ToolBuiltinProviderDeleteApi, '/workspaces/current/tool-provider/builtin/<provider>/delete')
 api.add_resource(ToolBuiltinProviderUpdateApi, '/workspaces/current/tool-provider/builtin/<provider>/update')
+api.add_resource(ToolBuiltinProviderGetCredentialsApi, '/workspaces/current/tool-provider/builtin/<provider>/credentials')
 api.add_resource(ToolBuiltinProviderCredentialsSchemaApi, '/workspaces/current/tool-provider/builtin/<provider>/credentials_schema')
 api.add_resource(ToolBuiltinProviderIconApi, '/workspaces/current/tool-provider/builtin/<provider>/icon')
 api.add_resource(ToolModelProviderIconApi, '/workspaces/current/tool-provider/model/<provider>/icon')
@@ -313,8 +355,11 @@ api.add_resource(ToolModelProviderListToolsApi, '/workspaces/current/tool-provid
 api.add_resource(ToolApiProviderAddApi, '/workspaces/current/tool-provider/api/add')
 api.add_resource(ToolApiProviderGetRemoteSchemaApi, '/workspaces/current/tool-provider/api/remote')
 api.add_resource(ToolApiProviderListToolsApi, '/workspaces/current/tool-provider/api/tools')
-api.add_resource(ToolApiProviderUpdateApi, '/workspaces/current/tool-provider/api/update') 
+api.add_resource(ToolApiProviderUpdateApi, '/workspaces/current/tool-provider/api/update')
 api.add_resource(ToolApiProviderDeleteApi, '/workspaces/current/tool-provider/api/delete')
 api.add_resource(ToolApiProviderGetApi, '/workspaces/current/tool-provider/api/get')
 api.add_resource(ToolApiProviderSchemaApi, '/workspaces/current/tool-provider/api/schema')
 api.add_resource(ToolApiProviderPreviousTestApi, '/workspaces/current/tool-provider/api/test/pre')
+
+api.add_resource(ToolBuiltinListApi, '/workspaces/current/tools/builtin')
+api.add_resource(ToolApiListApi, '/workspaces/current/tools/api')
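
The tool list endpoints now wrap service results in `jsonable_encoder` before returning them, so Pydantic models, enums, and datetimes are flattened into plain JSON-serializable structures that Flask-RESTful can emit. A minimal illustration of the effect; the `UserTool` model is a hypothetical stand-in for whatever the service actually returns, and the printed output is approximate.

```python
from datetime import datetime
from enum import Enum

from pydantic import BaseModel

from core.model_runtime.utils.encoders import jsonable_encoder


class ToolKind(str, Enum):
    BUILTIN = 'builtin'
    API = 'api'


class UserTool(BaseModel):
    # hypothetical stand-in for the service's return type
    name: str
    kind: ToolKind
    created_at: datetime


tool = UserTool(name='web_search', kind=ToolKind.BUILTIN, created_at=datetime(2024, 3, 1))

# Pydantic models are not directly JSON-serializable; jsonable_encoder converts them
# to dicts with enum values and ISO-formatted datetimes, roughly:
# {'name': 'web_search', 'kind': 'builtin', 'created_at': '2024-03-01T00:00:00'}
print(jsonable_encoder(tool))
```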

+ 1 - 1
api/controllers/files/tool_files.py

@@ -27,7 +27,7 @@ class ToolFilePreviewApi(Resource):
             raise Forbidden('Invalid request.')
         
         try:
-            result = ToolFileManager.get_file_generator_by_message_file_id(
+            result = ToolFileManager.get_file_generator_by_tool_file_id(
                 file_id,
             )
 

+ 1 - 1
api/controllers/service_api/__init__.py

@@ -7,5 +7,5 @@ api = ExternalApi(bp)
 
 
 from . import index
-from .app import app, audio, completion, conversation, file, message
+from .app import app, audio, completion, conversation, file, message, workflow
 from .dataset import dataset, document, segment

+ 35 - 45
api/controllers/service_api/app/app.py

@@ -4,10 +4,12 @@ from flask import current_app
 from flask_restful import fields, marshal_with, Resource
 
 from controllers.service_api import api
+from controllers.service_api.app.error import AppUnavailableError
 from controllers.service_api.wraps import validate_app_token
 from extensions.ext_database import db
-from models.model import App, AppModelConfig
+from models.model import App, AppModelConfig, AppMode
 from models.tools import ApiToolProvider
+from services.app_service import AppService
 
 
 class AppParameterApi(Resource):
@@ -46,62 +48,50 @@ class AppParameterApi(Resource):
     @marshal_with(parameters_fields)
     def get(self, app_model: App):
         """Retrieve app parameters."""
-        app_model_config = app_model.app_model_config
+        if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
+            workflow = app_model.workflow
+            if workflow is None:
+                raise AppUnavailableError()
+
+            features_dict = workflow.features_dict
+            user_input_form = workflow.user_input_form(to_old_structure=True)
+        else:
+            app_model_config = app_model.app_model_config
+            features_dict = app_model_config.to_dict()
+
+            user_input_form = features_dict.get('user_input_form', [])
 
         return {
-            'opening_statement': app_model_config.opening_statement,
-            'suggested_questions': app_model_config.suggested_questions_list,
-            'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
-            'speech_to_text': app_model_config.speech_to_text_dict,
-            'text_to_speech': app_model_config.text_to_speech_dict,
-            'retriever_resource': app_model_config.retriever_resource_dict,
-            'annotation_reply': app_model_config.annotation_reply_dict,
-            'more_like_this': app_model_config.more_like_this_dict,
-            'user_input_form': app_model_config.user_input_form_list,
-            'sensitive_word_avoidance': app_model_config.sensitive_word_avoidance_dict,
-            'file_upload': app_model_config.file_upload_dict,
+            'opening_statement': features_dict.get('opening_statement'),
+            'suggested_questions': features_dict.get('suggested_questions', []),
+            'suggested_questions_after_answer': features_dict.get('suggested_questions_after_answer',
+                                                                  {"enabled": False}),
+            'speech_to_text': features_dict.get('speech_to_text', {"enabled": False}),
+            'text_to_speech': features_dict.get('text_to_speech', {"enabled": False}),
+            'retriever_resource': features_dict.get('retriever_resource', {"enabled": False}),
+            'annotation_reply': features_dict.get('annotation_reply', {"enabled": False}),
+            'more_like_this': features_dict.get('more_like_this', {"enabled": False}),
+            'user_input_form': user_input_form,
+            'sensitive_word_avoidance': features_dict.get('sensitive_word_avoidance',
+                                                          {"enabled": False, "type": "", "configs": []}),
+            'file_upload': features_dict.get('file_upload', {"image": {
+                                                     "enabled": False,
+                                                     "number_limits": 3,
+                                                     "detail": "high",
+                                                     "transfer_methods": ["remote_url", "local_file"]
+                                                 }}),
             'system_parameters': {
                 'image_file_size_limit': current_app.config.get('UPLOAD_IMAGE_FILE_SIZE_LIMIT')
             }
         }
 
+
 class AppMetaApi(Resource):
     @validate_app_token
     def get(self, app_model: App):
         """Get app meta"""
-        app_model_config: AppModelConfig = app_model.app_model_config
-
-        agent_config = app_model_config.agent_mode_dict or {}
-        meta = {
-            'tool_icons': {}
-        }
-
-        # get all tools
-        tools = agent_config.get('tools', [])
-        url_prefix = (current_app.config.get("CONSOLE_API_URL")
-                  + "/console/api/workspaces/current/tool-provider/builtin/")
-        for tool in tools:
-            keys = list(tool.keys())
-            if len(keys) >= 4:
-                # current tool standard
-                provider_type = tool.get('provider_type')
-                provider_id = tool.get('provider_id')
-                tool_name = tool.get('tool_name')
-                if provider_type == 'builtin':
-                    meta['tool_icons'][tool_name] = url_prefix + provider_id + '/icon'
-                elif provider_type == 'api':
-                    try:
-                        provider: ApiToolProvider = db.session.query(ApiToolProvider).filter(
-                            ApiToolProvider.id == provider_id
-                        )
-                        meta['tool_icons'][tool_name] = json.loads(provider.icon)
-                    except:
-                        meta['tool_icons'][tool_name] =  {
-                            "background": "#252525",
-                            "content": "\ud83d\ude01"
-                        }
+        return AppService().get_app_meta(app_model)
 
-        return meta
 
 api.add_resource(AppParameterApi, '/parameters')
 api.add_resource(AppMetaApi, '/meta')
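
With this change `GET /parameters` serves workflow and advanced-chat apps (reading features from the published workflow) as well as classic apps (reading `app_model_config`), with every missing feature falling back to a default. A Service API call sketch; host and API key are hypothetical, and additional parameters (such as a user identifier) may be required depending on deployment.

```python
import requests

SERVICE_API = "http://localhost:5001/v1"   # hypothetical base URL
API_KEY = "app-xxxxxxxxxxxxxxxx"           # an app-scoped Service API key

resp = requests.get(f"{SERVICE_API}/parameters",
                    headers={"Authorization": f"Bearer {API_KEY}"})
params = resp.json()

print(params["speech_to_text"])     # defaults to {"enabled": False} when the feature is not configured
print(params["user_input_form"])    # [] when no input variables are configured
print(params["system_parameters"])  # {"image_file_size_limit": ...}
```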

+ 7 - 11
api/controllers/service_api/app/audio.py

@@ -20,7 +20,7 @@ from controllers.service_api.app.error import (
 from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
-from models.model import App, AppModelConfig, EndUser
+from models.model import App, EndUser
 from services.audio_service import AudioService
 from services.errors.audio import (
     AudioTooLargeServiceError,
@@ -33,18 +33,13 @@ from services.errors.audio import (
 class AudioApi(Resource):
     @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.FORM))
     def post(self, app_model: App, end_user: EndUser):
-        app_model_config: AppModelConfig = app_model.app_model_config
-
-        if not app_model_config.speech_to_text_dict['enabled']:
-            raise AppUnavailableError()
-
         file = request.files['file']
 
         try:
             response = AudioService.transcript_asr(
-                tenant_id=app_model.tenant_id,
+                app_model=app_model,
                 file=file,
-                end_user=end_user.get_id()
+                end_user=end_user
             )
 
             return response
@@ -79,15 +74,16 @@ class TextApi(Resource):
     def post(self, app_model: App, end_user: EndUser):
         parser = reqparse.RequestParser()
         parser.add_argument('text', type=str, required=True, nullable=False, location='json')
+        parser.add_argument('voice', type=str, location='json')
         parser.add_argument('streaming', type=bool, required=False, nullable=False, location='json')
         args = parser.parse_args()
 
         try:
             response = AudioService.transcript_tts(
-                tenant_id=app_model.tenant_id,
+                app_model=app_model,
                 text=args['text'],
-                end_user=end_user.get_id(),
-                voice=app_model.app_model_config.text_to_speech_dict.get('voice'),
+                end_user=end_user,
+                voice=args.get('voice'),
                 streaming=args['streaming']
             )
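
The text-to-speech route now accepts an optional `voice` in the request body instead of always reading it from the app's model config, and both audio endpoints hand the full `app_model`/`end_user` objects to `AudioService`. A request sketch; the host, key, voice name, and the `/text-to-audio` path are assumptions (the route registration is outside this hunk), and the `user` field reflects the JSON user fetch used by `validate_app_token`.

```python
import requests

SERVICE_API = "http://localhost:5001/v1"   # hypothetical base URL
API_KEY = "app-xxxxxxxxxxxxxxxx"

resp = requests.post(
    f"{SERVICE_API}/text-to-audio",        # assumed path; not shown in this hunk
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "text": "Hello from the new workflow engine.",
        "voice": "alloy",                  # optional now; fallback handling lives inside AudioService
        "streaming": False,
        "user": "end-user-123",
    },
)
print(resp.status_code, resp.headers.get("Content-Type"))
# The body carries the synthesized audio; the exact container depends on the TTS provider.
```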
 

+ 15 - 27
api/controllers/service_api/app/completion.py

@@ -1,9 +1,5 @@
-import json
 import logging
-from collections.abc import Generator
-from typing import Union
 
-from flask import Response, stream_with_context
 from flask_restful import Resource, reqparse
 from werkzeug.exceptions import InternalServerError, NotFound
 
@@ -19,13 +15,14 @@ from controllers.service_api.app.error import (
     ProviderQuotaExceededError,
 )
 from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
-from core.application_queue_manager import ApplicationQueueManager
-from core.entities.application_entities import InvokeFrom
+from core.app.apps.base_app_queue_manager import AppQueueManager
+from core.app.entities.app_invoke_entities import InvokeFrom
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
+from libs import helper
 from libs.helper import uuid_value
-from models.model import App, EndUser
-from services.completion_service import CompletionService
+from models.model import App, AppMode, EndUser
+from services.app_generate_service import AppGenerateService
 
 
 class CompletionApi(Resource):
@@ -48,7 +45,7 @@ class CompletionApi(Resource):
         args['auto_generate_name'] = False
 
         try:
-            response = CompletionService.completion(
+            response = AppGenerateService.generate(
                 app_model=app_model,
                 user=end_user,
                 args=args,
@@ -56,7 +53,7 @@ class CompletionApi(Resource):
                 streaming=streaming,
             )
 
-            return compact_response(response)
+            return helper.compact_generate_response(response)
         except services.errors.conversation.ConversationNotExistsError:
             raise NotFound("Conversation Not Exists.")
         except services.errors.conversation.ConversationCompletedError:
@@ -85,7 +82,7 @@ class CompletionStopApi(Resource):
         if app_model.mode != 'completion':
             raise AppUnavailableError()
 
-        ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)
+        AppQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)
 
         return {'result': 'success'}, 200
 
@@ -93,7 +90,8 @@ class CompletionStopApi(Resource):
 class ChatApi(Resource):
     @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
     def post(self, app_model: App, end_user: EndUser):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         parser = reqparse.RequestParser()
@@ -110,7 +108,7 @@ class ChatApi(Resource):
         streaming = args['response_mode'] == 'streaming'
 
         try:
-            response = CompletionService.completion(
+            response = AppGenerateService.generate(
                 app_model=app_model,
                 user=end_user,
                 args=args,
@@ -118,7 +116,7 @@ class ChatApi(Resource):
                 streaming=streaming
             )
 
-            return compact_response(response)
+            return helper.compact_generate_response(response)
         except services.errors.conversation.ConversationNotExistsError:
             raise NotFound("Conversation Not Exists.")
         except services.errors.conversation.ConversationCompletedError:
@@ -144,25 +142,15 @@ class ChatApi(Resource):
 class ChatStopApi(Resource):
     @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
     def post(self, app_model: App, end_user: EndUser, task_id):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
-        ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)
+        AppQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)
 
         return {'result': 'success'}, 200
 
 
-def compact_response(response: Union[dict, Generator]) -> Response:
-    if isinstance(response, dict):
-        return Response(response=json.dumps(response), status=200, mimetype='application/json')
-    else:
-        def generate() -> Generator:
-            yield from response
-
-        return Response(stream_with_context(generate()), status=200,
-                        mimetype='text/event-stream')
-
-
 api.add_resource(CompletionApi, '/completion-messages')
 api.add_resource(CompletionStopApi, '/completion-messages/<string:task_id>/stop')
 api.add_resource(ChatApi, '/chat-messages')
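
The per-controller `compact_response` helpers (one copy per blueprint, see the deletion here and the matching ones in the web controllers below) are consolidated into `libs.helper.compact_generate_response`. A minimal sketch of that shared helper, assuming it keeps the same contract as the deleted functions: dict responses become JSON, generators become an SSE stream.

```python
import json
from collections.abc import Generator
from typing import Union

from flask import Response, stream_with_context


def compact_generate_response(response: Union[dict, Generator]) -> Response:
    """Blocking results are serialized as JSON; streaming results are relayed as server-sent events."""
    if isinstance(response, dict):
        return Response(response=json.dumps(response), status=200, mimetype='application/json')

    def generate() -> Generator:
        yield from response

    return Response(stream_with_context(generate()), status=200,
                    mimetype='text/event-stream')
```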

+ 8 - 4
api/controllers/service_api/app/conversation.py

@@ -8,7 +8,7 @@ from controllers.service_api.app.error import NotChatAppError
 from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
 from fields.conversation_fields import conversation_infinite_scroll_pagination_fields, simple_conversation_fields
 from libs.helper import uuid_value
-from models.model import App, EndUser
+from models.model import App, AppMode, EndUser
 from services.conversation_service import ConversationService
 
 
@@ -17,7 +17,8 @@ class ConversationApi(Resource):
     @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.QUERY))
     @marshal_with(conversation_infinite_scroll_pagination_fields)
     def get(self, app_model: App, end_user: EndUser):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         parser = reqparse.RequestParser()
@@ -30,11 +31,13 @@ class ConversationApi(Resource):
         except services.errors.conversation.LastConversationNotExistsError:
             raise NotFound("Last Conversation Not Exists.")
 
+
 class ConversationDetailApi(Resource):
     @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON))
     @marshal_with(simple_conversation_fields)
     def delete(self, app_model: App, end_user: EndUser, c_id):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         conversation_id = str(c_id)
@@ -51,7 +54,8 @@ class ConversationRenameApi(Resource):
     @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON))
     @marshal_with(simple_conversation_fields)
     def post(self, app_model: App, end_user: EndUser, c_id):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         conversation_id = str(c_id)
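
Throughout this commit the string comparison `app_model.mode != 'chat'` is replaced by `AppMode.value_of(app_model.mode)` plus a membership check, so agent-chat and the new advanced-chat (workflow-backed chat) apps are accepted by the same routes. A rough sketch of the enum this relies on; the member values and `value_of` behavior are inferred from how it is used here, not taken from `models/model.py` itself.

```python
from enum import Enum


class AppMode(str, Enum):
    COMPLETION = 'completion'
    CHAT = 'chat'
    AGENT_CHAT = 'agent-chat'
    ADVANCED_CHAT = 'advanced-chat'
    WORKFLOW = 'workflow'

    @classmethod
    def value_of(cls, value: str) -> 'AppMode':
        """Map a stored mode string to its enum member, rejecting unknown values."""
        for mode in cls:
            if mode.value == value:
                return mode
        raise ValueError(f'invalid app mode value {value}')


# the controller-side check used throughout this commit:
app_mode = AppMode.value_of('agent-chat')
assert app_mode in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]
```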

+ 7 - 1
api/controllers/service_api/app/error.py

@@ -15,7 +15,13 @@ class NotCompletionAppError(BaseHTTPException):
 
 class NotChatAppError(BaseHTTPException):
     error_code = 'not_chat_app'
-    description = "Please check if your Chat app mode matches the right API route."
+    description = "Please check if your app mode matches the right API route."
+    code = 400
+
+
+class NotWorkflowAppError(BaseHTTPException):
+    error_code = 'not_workflow_app'
+    description = "Please check if your app mode matches the right API route."
     code = 400
 
 

+ 20 - 7
api/controllers/service_api/app/message.py

@@ -1,14 +1,18 @@
+import logging
+
 from flask_restful import Resource, fields, marshal_with, reqparse
 from flask_restful.inputs import int_range
-from werkzeug.exceptions import NotFound
+from werkzeug.exceptions import BadRequest, InternalServerError, NotFound
 
 import services
 from controllers.service_api import api
 from controllers.service_api.app.error import NotChatAppError
 from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
+from core.app.entities.app_invoke_entities import InvokeFrom
 from fields.conversation_fields import message_file_fields
 from libs.helper import TimestampField, uuid_value
-from models.model import App, EndUser
+from models.model import App, AppMode, EndUser
+from services.errors.message import SuggestedQuestionsAfterAnswerDisabledError
 from services.message_service import MessageService
 
 
@@ -54,12 +58,14 @@ class MessageListApi(Resource):
         'conversation_id': fields.String,
         'inputs': fields.Raw,
         'query': fields.String,
-        'answer': fields.String,
+        'answer': fields.String(attribute='re_sign_file_url_answer'),
         'message_files': fields.List(fields.Nested(message_file_fields), attribute='files'),
         'feedback': fields.Nested(feedback_fields, attribute='user_feedback', allow_null=True),
         'retriever_resources': fields.List(fields.Nested(retriever_resource_fields)),
         'created_at': TimestampField,
-        'agent_thoughts': fields.List(fields.Nested(agent_thought_fields))
+        'agent_thoughts': fields.List(fields.Nested(agent_thought_fields)),
+        'status': fields.String,
+        'error': fields.String,
     }
 
     message_infinite_scroll_pagination_fields = {
@@ -71,7 +77,8 @@ class MessageListApi(Resource):
     @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.QUERY))
     @marshal_with(message_infinite_scroll_pagination_fields)
     def get(self, app_model: App, end_user: EndUser):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         parser = reqparse.RequestParser()
@@ -110,7 +117,8 @@ class MessageSuggestedApi(Resource):
     @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.QUERY))
     def get(self, app_model: App, end_user: EndUser, message_id):
         message_id = str(message_id)
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         try:
@@ -118,10 +126,15 @@ class MessageSuggestedApi(Resource):
                 app_model=app_model,
                 user=end_user,
                 message_id=message_id,
-                check_enabled=False
+                invoke_from=InvokeFrom.SERVICE_API
             )
         except services.errors.message.MessageNotExistsError:
             raise NotFound("Message Not Exists.")
+        except SuggestedQuestionsAfterAnswerDisabledError:
+            raise BadRequest("Message Not Exists.")
+        except Exception:
+            logging.exception("internal server error.")
+            raise InternalServerError()
 
         return {'result': 'success', 'data': questions}
 

+ 87 - 0
api/controllers/service_api/app/workflow.py

@@ -0,0 +1,87 @@
+import logging
+
+from flask_restful import Resource, reqparse
+from werkzeug.exceptions import InternalServerError
+
+from controllers.service_api import api
+from controllers.service_api.app.error import (
+    CompletionRequestError,
+    NotWorkflowAppError,
+    ProviderModelCurrentlyNotSupportError,
+    ProviderNotInitializeError,
+    ProviderQuotaExceededError,
+)
+from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
+from core.app.apps.base_app_queue_manager import AppQueueManager
+from core.app.entities.app_invoke_entities import InvokeFrom
+from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
+from core.model_runtime.errors.invoke import InvokeError
+from libs import helper
+from models.model import App, AppMode, EndUser
+from services.app_generate_service import AppGenerateService
+
+logger = logging.getLogger(__name__)
+
+
+class WorkflowRunApi(Resource):
+    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
+    def post(self, app_model: App, end_user: EndUser):
+        """
+        Run workflow
+        """
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode != AppMode.WORKFLOW:
+            raise NotWorkflowAppError()
+
+        parser = reqparse.RequestParser()
+        parser.add_argument('inputs', type=dict, required=True, nullable=False, location='json')
+        parser.add_argument('files', type=list, required=False, location='json')
+        parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
+        args = parser.parse_args()
+
+        streaming = args.get('response_mode') == 'streaming'
+
+        try:
+            response = AppGenerateService.generate(
+                app_model=app_model,
+                user=end_user,
+                args=args,
+                invoke_from=InvokeFrom.SERVICE_API,
+                streaming=streaming
+            )
+
+            return helper.compact_generate_response(response)
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
+        except QuotaExceededError:
+            raise ProviderQuotaExceededError()
+        except ModelCurrentlyNotSupportError:
+            raise ProviderModelCurrentlyNotSupportError()
+        except InvokeError as e:
+            raise CompletionRequestError(e.description)
+        except ValueError as e:
+            raise e
+        except Exception as e:
+            logging.exception("internal server error.")
+            raise InternalServerError()
+
+
+class WorkflowTaskStopApi(Resource):
+    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
+    def post(self, app_model: App, end_user: EndUser, task_id: str):
+        """
+        Stop workflow task
+        """
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode != AppMode.WORKFLOW:
+            raise NotWorkflowAppError()
+
+        AppQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)
+
+        return {
+            "result": "success"
+        }
+
+
+api.add_resource(WorkflowRunApi, '/workflows/run')
+api.add_resource(WorkflowTaskStopApi, '/workflows/tasks/<string:task_id>/stop')
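
This is the public Service API counterpart of the explore workflow endpoints: `POST /v1/workflows/run` with an app-scoped API key. A request sketch; host, key, and input variable names are hypothetical, the `user` field name is assumed from the JSON user fetch in `validate_app_token`, and `response_mode` selects blocking vs. streaming exactly as parsed above.

```python
import requests

SERVICE_API = "http://localhost:5001/v1"   # hypothetical base URL
API_KEY = "app-xxxxxxxxxxxxxxxx"           # the workflow app's Service API key

resp = requests.post(
    f"{SERVICE_API}/workflows/run",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "inputs": {"text": "raw input for the workflow"},  # keys follow the workflow's input variables
        "response_mode": "blocking",                        # or "streaming" for SSE output
        "user": "end-user-123",
    },
)
print(resp.json())

# A running task can be cancelled via the companion endpoint:
# POST /v1/workflows/tasks/<task_id>/stop  (same Authorization header and "user")
```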

+ 1 - 1
api/controllers/web/__init__.py

@@ -6,4 +6,4 @@ bp = Blueprint('web', __name__, url_prefix='/api')
 api = ExternalApi(bp)
 
 
-from . import app, audio, completion, conversation, file, message, passport, saved_message, site
+from . import app, audio, completion, conversation, file, message, passport, saved_message, site, workflow

+ 35 - 45
api/controllers/web/app.py

@@ -4,10 +4,12 @@ from flask import current_app
 from flask_restful import fields, marshal_with
 
 from controllers.web import api
+from controllers.web.error import AppUnavailableError
 from controllers.web.wraps import WebApiResource
 from extensions.ext_database import db
-from models.model import App, AppModelConfig
+from models.model import App, AppModelConfig, AppMode
 from models.tools import ApiToolProvider
+from services.app_service import AppService
 
 
 class AppParameterApi(WebApiResource):
@@ -44,61 +46,49 @@ class AppParameterApi(WebApiResource):
     @marshal_with(parameters_fields)
     def get(self, app_model: App, end_user):
         """Retrieve app parameters."""
-        app_model_config = app_model.app_model_config
+        if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
+            workflow = app_model.workflow
+            if workflow is None:
+                raise AppUnavailableError()
+
+            features_dict = workflow.features_dict
+            user_input_form = workflow.user_input_form(to_old_structure=True)
+        else:
+            app_model_config = app_model.app_model_config
+            features_dict = app_model_config.to_dict()
+
+            user_input_form = features_dict.get('user_input_form', [])
 
         return {
-            'opening_statement': app_model_config.opening_statement,
-            'suggested_questions': app_model_config.suggested_questions_list,
-            'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
-            'speech_to_text': app_model_config.speech_to_text_dict,
-            'text_to_speech': app_model_config.text_to_speech_dict,
-            'retriever_resource': app_model_config.retriever_resource_dict,
-            'annotation_reply': app_model_config.annotation_reply_dict,
-            'more_like_this': app_model_config.more_like_this_dict,
-            'user_input_form': app_model_config.user_input_form_list,
-            'sensitive_word_avoidance': app_model_config.sensitive_word_avoidance_dict,
-            'file_upload': app_model_config.file_upload_dict,
+            'opening_statement': features_dict.get('opening_statement'),
+            'suggested_questions': features_dict.get('suggested_questions', []),
+            'suggested_questions_after_answer': features_dict.get('suggested_questions_after_answer',
+                                                                  {"enabled": False}),
+            'speech_to_text': features_dict.get('speech_to_text', {"enabled": False}),
+            'text_to_speech': features_dict.get('text_to_speech', {"enabled": False}),
+            'retriever_resource': features_dict.get('retriever_resource', {"enabled": False}),
+            'annotation_reply': features_dict.get('annotation_reply', {"enabled": False}),
+            'more_like_this': features_dict.get('more_like_this', {"enabled": False}),
+            'user_input_form': user_input_form,
+            'sensitive_word_avoidance': features_dict.get('sensitive_word_avoidance',
+                                                          {"enabled": False, "type": "", "configs": []}),
+            'file_upload': features_dict.get('file_upload', {"image": {
+                "enabled": False,
+                "number_limits": 3,
+                "detail": "high",
+                "transfer_methods": ["remote_url", "local_file"]
+            }}),
             'system_parameters': {
                 'image_file_size_limit': current_app.config.get('UPLOAD_IMAGE_FILE_SIZE_LIMIT')
             }
         }
 
+
 class AppMeta(WebApiResource):
     def get(self, app_model: App, end_user):
         """Get app meta"""
-        app_model_config: AppModelConfig = app_model.app_model_config
-
-        agent_config = app_model_config.agent_mode_dict or {}
-        meta = {
-            'tool_icons': {}
-        }
-
-        # get all tools
-        tools = agent_config.get('tools', [])
-        url_prefix = (current_app.config.get("CONSOLE_API_URL")
-                  + "/console/api/workspaces/current/tool-provider/builtin/")
-        for tool in tools:
-            keys = list(tool.keys())
-            if len(keys) >= 4:
-                # current tool standard
-                provider_type = tool.get('provider_type')
-                provider_id = tool.get('provider_id')
-                tool_name = tool.get('tool_name')
-                if provider_type == 'builtin':
-                    meta['tool_icons'][tool_name] = url_prefix + provider_id + '/icon'
-                elif provider_type == 'api':
-                    try:
-                        provider: ApiToolProvider = db.session.query(ApiToolProvider).filter(
-                            ApiToolProvider.id == provider_id
-                        )
-                        meta['tool_icons'][tool_name] = json.loads(provider.icon)
-                    except:
-                        meta['tool_icons'][tool_name] =  {
-                            "background": "#252525",
-                            "content": "\ud83d\ude01"
-                        }
+        return AppService().get_app_meta(app_model)
 
-        return meta
 
 api.add_resource(AppParameterApi, '/parameters')
 api.add_resource(AppMeta, '/meta')

+ 4 - 14
api/controllers/web/audio.py

@@ -19,7 +19,7 @@ from controllers.web.error import (
 from controllers.web.wraps import WebApiResource
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
-from models.model import App, AppModelConfig
+from models.model import App
 from services.audio_service import AudioService
 from services.errors.audio import (
     AudioTooLargeServiceError,
@@ -31,16 +31,11 @@ from services.errors.audio import (
 
 class AudioApi(WebApiResource):
     def post(self, app_model: App, end_user):
-        app_model_config: AppModelConfig = app_model.app_model_config
-
-        if not app_model_config.speech_to_text_dict['enabled']:
-            raise AppUnavailableError()
-
         file = request.files['file']
 
         try:
             response = AudioService.transcript_asr(
-                tenant_id=app_model.tenant_id,
+                app_model=app_model,
                 file=file,
                 end_user=end_user
             )
@@ -74,17 +69,12 @@ class AudioApi(WebApiResource):
 
 class TextApi(WebApiResource):
     def post(self, app_model: App, end_user):
-        app_model_config: AppModelConfig = app_model.app_model_config
-
-        if not app_model_config.text_to_speech_dict['enabled']:
-            raise AppUnavailableError()
-
         try:
             response = AudioService.transcript_tts(
-                tenant_id=app_model.tenant_id,
+                app_model=app_model,
                 text=request.form['text'],
                 end_user=end_user.external_user_id,
-                voice=request.form['voice'] if request.form['voice'] else app_model.app_model_config.text_to_speech_dict.get('voice'),
+                voice=request.form.get('voice'),
                 streaming=False
             )
 

+ 15 - 26
api/controllers/web/completion.py

@@ -1,9 +1,5 @@
-import json
 import logging
-from collections.abc import Generator
-from typing import Union
 
-from flask import Response, stream_with_context
 from flask_restful import reqparse
 from werkzeug.exceptions import InternalServerError, NotFound
 
@@ -20,12 +16,14 @@ from controllers.web.error import (
     ProviderQuotaExceededError,
 )
 from controllers.web.wraps import WebApiResource
-from core.application_queue_manager import ApplicationQueueManager
-from core.entities.application_entities import InvokeFrom
+from core.app.apps.base_app_queue_manager import AppQueueManager
+from core.app.entities.app_invoke_entities import InvokeFrom
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
+from libs import helper
 from libs.helper import uuid_value
-from services.completion_service import CompletionService
+from models.model import AppMode
+from services.app_generate_service import AppGenerateService
 
 
 # define completion api for user
@@ -48,7 +46,7 @@ class CompletionApi(WebApiResource):
         args['auto_generate_name'] = False
 
         try:
-            response = CompletionService.completion(
+            response = AppGenerateService.generate(
                 app_model=app_model,
                 user=end_user,
                 args=args,
@@ -56,7 +54,7 @@ class CompletionApi(WebApiResource):
                 streaming=streaming
             )
 
-            return compact_response(response)
+            return helper.compact_generate_response(response)
         except services.errors.conversation.ConversationNotExistsError:
             raise NotFound("Conversation Not Exists.")
         except services.errors.conversation.ConversationCompletedError:
@@ -84,14 +82,15 @@ class CompletionStopApi(WebApiResource):
         if app_model.mode != 'completion':
             raise NotCompletionAppError()
 
-        ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)
+        AppQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)
 
         return {'result': 'success'}, 200
 
 
 class ChatApi(WebApiResource):
     def post(self, app_model, end_user):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         parser = reqparse.RequestParser()
@@ -108,7 +107,7 @@ class ChatApi(WebApiResource):
         args['auto_generate_name'] = False
 
         try:
-            response = CompletionService.completion(
+            response = AppGenerateService.generate(
                 app_model=app_model,
                 user=end_user,
                 args=args,
@@ -116,7 +115,7 @@ class ChatApi(WebApiResource):
                 streaming=streaming
             )
 
-            return compact_response(response)
+            return helper.compact_generate_response(response)
         except services.errors.conversation.ConversationNotExistsError:
             raise NotFound("Conversation Not Exists.")
         except services.errors.conversation.ConversationCompletedError:
@@ -141,25 +140,15 @@ class ChatApi(WebApiResource):
 
 class ChatStopApi(WebApiResource):
     def post(self, app_model, end_user, task_id):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
-        ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)
+        AppQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)
 
         return {'result': 'success'}, 200
 
 
-def compact_response(response: Union[dict, Generator]) -> Response:
-    if isinstance(response, dict):
-        return Response(response=json.dumps(response), status=200, mimetype='application/json')
-    else:
-        def generate() -> Generator:
-            yield from response
-
-        return Response(stream_with_context(generate()), status=200,
-                        mimetype='text/event-stream')
-
-
 api.add_resource(CompletionApi, '/completion-messages')
 api.add_resource(CompletionStopApi, '/completion-messages/<string:task_id>/stop')
 api.add_resource(ChatApi, '/chat-messages')

+ 11 - 5
api/controllers/web/conversation.py

@@ -7,6 +7,7 @@ from controllers.web.error import NotChatAppError
 from controllers.web.wraps import WebApiResource
 from fields.conversation_fields import conversation_infinite_scroll_pagination_fields, simple_conversation_fields
 from libs.helper import uuid_value
+from models.model import AppMode
 from services.conversation_service import ConversationService
 from services.errors.conversation import ConversationNotExistsError, LastConversationNotExistsError
 from services.web_conversation_service import WebConversationService
@@ -16,7 +17,8 @@ class ConversationListApi(WebApiResource):
 
     @marshal_with(conversation_infinite_scroll_pagination_fields)
     def get(self, app_model, end_user):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         parser = reqparse.RequestParser()
@@ -43,7 +45,8 @@ class ConversationListApi(WebApiResource):
 
 class ConversationApi(WebApiResource):
     def delete(self, app_model, end_user, c_id):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         conversation_id = str(c_id)
@@ -60,7 +63,8 @@ class ConversationRenameApi(WebApiResource):
 
     @marshal_with(simple_conversation_fields)
     def post(self, app_model, end_user, c_id):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         conversation_id = str(c_id)
@@ -85,7 +89,8 @@ class ConversationRenameApi(WebApiResource):
 class ConversationPinApi(WebApiResource):
 
     def patch(self, app_model, end_user, c_id):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         conversation_id = str(c_id)
@@ -100,7 +105,8 @@ class ConversationPinApi(WebApiResource):
 
 class ConversationUnPinApi(WebApiResource):
     def patch(self, app_model, end_user, c_id):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         conversation_id = str(c_id)

+ 7 - 1
api/controllers/web/error.py

@@ -15,7 +15,13 @@ class NotCompletionAppError(BaseHTTPException):
 
 class NotChatAppError(BaseHTTPException):
     error_code = 'not_chat_app'
-    description = "Please check if your Chat app mode matches the right API route."
+    description = "Please check if your app mode matches the right API route."
+    code = 400
+
+
+class NotWorkflowAppError(BaseHTTPException):
+    error_code = 'not_workflow_app'
+    description = "Please check if your Workflow app mode matches the right API route."
     code = 400
 
 

+ 16 - 24
api/controllers/web/message.py

@@ -1,9 +1,5 @@
-import json
 import logging
-from collections.abc import Generator
-from typing import Union
 
-from flask import Response, stream_with_context
 from flask_restful import fields, marshal_with, reqparse
 from flask_restful.inputs import int_range
 from werkzeug.exceptions import InternalServerError, NotFound
@@ -21,13 +17,15 @@ from controllers.web.error import (
     ProviderQuotaExceededError,
 )
 from controllers.web.wraps import WebApiResource
-from core.entities.application_entities import InvokeFrom
+from core.app.entities.app_invoke_entities import InvokeFrom
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
 from fields.conversation_fields import message_file_fields
 from fields.message_fields import agent_thought_fields
+from libs import helper
 from libs.helper import TimestampField, uuid_value
-from services.completion_service import CompletionService
+from models.model import AppMode
+from services.app_generate_service import AppGenerateService
 from services.errors.app import MoreLikeThisDisabledError
 from services.errors.conversation import ConversationNotExistsError
 from services.errors.message import MessageNotExistsError, SuggestedQuestionsAfterAnswerDisabledError
@@ -63,12 +61,14 @@ class MessageListApi(WebApiResource):
         'conversation_id': fields.String,
         'inputs': fields.Raw,
         'query': fields.String,
-        'answer': fields.String,
+        'answer': fields.String(attribute='re_sign_file_url_answer'),
         'message_files': fields.List(fields.Nested(message_file_fields), attribute='files'),
         'feedback': fields.Nested(feedback_fields, attribute='user_feedback', allow_null=True),
         'retriever_resources': fields.List(fields.Nested(retriever_resource_fields)),
         'created_at': TimestampField,
-        'agent_thoughts': fields.List(fields.Nested(agent_thought_fields))
+        'agent_thoughts': fields.List(fields.Nested(agent_thought_fields)),
+        'status': fields.String,
+        'error': fields.String,
     }
 
     message_infinite_scroll_pagination_fields = {
@@ -79,7 +79,8 @@ class MessageListApi(WebApiResource):
 
     @marshal_with(message_infinite_scroll_pagination_fields)
     def get(self, app_model, end_user):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotChatAppError()
 
         parser = reqparse.RequestParser()
@@ -127,7 +128,7 @@ class MessageMoreLikeThisApi(WebApiResource):
         streaming = args['response_mode'] == 'streaming'
 
         try:
-            response = CompletionService.generate_more_like_this(
+            response = AppGenerateService.generate_more_like_this(
                 app_model=app_model,
                 user=end_user,
                 message_id=message_id,
@@ -135,7 +136,7 @@ class MessageMoreLikeThisApi(WebApiResource):
                 streaming=streaming
             )
 
-            return compact_response(response)
+            return helper.compact_generate_response(response)
         except MessageNotExistsError:
             raise NotFound("Message Not Exists.")
         except MoreLikeThisDisabledError:
@@ -155,20 +156,10 @@ class MessageMoreLikeThisApi(WebApiResource):
             raise InternalServerError()
 
 
-def compact_response(response: Union[dict, Generator]) -> Response:
-    if isinstance(response, dict):
-        return Response(response=json.dumps(response), status=200, mimetype='application/json')
-    else:
-        def generate() -> Generator:
-            yield from response
-
-        return Response(stream_with_context(generate()), status=200,
-                        mimetype='text/event-stream')
-
-
 class MessageSuggestedQuestionApi(WebApiResource):
     def get(self, app_model, end_user, message_id):
-        if app_model.mode != 'chat':
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
             raise NotCompletionAppError()
 
         message_id = str(message_id)
@@ -177,7 +168,8 @@ class MessageSuggestedQuestionApi(WebApiResource):
             questions = MessageService.get_suggested_questions_after_answer(
                 app_model=app_model,
                 user=end_user,
-                message_id=message_id
+                message_id=message_id,
+                invoke_from=InvokeFrom.WEB_APP
             )
         except MessageNotExistsError:
             raise NotFound("Message not found")

+ 0 - 4
api/controllers/web/site.py

@@ -83,7 +83,3 @@ class AppSiteInfo:
                 'remove_webapp_brand': remove_webapp_brand,
                 'replace_webapp_logo': replace_webapp_logo,
             }
-
-        if app.enable_site and site.prompt_public:
-            app_model_config = app.app_model_config
-            self.model_config = app_model_config

+ 82 - 0
api/controllers/web/workflow.py

@@ -0,0 +1,82 @@
+import logging
+
+from flask_restful import reqparse
+from werkzeug.exceptions import InternalServerError
+
+from controllers.web import api
+from controllers.web.error import (
+    CompletionRequestError,
+    NotWorkflowAppError,
+    ProviderModelCurrentlyNotSupportError,
+    ProviderNotInitializeError,
+    ProviderQuotaExceededError,
+)
+from controllers.web.wraps import WebApiResource
+from core.app.apps.base_app_queue_manager import AppQueueManager
+from core.app.entities.app_invoke_entities import InvokeFrom
+from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
+from core.model_runtime.errors.invoke import InvokeError
+from libs import helper
+from models.model import App, AppMode, EndUser
+from services.app_generate_service import AppGenerateService
+
+logger = logging.getLogger(__name__)
+
+
+class WorkflowRunApi(WebApiResource):
+    def post(self, app_model: App, end_user: EndUser):
+        """
+        Run workflow
+        """
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode != AppMode.WORKFLOW:
+            raise NotWorkflowAppError()
+
+        parser = reqparse.RequestParser()
+        parser.add_argument('inputs', type=dict, required=True, nullable=False, location='json')
+        parser.add_argument('files', type=list, required=False, location='json')
+        args = parser.parse_args()
+
+        try:
+            response = AppGenerateService.generate(
+                app_model=app_model,
+                user=end_user,
+                args=args,
+                invoke_from=InvokeFrom.WEB_APP,
+                streaming=True
+            )
+
+            return helper.compact_generate_response(response)
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
+        except QuotaExceededError:
+            raise ProviderQuotaExceededError()
+        except ModelCurrentlyNotSupportError:
+            raise ProviderModelCurrentlyNotSupportError()
+        except InvokeError as e:
+            raise CompletionRequestError(e.description)
+        except ValueError as e:
+            raise e
+        except Exception as e:
+            logging.exception("internal server error.")
+            raise InternalServerError()
+
+
+class WorkflowTaskStopApi(WebApiResource):
+    def post(self, app_model: App, end_user: EndUser, task_id: str):
+        """
+        Stop workflow task
+        """
+        app_mode = AppMode.value_of(app_model.mode)
+        if app_mode != AppMode.WORKFLOW:
+            raise NotWorkflowAppError()
+
+        AppQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)
+
+        return {
+            "result": "success"
+        }
+
+
+api.add_resource(WorkflowRunApi, '/workflows/run')
+api.add_resource(WorkflowTaskStopApi, '/workflows/tasks/<string:task_id>/stop')
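
Note: WorkflowRunApi always generates with streaming=True, so clients should consume the response as a server-sent event stream. A minimal client-side sketch of calling the new route (the host, headers and input keys below are illustrative assumptions, not part of this change):

import requests

headers = {'Content-Type': 'application/json'}  # plus whatever web-app passport/token header applies
payload = {
    'inputs': {'query': 'hello'},  # 'inputs' is required by the parser above
    'files': []                    # optional, per the parser above
}

# streams server-sent events line by line
resp = requests.post('https://example.com/api/workflows/run', json=payload, headers=headers, stream=True)
for line in resp.iter_lines():
    if line:
        print(line.decode('utf-8'))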

+ 0 - 0
api/core/app_runner/__init__.py → api/core/agent/__init__.py


+ 50 - 183
api/core/features/assistant_base_runner.py → api/core/agent/base_agent_runner.py

@@ -2,22 +2,18 @@ import json
 import logging
 import uuid
 from datetime import datetime
-from mimetypes import guess_extension
 from typing import Optional, Union, cast
 
-from core.app_runner.app_runner import AppRunner
-from core.application_queue_manager import ApplicationQueueManager
+from core.agent.entities import AgentEntity, AgentToolEntity
+from core.app.apps.agent_chat.app_config_manager import AgentChatAppConfig
+from core.app.apps.base_app_queue_manager import AppQueueManager
+from core.app.apps.base_app_runner import AppRunner
+from core.app.entities.app_invoke_entities import (
+    AgentChatAppGenerateEntity,
+    ModelConfigWithCredentialsEntity,
+)
 from core.callback_handler.agent_tool_callback_handler import DifyAgentCallbackHandler
 from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
-from core.entities.application_entities import (
-    AgentEntity,
-    AgentToolEntity,
-    ApplicationGenerateEntity,
-    AppOrchestrationConfigEntity,
-    InvokeFrom,
-    ModelConfigEntity,
-)
-from core.file.message_file_parser import FileTransferMethod
 from core.memory.token_buffer_memory import TokenBufferMemory
 from core.model_manager import ModelInstance
 from core.model_runtime.entities.llm_entities import LLMUsage
@@ -34,27 +30,25 @@ from core.model_runtime.model_providers.__base.large_language_model import Large
 from core.model_runtime.utils.encoders import jsonable_encoder
 from core.tools.entities.tool_entities import (
     ToolInvokeMessage,
-    ToolInvokeMessageBinary,
     ToolParameter,
     ToolRuntimeVariablePool,
 )
 from core.tools.tool.dataset_retriever_tool import DatasetRetrieverTool
 from core.tools.tool.tool import Tool
-from core.tools.tool_file_manager import ToolFileManager
 from core.tools.tool_manager import ToolManager
 from extensions.ext_database import db
-from models.model import Message, MessageAgentThought, MessageFile
+from models.model import Message, MessageAgentThought
 from models.tools import ToolConversationVariables
 
 logger = logging.getLogger(__name__)
 
-class BaseAssistantApplicationRunner(AppRunner):
+class BaseAgentRunner(AppRunner):
     def __init__(self, tenant_id: str,
-                 application_generate_entity: ApplicationGenerateEntity,
-                 app_orchestration_config: AppOrchestrationConfigEntity,
-                 model_config: ModelConfigEntity,
+                 application_generate_entity: AgentChatAppGenerateEntity,
+                 app_config: AgentChatAppConfig,
+                 model_config: ModelConfigWithCredentialsEntity,
                  config: AgentEntity,
-                 queue_manager: ApplicationQueueManager,
+                 queue_manager: AppQueueManager,
                  message: Message,
                  user_id: str,
                  memory: Optional[TokenBufferMemory] = None,
@@ -66,7 +60,7 @@ class BaseAssistantApplicationRunner(AppRunner):
         """
         Agent runner
         :param tenant_id: tenant id
-        :param app_orchestration_config: app orchestration config
+        :param app_config: app config
         :param model_config: model config
         :param config: dataset config
         :param queue_manager: queue manager
@@ -78,7 +72,7 @@ class BaseAssistantApplicationRunner(AppRunner):
         """
         self.tenant_id = tenant_id
         self.application_generate_entity = application_generate_entity
-        self.app_orchestration_config = app_orchestration_config
+        self.app_config = app_config
         self.model_config = model_config
         self.config = config
         self.queue_manager = queue_manager
@@ -97,16 +91,16 @@ class BaseAssistantApplicationRunner(AppRunner):
         # init dataset tools
         hit_callback = DatasetIndexToolCallbackHandler(
             queue_manager=queue_manager,
-            app_id=self.application_generate_entity.app_id,
+            app_id=self.app_config.app_id,
             message_id=message.id,
             user_id=user_id,
             invoke_from=self.application_generate_entity.invoke_from,
         )
         self.dataset_tools = DatasetRetrieverTool.get_dataset_tools(
             tenant_id=tenant_id,
-            dataset_ids=app_orchestration_config.dataset.dataset_ids if app_orchestration_config.dataset else [],
-            retrieve_config=app_orchestration_config.dataset.retrieve_config if app_orchestration_config.dataset else None,
-            return_resource=app_orchestration_config.show_retrieve_source,
+            dataset_ids=app_config.dataset.dataset_ids if app_config.dataset else [],
+            retrieve_config=app_config.dataset.retrieve_config if app_config.dataset else None,
+            return_resource=app_config.additional_features.show_retrieve_source,
             invoke_from=application_generate_entity.invoke_from,
             hit_callback=hit_callback
         )
@@ -124,14 +118,15 @@ class BaseAssistantApplicationRunner(AppRunner):
         else:
             self.stream_tool_call = False
 
-    def _repack_app_orchestration_config(self, app_orchestration_config: AppOrchestrationConfigEntity) -> AppOrchestrationConfigEntity:
+    def _repack_app_generate_entity(self, app_generate_entity: AgentChatAppGenerateEntity) \
+            -> AgentChatAppGenerateEntity:
         """
-        Repack app orchestration config
+        Repack app generate entity
         """
-        if app_orchestration_config.prompt_template.simple_prompt_template is None:
-            app_orchestration_config.prompt_template.simple_prompt_template = ''
+        if app_generate_entity.app_config.prompt_template.simple_prompt_template is None:
+            app_generate_entity.app_config.prompt_template.simple_prompt_template = ''
 
-        return app_orchestration_config
+        return app_generate_entity
 
     def _convert_tool_response_to_str(self, tool_response: list[ToolInvokeMessage]) -> str:
         """
@@ -158,7 +153,6 @@ class BaseAssistantApplicationRunner(AppRunner):
         tool_entity = ToolManager.get_agent_tool_runtime(
             tenant_id=self.tenant_id,
             agent_tool=tool,
-            agent_callback=self.agent_callback
         )
         tool_entity.load_variables(self.variables_pool)
 
@@ -272,87 +266,6 @@ class BaseAssistantApplicationRunner(AppRunner):
                     prompt_tool.parameters['required'].append(parameter.name)
 
         return prompt_tool
-    
-    def extract_tool_response_binary(self, tool_response: list[ToolInvokeMessage]) -> list[ToolInvokeMessageBinary]:
-        """
-        Extract tool response binary
-        """
-        result = []
-
-        for response in tool_response:
-            if response.type == ToolInvokeMessage.MessageType.IMAGE_LINK or \
-                response.type == ToolInvokeMessage.MessageType.IMAGE:
-                result.append(ToolInvokeMessageBinary(
-                    mimetype=response.meta.get('mime_type', 'octet/stream'),
-                    url=response.message,
-                    save_as=response.save_as,
-                ))
-            elif response.type == ToolInvokeMessage.MessageType.BLOB:
-                result.append(ToolInvokeMessageBinary(
-                    mimetype=response.meta.get('mime_type', 'octet/stream'),
-                    url=response.message,
-                    save_as=response.save_as,
-                ))
-            elif response.type == ToolInvokeMessage.MessageType.LINK:
-                # check if there is a mime type in meta
-                if response.meta and 'mime_type' in response.meta:
-                    result.append(ToolInvokeMessageBinary(
-                        mimetype=response.meta.get('mime_type', 'octet/stream') if response.meta else 'octet/stream',
-                        url=response.message,
-                        save_as=response.save_as,
-                    ))
-
-        return result
-    
-    def create_message_files(self, messages: list[ToolInvokeMessageBinary]) -> list[tuple[MessageFile, bool]]:
-        """
-        Create message file
-
-        :param messages: messages
-        :return: message files, should save as variable
-        """
-        result = []
-
-        for message in messages:
-            file_type = 'bin'
-            if 'image' in message.mimetype:
-                file_type = 'image'
-            elif 'video' in message.mimetype:
-                file_type = 'video'
-            elif 'audio' in message.mimetype:
-                file_type = 'audio'
-            elif 'text' in message.mimetype:
-                file_type = 'text'
-            elif 'pdf' in message.mimetype:
-                file_type = 'pdf'
-            elif 'zip' in message.mimetype:
-                file_type = 'archive'
-            # ...
-
-            invoke_from = self.application_generate_entity.invoke_from
-
-            message_file = MessageFile(
-                message_id=self.message.id,
-                type=file_type,
-                transfer_method=FileTransferMethod.TOOL_FILE.value,
-                belongs_to='assistant',
-                url=message.url,
-                upload_file_id=None,
-                created_by_role=('account'if invoke_from in [InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER] else 'end_user'),
-                created_by=self.user_id,
-            )
-            db.session.add(message_file)
-            db.session.commit()
-            db.session.refresh(message_file)
-
-            result.append((
-                message_file,
-                message.save_as
-            ))
-
-        db.session.close()
-            
-        return result
         
     def create_agent_thought(self, message_id: str, message: str, 
                              tool_name: str, tool_input: str, messages_ids: list[str]
@@ -366,6 +279,7 @@ class BaseAssistantApplicationRunner(AppRunner):
             thought='',
             tool=tool_name,
             tool_labels_str='{}',
+            tool_meta_str='{}',
             tool_input=tool_input,
             message=message,
             message_token=0,
@@ -400,7 +314,8 @@ class BaseAssistantApplicationRunner(AppRunner):
                            tool_name: str,
                            tool_input: Union[str, dict],
                            thought: str, 
-                           observation: str, 
+                           observation: Union[str, dict], 
+                           tool_invoke_meta: Union[str, dict],
                            answer: str,
                            messages_ids: list[str],
                            llm_usage: LLMUsage = None) -> MessageAgentThought:
@@ -427,6 +342,12 @@ class BaseAssistantApplicationRunner(AppRunner):
             agent_thought.tool_input = tool_input
 
         if observation is not None:
+            if isinstance(observation, dict):
+                try:
+                    observation = json.dumps(observation, ensure_ascii=False)
+                except Exception as e:
+                    observation = json.dumps(observation)
+                    
             agent_thought.observation = observation
 
         if answer is not None:
@@ -460,75 +381,17 @@ class BaseAssistantApplicationRunner(AppRunner):
 
         agent_thought.tool_labels_str = json.dumps(labels)
 
-        db.session.commit()
-        db.session.close()
-    
-    def transform_tool_invoke_messages(self, messages: list[ToolInvokeMessage]) -> list[ToolInvokeMessage]:
-        """
-        Transform tool message into agent thought
-        """
-        result = []
-
-        for message in messages:
-            if message.type == ToolInvokeMessage.MessageType.TEXT:
-                result.append(message)
-            elif message.type == ToolInvokeMessage.MessageType.LINK:
-                result.append(message)
-            elif message.type == ToolInvokeMessage.MessageType.IMAGE:
-                # try to download image
+        if tool_invoke_meta is not None:
+            if isinstance(tool_invoke_meta, dict):
                 try:
-                    file = ToolFileManager.create_file_by_url(user_id=self.user_id, tenant_id=self.tenant_id,
-                                                               conversation_id=self.message.conversation_id,
-                                                               file_url=message.message)
-                    
-                    url = f'/files/tools/{file.id}{guess_extension(file.mimetype) or ".png"}'
-
-                    result.append(ToolInvokeMessage(
-                        type=ToolInvokeMessage.MessageType.IMAGE_LINK,
-                        message=url,
-                        save_as=message.save_as,
-                        meta=message.meta.copy() if message.meta is not None else {},
-                    ))
+                    tool_invoke_meta = json.dumps(tool_invoke_meta, ensure_ascii=False)
                 except Exception as e:
-                    logger.exception(e)
-                    result.append(ToolInvokeMessage(
-                        type=ToolInvokeMessage.MessageType.TEXT,
-                        message=f"Failed to download image: {message.message}, you can try to download it yourself.",
-                        meta=message.meta.copy() if message.meta is not None else {},
-                        save_as=message.save_as,
-                    ))
-            elif message.type == ToolInvokeMessage.MessageType.BLOB:
-                # get mime type and save blob to storage
-                mimetype = message.meta.get('mime_type', 'octet/stream')
-                # if message is str, encode it to bytes
-                if isinstance(message.message, str):
-                    message.message = message.message.encode('utf-8')
-                file = ToolFileManager.create_file_by_raw(user_id=self.user_id, tenant_id=self.tenant_id,
-                                                            conversation_id=self.message.conversation_id,
-                                                            file_binary=message.message,
-                                                            mimetype=mimetype)
-                                                            
-                url = f'/files/tools/{file.id}{guess_extension(file.mimetype) or ".bin"}'
-
-                # check if file is image
-                if 'image' in mimetype:
-                    result.append(ToolInvokeMessage(
-                        type=ToolInvokeMessage.MessageType.IMAGE_LINK,
-                        message=url,
-                        save_as=message.save_as,
-                        meta=message.meta.copy() if message.meta is not None else {},
-                    ))
-                else:
-                    result.append(ToolInvokeMessage(
-                        type=ToolInvokeMessage.MessageType.LINK,
-                        message=url,
-                        save_as=message.save_as,
-                        meta=message.meta.copy() if message.meta is not None else {},
-                    ))
-            else:
-                result.append(message)
+                    tool_invoke_meta = json.dumps(tool_invoke_meta)
 
-        return result
+            agent_thought.tool_meta_str = tool_invoke_meta
+
+        db.session.commit()
+        db.session.close()
     
     def update_db_variables(self, tool_variables: ToolRuntimeVariablePool, db_variables: ToolConversationVariables):
         """
@@ -569,8 +432,12 @@ class BaseAssistantApplicationRunner(AppRunner):
                         try:
                             tool_inputs = json.loads(agent_thought.tool_input)
                         except Exception as e:
-                            logging.warning("tool execution error: {}, tool_input: {}.".format(str(e), agent_thought.tool_input))
-                            tool_inputs = { agent_thought.tool: agent_thought.tool_input }
+                            tool_inputs = { tool: {} for tool in tools }
+                        try:
+                            tool_responses = json.loads(agent_thought.observation)
+                        except Exception as e:
+                            tool_responses = { tool: agent_thought.observation for tool in tools }
+
                         for tool in tools:
                             # generate a uuid for tool call
                             tool_call_id = str(uuid.uuid4())
@@ -583,7 +450,7 @@ class BaseAssistantApplicationRunner(AppRunner):
                                 )
                             ))
                             tool_call_response.append(ToolPromptMessage(
-                                content=agent_thought.observation,
+                                content=tool_responses.get(tool, agent_thought.observation),
                                 name=tool,
                                 tool_call_id=tool_call_id,
                             ))
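
Note: save_agent_thought now accepts tool_input, observation and tool_invoke_meta either as plain strings or as dicts keyed by tool name, serializing dict values to JSON before persisting them. A sketch of the dict-keyed call shape the runners below use (the tool name and values are illustrative; tool_invoke_meta is the ToolInvokeMeta returned by ToolEngine.agent_invoke):

runner.save_agent_thought(
    agent_thought=agent_thought,
    tool_name='current_time',                                      # hypothetical tool name
    tool_input={'current_time': {'timezone': 'UTC'}},
    tool_invoke_meta={'current_time': tool_invoke_meta.to_dict()},
    thought=None,
    observation={'current_time': '2024-03-01 00:00:00 UTC'},
    answer=None,
    messages_ids=[],
)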

+ 110 - 90
api/core/features/assistant_cot_runner.py → api/core/agent/cot_agent_runner.py

@@ -3,9 +3,10 @@ import re
 from collections.abc import Generator
 from typing import Literal, Union
 
-from core.application_queue_manager import PublishFrom
-from core.entities.application_entities import AgentPromptEntity, AgentScratchpadUnit
-from core.features.assistant_base_runner import BaseAssistantApplicationRunner
+from core.agent.base_agent_runner import BaseAgentRunner
+from core.agent.entities import AgentPromptEntity, AgentScratchpadUnit
+from core.app.apps.base_app_queue_manager import PublishFrom
+from core.app.entities.queue_entities import QueueAgentThoughtEvent, QueueMessageEndEvent, QueueMessageFileEvent
 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
 from core.model_runtime.entities.message_entities import (
     AssistantPromptMessage,
@@ -16,18 +17,12 @@ from core.model_runtime.entities.message_entities import (
     UserPromptMessage,
 )
 from core.model_runtime.utils.encoders import jsonable_encoder
-from core.tools.errors import (
-    ToolInvokeError,
-    ToolNotFoundError,
-    ToolNotSupportedError,
-    ToolParameterValidationError,
-    ToolProviderCredentialValidationError,
-    ToolProviderNotFoundError,
-)
+from core.tools.entities.tool_entities import ToolInvokeMeta
+from core.tools.tool_engine import ToolEngine
 from models.model import Conversation, Message
 
 
-class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
+class CotAgentRunner(BaseAgentRunner):
     _is_first_iteration = True
     _ignore_observation_providers = ['wenxin']
 
@@ -39,30 +34,33 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
         """
         Run Cot agent application
         """
-        app_orchestration_config = self.app_orchestration_config
-        self._repack_app_orchestration_config(app_orchestration_config)
+        app_generate_entity = self.application_generate_entity
+        self._repack_app_generate_entity(app_generate_entity)
 
         agent_scratchpad: list[AgentScratchpadUnit] = []
         self._init_agent_scratchpad(agent_scratchpad, self.history_prompt_messages)
 
-        if 'Observation' not in app_orchestration_config.model_config.stop:
-            if app_orchestration_config.model_config.provider not in self._ignore_observation_providers:
-                app_orchestration_config.model_config.stop.append('Observation')
+        # check model mode
+        if 'Observation' not in app_generate_entity.model_config.stop:
+            if app_generate_entity.model_config.provider not in self._ignore_observation_providers:
+                app_generate_entity.model_config.stop.append('Observation')
+
+        app_config = self.app_config
 
         # override inputs
         inputs = inputs or {}
-        instruction = self.app_orchestration_config.prompt_template.simple_prompt_template
+        instruction = app_config.prompt_template.simple_prompt_template
         instruction = self._fill_in_inputs_from_external_data_tools(instruction, inputs)
 
         iteration_step = 1
-        max_iteration_steps = min(self.app_orchestration_config.agent.max_iteration, 5) + 1
+        max_iteration_steps = min(app_config.agent.max_iteration, 5) + 1
 
         prompt_messages = self.history_prompt_messages
 
         # convert tools into ModelRuntime Tool format
         prompt_messages_tools: list[PromptMessageTool] = []
         tool_instances = {}
-        for tool in self.app_orchestration_config.agent.tools if self.app_orchestration_config.agent else []:
+        for tool in app_config.agent.tools if app_config.agent else []:
             try:
                 prompt_tool, tool_entity = self._convert_tool_to_prompt_message_tool(tool)
             except Exception:
@@ -118,15 +116,17 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
             )
 
             if iteration_step > 1:
-                self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
+                self.queue_manager.publish(QueueAgentThoughtEvent(
+                    agent_thought_id=agent_thought.id
+                ), PublishFrom.APPLICATION_MANAGER)
 
             # update prompt messages
             prompt_messages = self._organize_cot_prompt_messages(
-                mode=app_orchestration_config.model_config.mode,
+                mode=app_generate_entity.model_config.mode,
                 prompt_messages=prompt_messages,
                 tools=prompt_messages_tools,
                 agent_scratchpad=agent_scratchpad,
-                agent_prompt_message=app_orchestration_config.agent.prompt,
+                agent_prompt_message=app_config.agent.prompt,
                 instruction=instruction,
                 input=query
             )
@@ -136,9 +136,9 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
             # invoke model
             chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
                 prompt_messages=prompt_messages,
-                model_parameters=app_orchestration_config.model_config.parameters,
+                model_parameters=app_generate_entity.model_config.parameters,
                 tools=[],
-                stop=app_orchestration_config.model_config.stop,
+                stop=app_generate_entity.model_config.stop,
                 stream=True,
                 user=self.user_id,
                 callbacks=[],
@@ -160,7 +160,9 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
 
             # publish agent thought if it's first iteration
             if iteration_step == 1:
-                self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
+                self.queue_manager.publish(QueueAgentThoughtEvent(
+                    agent_thought_id=agent_thought.id
+                ), PublishFrom.APPLICATION_MANAGER)
 
             for chunk in react_chunks:
                 if isinstance(chunk, dict):
@@ -214,7 +216,10 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
             
             self.save_agent_thought(agent_thought=agent_thought,
                                     tool_name=scratchpad.action.action_name if scratchpad.action else '',
-                                    tool_input=scratchpad.action.action_input if scratchpad.action else '',
+                                    tool_input={
+                                        scratchpad.action.action_name: scratchpad.action.action_input
+                                    } if scratchpad.action else '',
+                                    tool_invoke_meta={},
                                     thought=scratchpad.thought,
                                     observation='',
                                     answer=scratchpad.agent_response,
@@ -222,7 +227,9 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
                                     llm_usage=usage_dict['usage'])
             
             if scratchpad.action and scratchpad.action.action_name.lower() != "final answer":
-                self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
+                self.queue_manager.publish(QueueAgentThoughtEvent(
+                    agent_thought_id=agent_thought.id
+                ), PublishFrom.APPLICATION_MANAGER)
 
             if not scratchpad.action:
                 # failed to extract action, return final answer directly
@@ -245,62 +252,65 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
                     tool_instance = tool_instances.get(tool_call_name)
                     if not tool_instance:
                         answer = f"there is not a tool named {tool_call_name}"
-                        self.save_agent_thought(agent_thought=agent_thought, 
-                                                tool_name='',
-                                                tool_input='',
-                                                thought=None, 
-                                                observation=answer, 
-                                                answer=answer,
-                                                messages_ids=[])
-                        self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
+                        self.save_agent_thought(
+                            agent_thought=agent_thought, 
+                            tool_name='',
+                            tool_input='',
+                            tool_invoke_meta=ToolInvokeMeta.error_instance(
+                                f"there is not a tool named {tool_call_name}"
+                            ).to_dict(),
+                            thought=None, 
+                            observation={
+                                tool_call_name: answer
+                            }, 
+                            answer=answer,
+                            messages_ids=[]
+                        )
+                        self.queue_manager.publish(QueueAgentThoughtEvent(
+                            agent_thought_id=agent_thought.id
+                        ), PublishFrom.APPLICATION_MANAGER)
                     else:
+                        if isinstance(tool_call_args, str):
+                            try:
+                                tool_call_args = json.loads(tool_call_args)
+                            except json.JSONDecodeError:
+                                pass
+
                         # invoke tool
-                        error_response = None
-                        try:
-                            if isinstance(tool_call_args, str):
-                                try:
-                                    tool_call_args = json.loads(tool_call_args)
-                                except json.JSONDecodeError:
-                                    pass
-                            
-                            tool_response = tool_instance.invoke(
-                                user_id=self.user_id, 
-                                tool_parameters=tool_call_args
-                            )
-                            # transform tool response to llm friendly response
-                            tool_response = self.transform_tool_invoke_messages(tool_response)
-                            # extract binary data from tool invoke message
-                            binary_files = self.extract_tool_response_binary(tool_response)
-                            # create message file
-                            message_files = self.create_message_files(binary_files)
-                            # publish files
-                            for message_file, save_as in message_files:
-                                if save_as:
-                                    self.variables_pool.set_file(tool_name=tool_call_name,
-                                                                  value=message_file.id,
-                                                                  name=save_as)
-                                self.queue_manager.publish_message_file(message_file, PublishFrom.APPLICATION_MANAGER)
-
-                            message_file_ids = [message_file.id for message_file, _ in message_files]
-                        except ToolProviderCredentialValidationError as e:
-                            error_response = "Please check your tool provider credentials"
-                        except (
-                            ToolNotFoundError, ToolNotSupportedError, ToolProviderNotFoundError
-                        ) as e:
-                            error_response = f"there is not a tool named {tool_call_name}"
-                        except (
-                            ToolParameterValidationError
-                        ) as e:
-                            error_response = f"tool parameters validation error: {e}, please check your tool parameters"
-                        except ToolInvokeError as e:
-                            error_response = f"tool invoke error: {e}"
-                        except Exception as e:
-                            error_response = f"unknown error: {e}"
-
-                        if error_response:
-                            observation = error_response
-                        else:
-                            observation = self._convert_tool_response_to_str(tool_response)
+                        tool_invoke_response, message_files, tool_invoke_meta = ToolEngine.agent_invoke(
+                            tool=tool_instance,
+                            tool_parameters=tool_call_args,
+                            user_id=self.user_id,
+                            tenant_id=self.tenant_id,
+                            message=self.message,
+                            invoke_from=self.application_generate_entity.invoke_from,
+                            agent_tool_callback=self.agent_callback
+                        )
+                        # publish files
+                        for message_file, save_as in message_files:
+                            if save_as:
+                                self.variables_pool.set_file(tool_name=tool_call_name, value=message_file.id, name=save_as)
+
+                            # publish message file
+                            self.queue_manager.publish(QueueMessageFileEvent(
+                                message_file_id=message_file.id
+                            ), PublishFrom.APPLICATION_MANAGER)
+                            # add message file ids
+                            message_file_ids.append(message_file.id)
+
+                        observation = tool_invoke_response
 
                         # save scratchpad
                         scratchpad.observation = observation
@@ -309,13 +319,22 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
                         self.save_agent_thought(
                             agent_thought=agent_thought, 
                             tool_name=tool_call_name,
-                            tool_input=tool_call_args,
+                            tool_input={
+                                tool_call_name: tool_call_args
+                            },
+                            tool_invoke_meta={
+                                tool_call_name: tool_invoke_meta.to_dict()
+                            },
                             thought=None,
-                            observation=observation, 
+                            observation={
+                                tool_call_name: observation
+                            }, 
                             answer=scratchpad.agent_response,
                             messages_ids=message_file_ids,
                         )
-                        self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
+                        self.queue_manager.publish(QueueAgentThoughtEvent(
+                            agent_thought_id=agent_thought.id
+                        ), PublishFrom.APPLICATION_MANAGER)
 
                 # update prompt tool message
                 for prompt_tool in prompt_messages_tools:
@@ -340,16 +359,17 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
         self.save_agent_thought(
             agent_thought=agent_thought, 
             tool_name='',
-            tool_input='',
+            tool_input={},
+            tool_invoke_meta={},
             thought=final_answer,
-            observation='', 
+            observation={}, 
             answer=final_answer,
             messages_ids=[]
         )
 
         self.update_db_variables(self.variables_pool, self.db_variables_pool)
         # publish end event
-        self.queue_manager.publish_message_end(LLMResult(
+        self.queue_manager.publish(QueueMessageEndEvent(llm_result=LLMResult(
             model=model_instance.model,
             prompt_messages=prompt_messages,
             message=AssistantPromptMessage(
@@ -357,7 +377,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
             ),
             usage=llm_usage['usage'] if llm_usage['usage'] else LLMUsage.empty_usage(),
             system_fingerprint=''
-        ), PublishFrom.APPLICATION_MANAGER)
+        )), PublishFrom.APPLICATION_MANAGER)
 
     def _handle_stream_react(self, llm_response: Generator[LLMResultChunk, None, None], usage: dict) \
         -> Generator[Union[str, dict], None, None]:
@@ -550,7 +570,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
         """
             convert agent scratchpad list to str
         """
-        next_iteration = self.app_orchestration_config.agent.prompt.next_iteration
+        next_iteration = self.app_config.agent.prompt.next_iteration
 
         result = ''
         for scratchpad in agent_scratchpad:

+ 61 - 0
api/core/agent/entities.py

@@ -0,0 +1,61 @@
+from enum import Enum
+from typing import Any, Literal, Optional, Union
+
+from pydantic import BaseModel
+
+
+class AgentToolEntity(BaseModel):
+    """
+    Agent Tool Entity.
+    """
+    provider_type: Literal["builtin", "api"]
+    provider_id: str
+    tool_name: str
+    tool_parameters: dict[str, Any] = {}
+
+
+class AgentPromptEntity(BaseModel):
+    """
+    Agent Prompt Entity.
+    """
+    first_prompt: str
+    next_iteration: str
+
+
+class AgentScratchpadUnit(BaseModel):
+    """
+    Agent First Prompt Entity.
+    """
+
+    class Action(BaseModel):
+        """
+        Action Entity.
+        """
+        action_name: str
+        action_input: Union[dict, str]
+
+    agent_response: Optional[str] = None
+    thought: Optional[str] = None
+    action_str: Optional[str] = None
+    observation: Optional[str] = None
+    action: Optional[Action] = None
+
+
+class AgentEntity(BaseModel):
+    """
+    Agent Entity.
+    """
+
+    class Strategy(Enum):
+        """
+        Agent Strategy.
+        """
+        CHAIN_OF_THOUGHT = 'chain-of-thought'
+        FUNCTION_CALLING = 'function-calling'
+
+    provider: str
+    model: str
+    strategy: Strategy
+    prompt: Optional[AgentPromptEntity] = None
+    tools: Optional[list[AgentToolEntity]] = None
+    max_iteration: int = 5
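
Note: these entities replace the agent-related classes previously imported from core.entities.application_entities. A minimal construction sketch (provider, model and tool names are placeholders):

from core.agent.entities import AgentEntity, AgentPromptEntity, AgentToolEntity

agent = AgentEntity(
    provider='openai',
    model='gpt-4',
    strategy=AgentEntity.Strategy.FUNCTION_CALLING,
    prompt=AgentPromptEntity(first_prompt='...', next_iteration='...'),
    tools=[
        AgentToolEntity(provider_type='builtin', provider_id='time', tool_name='current_time')
    ],
    max_iteration=5,
)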

+ 69 - 82
api/core/features/assistant_fc_runner.py → api/core/agent/fc_agent_runner.py

@@ -3,8 +3,9 @@ import logging
 from collections.abc import Generator
 from typing import Any, Union
 
-from core.application_queue_manager import PublishFrom
-from core.features.assistant_base_runner import BaseAssistantApplicationRunner
+from core.agent.base_agent_runner import BaseAgentRunner
+from core.app.apps.base_app_queue_manager import PublishFrom
+from core.app.entities.queue_entities import QueueAgentThoughtEvent, QueueMessageEndEvent, QueueMessageFileEvent
 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
 from core.model_runtime.entities.message_entities import (
     AssistantPromptMessage,
@@ -14,19 +15,13 @@ from core.model_runtime.entities.message_entities import (
     ToolPromptMessage,
     UserPromptMessage,
 )
-from core.tools.errors import (
-    ToolInvokeError,
-    ToolNotFoundError,
-    ToolNotSupportedError,
-    ToolParameterValidationError,
-    ToolProviderCredentialValidationError,
-    ToolProviderNotFoundError,
-)
+from core.tools.entities.tool_entities import ToolInvokeMeta
+from core.tools.tool_engine import ToolEngine
 from models.model import Conversation, Message, MessageAgentThought
 
 logger = logging.getLogger(__name__)
 
-class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
+class FunctionCallAgentRunner(BaseAgentRunner):
     def run(self, conversation: Conversation,
                 message: Message,
                 query: str,
@@ -34,9 +29,11 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
         """
         Run FunctionCall agent application
         """
-        app_orchestration_config = self.app_orchestration_config
+        app_generate_entity = self.application_generate_entity
+
+        app_config = self.app_config
 
-        prompt_template = self.app_orchestration_config.prompt_template.simple_prompt_template or ''
+        prompt_template = app_config.prompt_template.simple_prompt_template or ''
         prompt_messages = self.history_prompt_messages
         prompt_messages = self.organize_prompt_messages(
             prompt_template=prompt_template,
@@ -47,7 +44,7 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
         # convert tools into ModelRuntime Tool format
         prompt_messages_tools: list[PromptMessageTool] = []
         tool_instances = {}
-        for tool in self.app_orchestration_config.agent.tools if self.app_orchestration_config.agent else []:
+        for tool in app_config.agent.tools if app_config.agent else []:
             try:
                 prompt_tool, tool_entity = self._convert_tool_to_prompt_message_tool(tool)
             except Exception:
@@ -67,7 +64,7 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
             tool_instances[dataset_tool.identity.name] = dataset_tool
 
         iteration_step = 1
-        max_iteration_steps = min(app_orchestration_config.agent.max_iteration, 5) + 1
+        max_iteration_steps = min(app_config.agent.max_iteration, 5) + 1
 
         # continue to run until there is not any tool call
         function_call_state = True
@@ -110,9 +107,9 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
             # invoke model
             chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
                 prompt_messages=prompt_messages,
-                model_parameters=app_orchestration_config.model_config.parameters,
+                model_parameters=app_generate_entity.model_config.parameters,
                 tools=prompt_messages_tools,
-                stop=app_orchestration_config.model_config.stop,
+                stop=app_generate_entity.model_config.stop,
                 stream=self.stream_tool_call,
                 user=self.user_id,
                 callbacks=[],
@@ -133,7 +130,9 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
                 is_first_chunk = True
                 for chunk in chunks:
                     if is_first_chunk:
-                        self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
+                        self.queue_manager.publish(QueueAgentThoughtEvent(
+                            agent_thought_id=agent_thought.id
+                        ), PublishFrom.APPLICATION_MANAGER)
                         is_first_chunk = False
                     # check if there is any tool call
                     if self.check_tool_calls(chunk):
@@ -193,7 +192,9 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
                 if not result.message.content:
                     result.message.content = ''
 
-                self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
+                self.queue_manager.publish(QueueAgentThoughtEvent(
+                    agent_thought_id=agent_thought.id
+                ), PublishFrom.APPLICATION_MANAGER)
                 
                 yield LLMResultChunk(
                     model=model_instance.model,
@@ -226,13 +227,15 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
                 tool_name=tool_call_names,
                 tool_input=tool_call_inputs,
                 thought=response,
+                tool_invoke_meta=None,
                 observation=None,
                 answer=response,
                 messages_ids=[],
                 llm_usage=current_llm_usage
             )
-
-            self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
+            self.queue_manager.publish(QueueAgentThoughtEvent(
+                agent_thought_id=agent_thought.id
+            ), PublishFrom.APPLICATION_MANAGER)
             
             final_answer += response + '\n'
 
@@ -250,65 +253,40 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
                     tool_response = {
                         "tool_call_id": tool_call_id,
                         "tool_call_name": tool_call_name,
-                        "tool_response": f"there is not a tool named {tool_call_name}"
+                        "tool_response": f"there is not a tool named {tool_call_name}",
+                        "meta": ToolInvokeMeta.error_instance(f"there is not a tool named {tool_call_name}").to_dict()
                     }
-                    tool_responses.append(tool_response)
                 else:
                     # invoke tool
-                    error_response = None
-                    try:
-                        tool_invoke_message = tool_instance.invoke(
-                            user_id=self.user_id, 
-                            tool_parameters=tool_call_args, 
-                        )
-                        # transform tool invoke message to get LLM friendly message
-                        tool_invoke_message = self.transform_tool_invoke_messages(tool_invoke_message)
-                        # extract binary data from tool invoke message
-                        binary_files = self.extract_tool_response_binary(tool_invoke_message)
-                        # create message file
-                        message_files = self.create_message_files(binary_files)
-                        # publish files
-                        for message_file, save_as in message_files:
-                            if save_as:
-                                self.variables_pool.set_file(tool_name=tool_call_name, value=message_file.id, name=save_as)
-
-                            # publish message file
-                            self.queue_manager.publish_message_file(message_file, PublishFrom.APPLICATION_MANAGER)
-                            # add message file ids
-                            message_file_ids.append(message_file.id)
-                            
-                    except ToolProviderCredentialValidationError as e:
-                        error_response = "Please check your tool provider credentials"
-                    except (
-                        ToolNotFoundError, ToolNotSupportedError, ToolProviderNotFoundError
-                    ) as e:
-                        error_response = f"there is not a tool named {tool_call_name}"
-                    except (
-                        ToolParameterValidationError
-                    ) as e:
-                        error_response = f"tool parameters validation error: {e}, please check your tool parameters"
-                    except ToolInvokeError as e:
-                        error_response = f"tool invoke error: {e}"
-                    except Exception as e:
-                        error_response = f"unknown error: {e}"
-
-                    if error_response:
-                        observation = error_response
-                        tool_response = {
-                            "tool_call_id": tool_call_id,
-                            "tool_call_name": tool_call_name,
-                            "tool_response": error_response
-                        }
-                        tool_responses.append(tool_response)
-                    else:
-                        observation = self._convert_tool_response_to_str(tool_invoke_message)
-                        tool_response = {
-                            "tool_call_id": tool_call_id,
-                            "tool_call_name": tool_call_name,
-                            "tool_response": observation
-                        }
-                        tool_responses.append(tool_response)
-
+                    tool_invoke_response, message_files, tool_invoke_meta = ToolEngine.agent_invoke(
+                        tool=tool_instance,
+                        tool_parameters=tool_call_args,
+                        user_id=self.user_id,
+                        tenant_id=self.tenant_id,
+                        message=self.message,
+                        invoke_from=self.application_generate_entity.invoke_from,
+                        agent_tool_callback=self.agent_callback,
+                    )
+                    # publish files
+                    for message_file, save_as in message_files:
+                        if save_as:
+                            self.variables_pool.set_file(tool_name=tool_call_name, value=message_file.id, name=save_as)
+
+                        # publish message file
+                        self.queue_manager.publish(QueueMessageFileEvent(
+                            message_file_id=message_file.id
+                        ), PublishFrom.APPLICATION_MANAGER)
+                        # add message file ids
+                        message_file_ids.append(message_file.id)
+                    
+                    tool_response = {
+                        "tool_call_id": tool_call_id,
+                        "tool_call_name": tool_call_name,
+                        "tool_response": tool_invoke_response,
+                        "meta": tool_invoke_meta.to_dict()
+                    }
+                
+                tool_responses.append(tool_response)
                 prompt_messages = self.organize_prompt_messages(
                     prompt_template=prompt_template,
                     query=None,
@@ -325,11 +303,20 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
                     tool_name=None,
                     tool_input=None,
                     thought=None, 
-                    observation=tool_response['tool_response'], 
+                    tool_invoke_meta={
+                        tool_response['tool_call_name']: tool_response['meta'] 
+                        for tool_response in tool_responses
+                    },
+                    observation={
+                        tool_response['tool_call_name']: tool_response['tool_response'] 
+                        for tool_response in tool_responses
+                    },
                     answer=None,
                     messages_ids=message_file_ids
                 )
-                self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
+                self.queue_manager.publish(QueueAgentThoughtEvent(
+                    agent_thought_id=agent_thought.id
+                ), PublishFrom.APPLICATION_MANAGER)
 
             # update prompt tool
             for prompt_tool in prompt_messages_tools:
@@ -339,15 +326,15 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
 
         self.update_db_variables(self.variables_pool, self.db_variables_pool)
         # publish end event
-        self.queue_manager.publish_message_end(LLMResult(
+        self.queue_manager.publish(QueueMessageEndEvent(llm_result=LLMResult(
             model=model_instance.model,
             prompt_messages=prompt_messages,
             message=AssistantPromptMessage(
-                content=final_answer,
+                content=final_answer
             ),
             usage=llm_usage['usage'] if llm_usage['usage'] else LLMUsage.empty_usage(),
             system_fingerprint=''
-        ), PublishFrom.APPLICATION_MANAGER)
+        )), PublishFrom.APPLICATION_MANAGER)
 
     def check_tool_calls(self, llm_result_chunk: LLMResultChunk) -> bool:
         """

+ 0 - 0
api/core/features/__init__.py → api/core/app/__init__.py


+ 0 - 0
api/core/features/dataset_retrieval/__init__.py → api/core/app/app_config/__init__.py


+ 76 - 0
api/core/app/app_config/base_app_config_manager.py

@@ -0,0 +1,76 @@
+from typing import Optional, Union
+
+from core.app.app_config.entities import AppAdditionalFeatures, EasyUIBasedAppModelConfigFrom
+from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
+from core.app.app_config.features.more_like_this.manager import MoreLikeThisConfigManager
+from core.app.app_config.features.opening_statement.manager import OpeningStatementConfigManager
+from core.app.app_config.features.retrieval_resource.manager import RetrievalResourceConfigManager
+from core.app.app_config.features.speech_to_text.manager import SpeechToTextConfigManager
+from core.app.app_config.features.suggested_questions_after_answer.manager import (
+    SuggestedQuestionsAfterAnswerConfigManager,
+)
+from core.app.app_config.features.text_to_speech.manager import TextToSpeechConfigManager
+from models.model import AppMode, AppModelConfig
+
+
+class BaseAppConfigManager:
+
+    @classmethod
+    def convert_to_config_dict(cls, config_from: EasyUIBasedAppModelConfigFrom,
+                               app_model_config: Union[AppModelConfig, dict],
+                               config_dict: Optional[dict] = None) -> dict:
+        """
+        Convert app model config to config dict
+        :param config_from: app model config from
+        :param app_model_config: app model config
+        :param config_dict: app model config dict
+        :return:
+        """
+        if config_from != EasyUIBasedAppModelConfigFrom.ARGS:
+            app_model_config_dict = app_model_config.to_dict()
+            config_dict = app_model_config_dict.copy()
+
+        return config_dict
+
+    @classmethod
+    def convert_features(cls, config_dict: dict, app_mode: AppMode) -> AppAdditionalFeatures:
+        """
+        Convert an app model config dict to an AppAdditionalFeatures entity
+
+        :param config_dict: app config
+        :param app_mode: app mode
+        """
+        config_dict = config_dict.copy()
+
+        additional_features = AppAdditionalFeatures()
+        additional_features.show_retrieve_source = RetrievalResourceConfigManager.convert(
+            config=config_dict
+        )
+
+        additional_features.file_upload = FileUploadConfigManager.convert(
+            config=config_dict,
+            is_vision=app_mode in [AppMode.CHAT, AppMode.COMPLETION, AppMode.AGENT_CHAT]
+        )
+
+        additional_features.opening_statement, additional_features.suggested_questions = \
+            OpeningStatementConfigManager.convert(
+                config=config_dict
+            )
+
+        additional_features.suggested_questions_after_answer = SuggestedQuestionsAfterAnswerConfigManager.convert(
+            config=config_dict
+        )
+
+        additional_features.more_like_this = MoreLikeThisConfigManager.convert(
+            config=config_dict
+        )
+
+        additional_features.speech_to_text = SpeechToTextConfigManager.convert(
+            config=config_dict
+        )
+
+        additional_features.text_to_speech = TextToSpeechConfigManager.convert(
+            config=config_dict
+        )
+
+        return additional_features
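
A minimal usage sketch of convert_features, assuming the api package is importable; the config keys shown are example values for the feature sub-managers above:

    from core.app.app_config.base_app_config_manager import BaseAppConfigManager
    from models.model import AppMode

    # example config dict; keys mirror the ones read by the feature managers above
    config_dict = {
        'opening_statement': 'Hello! How can I help you today?',
        'suggested_questions': ['What can you do?'],
        'retriever_resource': {'enabled': True},
        'speech_to_text': {'enabled': False},
    }

    features = BaseAppConfigManager.convert_features(config_dict, AppMode.CHAT)
    # features.show_retrieve_source is True; unset features keep their defaults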

+ 0 - 0
api/core/features/dataset_retrieval/agent/__init__.py → api/core/app/app_config/common/__init__.py


+ 0 - 0
api/core/features/dataset_retrieval/agent/output_parser/__init__.py → api/core/app/app_config/common/sensitive_word_avoidance/__init__.py


+ 50 - 0
api/core/app/app_config/common/sensitive_word_avoidance/manager.py

@@ -0,0 +1,50 @@
+from typing import Optional
+
+from core.app.app_config.entities import SensitiveWordAvoidanceEntity
+from core.moderation.factory import ModerationFactory
+
+
+class SensitiveWordAvoidanceConfigManager:
+    @classmethod
+    def convert(cls, config: dict) -> Optional[SensitiveWordAvoidanceEntity]:
+        sensitive_word_avoidance_dict = config.get('sensitive_word_avoidance')
+        if not sensitive_word_avoidance_dict:
+            return None
+
+        if 'enabled' in sensitive_word_avoidance_dict and sensitive_word_avoidance_dict['enabled']:
+            return SensitiveWordAvoidanceEntity(
+                type=sensitive_word_avoidance_dict.get('type'),
+                config=sensitive_word_avoidance_dict.get('config'),
+            )
+        else:
+            return None
+
+    @classmethod
+    def validate_and_set_defaults(cls, tenant_id, config: dict, only_structure_validate: bool = False) \
+            -> tuple[dict, list[str]]:
+        if not config.get("sensitive_word_avoidance"):
+            config["sensitive_word_avoidance"] = {
+                "enabled": False
+            }
+
+        if not isinstance(config["sensitive_word_avoidance"], dict):
+            raise ValueError("sensitive_word_avoidance must be of dict type")
+
+        if "enabled" not in config["sensitive_word_avoidance"] or not config["sensitive_word_avoidance"]["enabled"]:
+            config["sensitive_word_avoidance"]["enabled"] = False
+
+        if config["sensitive_word_avoidance"]["enabled"]:
+            if not config["sensitive_word_avoidance"].get("type"):
+                raise ValueError("sensitive_word_avoidance.type is required")
+
+            if not only_structure_validate:
+                typ = config["sensitive_word_avoidance"]["type"]
+                sensitive_word_avoidance_config = config["sensitive_word_avoidance"]["config"]
+
+                ModerationFactory.validate_config(
+                    name=typ,
+                    tenant_id=tenant_id,
+                    config=sensitive_word_avoidance_config
+                )
+
+        return config, ["sensitive_word_avoidance"]

+ 0 - 0
api/core/app/app_config/easy_ui_based_app/__init__.py


+ 0 - 0
api/core/app/app_config/easy_ui_based_app/agent/__init__.py


+ 78 - 0
api/core/app/app_config/easy_ui_based_app/agent/manager.py

@@ -0,0 +1,78 @@
+from typing import Optional
+
+from core.agent.entities import AgentEntity, AgentPromptEntity, AgentToolEntity
+from core.tools.prompt.template import REACT_PROMPT_TEMPLATES
+
+
+class AgentConfigManager:
+    @classmethod
+    def convert(cls, config: dict) -> Optional[AgentEntity]:
+        """
+        Convert agent_mode config to an agent entity
+
+        :param config: model config args
+        """
+        if 'agent_mode' in config and config['agent_mode'] \
+                and 'enabled' in config['agent_mode']:
+
+            agent_dict = config.get('agent_mode', {})
+            agent_strategy = agent_dict.get('strategy', 'cot')
+
+            if agent_strategy == 'function_call':
+                strategy = AgentEntity.Strategy.FUNCTION_CALLING
+            elif agent_strategy == 'cot' or agent_strategy == 'react':
+                strategy = AgentEntity.Strategy.CHAIN_OF_THOUGHT
+            else:
+                # old configs, try to detect default strategy
+                if config['model']['provider'] == 'openai':
+                    strategy = AgentEntity.Strategy.FUNCTION_CALLING
+                else:
+                    strategy = AgentEntity.Strategy.CHAIN_OF_THOUGHT
+
+            agent_tools = []
+            for tool in agent_dict.get('tools', []):
+                keys = tool.keys()
+                if len(keys) >= 4:
+                    if "enabled" not in tool or not tool["enabled"]:
+                        continue
+
+                    agent_tool_properties = {
+                        'provider_type': tool['provider_type'],
+                        'provider_id': tool['provider_id'],
+                        'tool_name': tool['tool_name'],
+                        'tool_parameters': tool['tool_parameters'] if 'tool_parameters' in tool else {}
+                    }
+
+                    agent_tools.append(AgentToolEntity(**agent_tool_properties))
+
+            if 'strategy' in config['agent_mode'] and \
+                    config['agent_mode']['strategy'] not in ['react_router', 'router']:
+                agent_prompt = agent_dict.get('prompt', None) or {}
+                # check model mode
+                model_mode = config.get('model', {}).get('mode', 'completion')
+                if model_mode == 'completion':
+                    agent_prompt_entity = AgentPromptEntity(
+                        first_prompt=agent_prompt.get('first_prompt',
+                                                      REACT_PROMPT_TEMPLATES['english']['completion']['prompt']),
+                        next_iteration=agent_prompt.get('next_iteration',
+                                                        REACT_PROMPT_TEMPLATES['english']['completion'][
+                                                            'agent_scratchpad']),
+                    )
+                else:
+                    agent_prompt_entity = AgentPromptEntity(
+                        first_prompt=agent_prompt.get('first_prompt',
+                                                      REACT_PROMPT_TEMPLATES['english']['chat']['prompt']),
+                        next_iteration=agent_prompt.get('next_iteration',
+                                                        REACT_PROMPT_TEMPLATES['english']['chat']['agent_scratchpad']),
+                    )
+
+                return AgentEntity(
+                    provider=config['model']['provider'],
+                    model=config['model']['name'],
+                    strategy=strategy,
+                    prompt=agent_prompt_entity,
+                    tools=agent_tools,
+                    max_iteration=agent_dict.get('max_iteration', 5)
+                )
+
+        return None
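
A sketch of the agent_mode conversion under an assumed OpenAI chat model config (provider and model names are placeholders):

    from core.app.app_config.easy_ui_based_app.agent.manager import AgentConfigManager

    config = {
        'model': {'provider': 'openai', 'name': 'gpt-4', 'mode': 'chat'},   # placeholder model
        'agent_mode': {
            'enabled': True,
            'strategy': 'function_call',
            'tools': [],
            'max_iteration': 3,
        },
    }

    agent = AgentConfigManager.convert(config)
    # agent.strategy is Strategy.FUNCTION_CALLING and the REACT chat prompt defaults are filled in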

+ 0 - 0
api/core/app/app_config/easy_ui_based_app/dataset/__init__.py


+ 224 - 0
api/core/app/app_config/easy_ui_based_app/dataset/manager.py

@@ -0,0 +1,224 @@
+import uuid
+from typing import Optional
+
+from core.app.app_config.entities import DatasetEntity, DatasetRetrieveConfigEntity
+from core.entities.agent_entities import PlanningStrategy
+from models.model import AppMode
+from services.dataset_service import DatasetService
+
+
+class DatasetConfigManager:
+    @classmethod
+    def convert(cls, config: dict) -> Optional[DatasetEntity]:
+        """
+        Convert dataset config to a dataset entity
+
+        :param config: model config args
+        """
+        dataset_ids = []
+        if 'datasets' in config.get('dataset_configs', {}):
+            datasets = config.get('dataset_configs', {}).get('datasets', {
+                'strategy': 'router',
+                'datasets': []
+            })
+
+            for dataset in datasets.get('datasets', []):
+                keys = list(dataset.keys())
+                if len(keys) == 0 or keys[0] != 'dataset':
+                    continue
+
+                dataset = dataset['dataset']
+
+                if 'enabled' not in dataset or not dataset['enabled']:
+                    continue
+
+                dataset_id = dataset.get('id', None)
+                if dataset_id:
+                    dataset_ids.append(dataset_id)
+
+        if 'agent_mode' in config and config['agent_mode'] \
+                and 'enabled' in config['agent_mode'] \
+                and config['agent_mode']['enabled']:
+
+            agent_dict = config.get('agent_mode', {})
+
+            for tool in agent_dict.get('tools', []):
+                keys = tool.keys()
+                if len(keys) == 1:
+                    # old standard
+                    key = list(tool.keys())[0]
+
+                    if key != 'dataset':
+                        continue
+
+                    tool_item = tool[key]
+
+                    if "enabled" not in tool_item or not tool_item["enabled"]:
+                        continue
+
+                    dataset_id = tool_item['id']
+                    dataset_ids.append(dataset_id)
+
+        if len(dataset_ids) == 0:
+            return None
+
+        # dataset configs
+        dataset_configs = config.get('dataset_configs', {'retrieval_model': 'single'})
+        query_variable = config.get('dataset_query_variable')
+
+        if dataset_configs['retrieval_model'] == 'single':
+            return DatasetEntity(
+                dataset_ids=dataset_ids,
+                retrieve_config=DatasetRetrieveConfigEntity(
+                    query_variable=query_variable,
+                    retrieve_strategy=DatasetRetrieveConfigEntity.RetrieveStrategy.value_of(
+                        dataset_configs['retrieval_model']
+                    )
+                )
+            )
+        else:
+            return DatasetEntity(
+                dataset_ids=dataset_ids,
+                retrieve_config=DatasetRetrieveConfigEntity(
+                    query_variable=query_variable,
+                    retrieve_strategy=DatasetRetrieveConfigEntity.RetrieveStrategy.value_of(
+                        dataset_configs['retrieval_model']
+                    ),
+                    top_k=dataset_configs.get('top_k'),
+                    score_threshold=dataset_configs.get('score_threshold'),
+                    reranking_model=dataset_configs.get('reranking_model')
+                )
+            )
+
+    @classmethod
+    def validate_and_set_defaults(cls, tenant_id: str, app_mode: AppMode, config: dict) -> tuple[dict, list[str]]:
+        """
+        Validate and set defaults for dataset feature
+
+        :param tenant_id: tenant ID
+        :param app_mode: app mode
+        :param config: app model config args
+        """
+        # Extract dataset config for legacy compatibility
+        config = cls.extract_dataset_config_for_legacy_compatibility(tenant_id, app_mode, config)
+
+        # dataset_configs
+        if not config.get("dataset_configs"):
+            config["dataset_configs"] = {'retrieval_model': 'single'}
+
+        if not config["dataset_configs"].get("datasets"):
+            config["dataset_configs"]["datasets"] = {
+                "strategy": "router",
+                "datasets": []
+            }
+
+        if not isinstance(config["dataset_configs"], dict):
+            raise ValueError("dataset_configs must be of object type")
+
+        if config["dataset_configs"]['retrieval_model'] == 'multiple':
+            if not config["dataset_configs"]['reranking_model']:
+                raise ValueError("reranking_model has not been set")
+            if not isinstance(config["dataset_configs"]['reranking_model'], dict):
+                raise ValueError("reranking_model must be of object type")
+
+        if not isinstance(config["dataset_configs"], dict):
+            raise ValueError("dataset_configs must be of object type")
+
+        need_manual_query_datasets = (config.get("dataset_configs")
+                                      and config["dataset_configs"].get("datasets", {}).get("datasets"))
+
+        if need_manual_query_datasets and app_mode == AppMode.COMPLETION:
+            # Only check when mode is completion
+            dataset_query_variable = config.get("dataset_query_variable")
+
+            if not dataset_query_variable:
+                raise ValueError("Dataset query variable is required when dataset is exist")
+
+        return config, ["agent_mode", "dataset_configs", "dataset_query_variable"]
+
+    @classmethod
+    def extract_dataset_config_for_legacy_compatibility(cls, tenant_id: str, app_mode: AppMode, config: dict) -> dict:
+        """
+        Extract dataset config for legacy compatibility
+
+        :param tenant_id: tenant ID
+        :param app_mode: app mode
+        :param config: app model config args
+        """
+        # Extract dataset config for legacy compatibility
+        if not config.get("agent_mode"):
+            config["agent_mode"] = {
+                "enabled": False,
+                "tools": []
+            }
+
+        if not isinstance(config["agent_mode"], dict):
+            raise ValueError("agent_mode must be of object type")
+
+        # enabled
+        if "enabled" not in config["agent_mode"] or not config["agent_mode"]["enabled"]:
+            config["agent_mode"]["enabled"] = False
+
+        if not isinstance(config["agent_mode"]["enabled"], bool):
+            raise ValueError("enabled in agent_mode must be of boolean type")
+
+        # tools
+        if not config["agent_mode"].get("tools"):
+            config["agent_mode"]["tools"] = []
+
+        if not isinstance(config["agent_mode"]["tools"], list):
+            raise ValueError("tools in agent_mode must be a list of objects")
+
+        # strategy
+        if not config["agent_mode"].get("strategy"):
+            config["agent_mode"]["strategy"] = PlanningStrategy.ROUTER.value
+
+        has_datasets = False
+        if config["agent_mode"]["strategy"] in [PlanningStrategy.ROUTER.value, PlanningStrategy.REACT_ROUTER.value]:
+            for tool in config["agent_mode"]["tools"]:
+                key = list(tool.keys())[0]
+                if key == "dataset":
+                    # old style, use tool name as key
+                    tool_item = tool[key]
+
+                    if "enabled" not in tool_item or not tool_item["enabled"]:
+                        tool_item["enabled"] = False
+
+                    if not isinstance(tool_item["enabled"], bool):
+                        raise ValueError("enabled in agent_mode.tools must be of boolean type")
+
+                    if 'id' not in tool_item:
+                        raise ValueError("id is required in dataset")
+
+                    try:
+                        uuid.UUID(tool_item["id"])
+                    except ValueError:
+                        raise ValueError("id in dataset must be of UUID type")
+
+                    if not cls.is_dataset_exists(tenant_id, tool_item["id"]):
+                        raise ValueError("Dataset ID does not exist, please check your permission.")
+
+                    has_datasets = True
+
+        need_manual_query_datasets = has_datasets and config["agent_mode"]["enabled"]
+
+        if need_manual_query_datasets and app_mode == AppMode.COMPLETION:
+            # Only check when mode is completion
+            dataset_query_variable = config.get("dataset_query_variable")
+
+            if not dataset_query_variable:
+                raise ValueError("Dataset query variable is required when dataset is exist")
+
+        return config
+
+    @classmethod
+    def is_dataset_exists(cls, tenant_id: str, dataset_id: str) -> bool:
+        # verify if the dataset ID exists
+        dataset = DatasetService.get_dataset(dataset_id)
+
+        if not dataset:
+            return False
+
+        if dataset.tenant_id != tenant_id:
+            return False
+
+        return True
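
A sketch of the dataset config conversion; the dataset id is a placeholder UUID, and convert does not touch the database (only validate_and_set_defaults does):

    from core.app.app_config.easy_ui_based_app.dataset.manager import DatasetConfigManager

    config = {
        'dataset_configs': {
            'retrieval_model': 'single',
            'datasets': {
                'datasets': [
                    {'dataset': {'enabled': True, 'id': '00000000-0000-0000-0000-000000000000'}},  # placeholder id
                ]
            }
        },
        'dataset_query_variable': 'query',
    }

    dataset_entity = DatasetConfigManager.convert(config)
    # dataset_entity.dataset_ids holds the enabled id; retrieve_strategy resolves to the 'single' strategy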

+ 0 - 0
api/core/app/app_config/easy_ui_based_app/model_config/__init__.py


+ 103 - 0
api/core/app/app_config/easy_ui_based_app/model_config/converter.py

@@ -0,0 +1,103 @@
+from typing import cast
+
+from core.app.app_config.entities import EasyUIBasedAppConfig
+from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
+from core.entities.model_entities import ModelStatus
+from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
+from core.model_runtime.entities.model_entities import ModelType
+from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
+from core.provider_manager import ProviderManager
+
+
+class ModelConfigConverter:
+    @classmethod
+    def convert(cls, app_config: EasyUIBasedAppConfig,
+                skip_check: bool = False) \
+            -> ModelConfigWithCredentialsEntity:
+        """
+        Convert app model config dict to entity.
+        :param app_config: app config
+        :param skip_check: skip check
+        :raises ProviderTokenNotInitError: provider token not init error
+        :return: app orchestration config entity
+        """
+        model_config = app_config.model
+
+        provider_manager = ProviderManager()
+        provider_model_bundle = provider_manager.get_provider_model_bundle(
+            tenant_id=app_config.tenant_id,
+            provider=model_config.provider,
+            model_type=ModelType.LLM
+        )
+
+        provider_name = provider_model_bundle.configuration.provider.provider
+        model_name = model_config.model
+
+        model_type_instance = provider_model_bundle.model_type_instance
+        model_type_instance = cast(LargeLanguageModel, model_type_instance)
+
+        # check model credentials
+        model_credentials = provider_model_bundle.configuration.get_current_credentials(
+            model_type=ModelType.LLM,
+            model=model_config.model
+        )
+
+        if model_credentials is None:
+            if not skip_check:
+                raise ProviderTokenNotInitError(f"Model {model_name} credentials are not initialized.")
+            else:
+                model_credentials = {}
+
+        if not skip_check:
+            # check model
+            provider_model = provider_model_bundle.configuration.get_provider_model(
+                model=model_config.model,
+                model_type=ModelType.LLM
+            )
+
+            if provider_model is None:
+                raise ValueError(f"Model {model_name} does not exist.")
+
+            if provider_model.status == ModelStatus.NO_CONFIGURE:
+                raise ProviderTokenNotInitError(f"Model {model_name} credentials are not initialized.")
+            elif provider_model.status == ModelStatus.NO_PERMISSION:
+                raise ModelCurrentlyNotSupportError(f"Dify Hosted OpenAI {model_name} is currently not supported.")
+            elif provider_model.status == ModelStatus.QUOTA_EXCEEDED:
+                raise QuotaExceededError(f"Model provider {provider_name} quota exceeded.")
+
+        # model config
+        completion_params = model_config.parameters
+        stop = []
+        if 'stop' in completion_params:
+            stop = completion_params['stop']
+            del completion_params['stop']
+
+        # get model mode
+        model_mode = model_config.mode
+        if not model_mode:
+            mode_enum = model_type_instance.get_model_mode(
+                model=model_config.model,
+                credentials=model_credentials
+            )
+
+            model_mode = mode_enum.value
+
+        model_schema = model_type_instance.get_model_schema(
+            model_config.model,
+            model_credentials
+        )
+
+        if not skip_check and not model_schema:
+            raise ValueError(f"Model {model_name} not exist.")
+
+        return ModelConfigWithCredentialsEntity(
+            provider=model_config.provider,
+            model=model_config.model,
+            model_schema=model_schema,
+            mode=model_mode,
+            provider_model_bundle=provider_model_bundle,
+            credentials=model_credentials,
+            parameters=completion_params,
+            stop=stop,
+        )

+ 112 - 0
api/core/app/app_config/easy_ui_based_app/model_config/manager.py

@@ -0,0 +1,112 @@
+from core.app.app_config.entities import ModelConfigEntity
+from core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType
+from core.model_runtime.model_providers import model_provider_factory
+from core.provider_manager import ProviderManager
+
+
+class ModelConfigManager:
+    @classmethod
+    def convert(cls, config: dict) -> ModelConfigEntity:
+        """
+        Convert model config to a model config entity
+
+        :param config: model config args
+        """
+        # model config
+        model_config = config.get('model')
+
+        if not model_config:
+            raise ValueError("model is required")
+
+        completion_params = model_config.get('completion_params')
+        stop = []
+        if 'stop' in completion_params:
+            stop = completion_params['stop']
+            del completion_params['stop']
+
+        # get model mode
+        model_mode = model_config.get('mode')
+
+        return ModelConfigEntity(
+            provider=config['model']['provider'],
+            model=config['model']['name'],
+            mode=model_mode,
+            parameters=completion_params,
+            stop=stop,
+        )
+
+    @classmethod
+    def validate_and_set_defaults(cls, tenant_id: str, config: dict) -> tuple[dict, list[str]]:
+        """
+        Validate and set defaults for model config
+
+        :param tenant_id: tenant id
+        :param config: app model config args
+        """
+        if 'model' not in config:
+            raise ValueError("model is required")
+
+        if not isinstance(config["model"], dict):
+            raise ValueError("model must be of object type")
+
+        # model.provider
+        provider_entities = model_provider_factory.get_providers()
+        model_provider_names = [provider.provider for provider in provider_entities]
+        if 'provider' not in config["model"] or config["model"]["provider"] not in model_provider_names:
+            raise ValueError(f"model.provider is required and must be in {str(model_provider_names)}")
+
+        # model.name
+        if 'name' not in config["model"]:
+            raise ValueError("model.name is required")
+
+        provider_manager = ProviderManager()
+        models = provider_manager.get_configurations(tenant_id).get_models(
+            provider=config["model"]["provider"],
+            model_type=ModelType.LLM
+        )
+
+        if not models:
+            raise ValueError("model.name must be in the specified model list")
+
+        model_ids = [m.model for m in models]
+        if config["model"]["name"] not in model_ids:
+            raise ValueError("model.name must be in the specified model list")
+
+        model_mode = None
+        for model in models:
+            if model.model == config["model"]["name"]:
+                model_mode = model.model_properties.get(ModelPropertyKey.MODE)
+                break
+
+        # model.mode
+        if model_mode:
+            config['model']["mode"] = model_mode
+        else:
+            config['model']["mode"] = "completion"
+
+        # model.completion_params
+        if 'completion_params' not in config["model"]:
+            raise ValueError("model.completion_params is required")
+
+        config["model"]["completion_params"] = cls.validate_model_completion_params(
+            config["model"]["completion_params"]
+        )
+
+        return config, ["model"]
+
+    @classmethod
+    def validate_model_completion_params(cls, cp: dict) -> dict:
+        # model.completion_params
+        if not isinstance(cp, dict):
+            raise ValueError("model.completion_params must be of object type")
+
+        # stop
+        if 'stop' not in cp:
+            cp["stop"] = []
+        elif not isinstance(cp["stop"], list):
+            raise ValueError("stop in model.completion_params must be of list type")
+
+        if len(cp["stop"]) > 4:
+            raise ValueError("stop sequences must be less than 4")
+
+        return cp
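
A sketch of the pure conversion helpers (provider and model names are placeholders; only validate_and_set_defaults consults the provider manager):

    from core.app.app_config.easy_ui_based_app.model_config.manager import ModelConfigManager

    config = {
        'model': {
            'provider': 'openai',          # placeholder provider
            'name': 'gpt-4',               # placeholder model
            'mode': 'chat',
            'completion_params': {'temperature': 0.7, 'stop': ['Human:']},
        }
    }

    model_entity = ModelConfigManager.convert(config)
    # model_entity.stop == ['Human:'] and 'stop' has been popped from the remaining parameters

    params = ModelConfigManager.validate_model_completion_params({'temperature': 0.7})
    # params == {'temperature': 0.7, 'stop': []}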

+ 0 - 0
api/core/app/app_config/easy_ui_based_app/prompt_template/__init__.py


+ 140 - 0
api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py

@@ -0,0 +1,140 @@
+from core.app.app_config.entities import (
+    AdvancedChatPromptTemplateEntity,
+    AdvancedCompletionPromptTemplateEntity,
+    PromptTemplateEntity,
+)
+from core.model_runtime.entities.message_entities import PromptMessageRole
+from core.prompt.simple_prompt_transform import ModelMode
+from models.model import AppMode
+
+
+class PromptTemplateConfigManager:
+    @classmethod
+    def convert(cls, config: dict) -> PromptTemplateEntity:
+        if not config.get("prompt_type"):
+            raise ValueError("prompt_type is required")
+
+        prompt_type = PromptTemplateEntity.PromptType.value_of(config['prompt_type'])
+        if prompt_type == PromptTemplateEntity.PromptType.SIMPLE:
+            simple_prompt_template = config.get("pre_prompt", "")
+            return PromptTemplateEntity(
+                prompt_type=prompt_type,
+                simple_prompt_template=simple_prompt_template
+            )
+        else:
+            advanced_chat_prompt_template = None
+            chat_prompt_config = config.get("chat_prompt_config", {})
+            if chat_prompt_config:
+                chat_prompt_messages = []
+                for message in chat_prompt_config.get("prompt", []):
+                    chat_prompt_messages.append({
+                        "text": message["text"],
+                        "role": PromptMessageRole.value_of(message["role"])
+                    })
+
+                advanced_chat_prompt_template = AdvancedChatPromptTemplateEntity(
+                    messages=chat_prompt_messages
+                )
+
+            advanced_completion_prompt_template = None
+            completion_prompt_config = config.get("completion_prompt_config", {})
+            if completion_prompt_config:
+                completion_prompt_template_params = {
+                    'prompt': completion_prompt_config['prompt']['text'],
+                }
+
+                if 'conversation_histories_role' in completion_prompt_config:
+                    completion_prompt_template_params['role_prefix'] = {
+                        'user': completion_prompt_config['conversation_histories_role']['user_prefix'],
+                        'assistant': completion_prompt_config['conversation_histories_role']['assistant_prefix']
+                    }
+
+                advanced_completion_prompt_template = AdvancedCompletionPromptTemplateEntity(
+                    **completion_prompt_template_params
+                )
+
+            return PromptTemplateEntity(
+                prompt_type=prompt_type,
+                advanced_chat_prompt_template=advanced_chat_prompt_template,
+                advanced_completion_prompt_template=advanced_completion_prompt_template
+            )
+
+    @classmethod
+    def validate_and_set_defaults(cls, app_mode: AppMode, config: dict) -> tuple[dict, list[str]]:
+        """
+        Validate pre_prompt and set defaults for prompt feature
+        depending on the config['model']
+
+        :param app_mode: app mode
+        :param config: app model config args
+        """
+        if not config.get("prompt_type"):
+            config["prompt_type"] = PromptTemplateEntity.PromptType.SIMPLE.value
+
+        prompt_type_vals = [typ.value for typ in PromptTemplateEntity.PromptType]
+        if config['prompt_type'] not in prompt_type_vals:
+            raise ValueError(f"prompt_type must be in {prompt_type_vals}")
+
+        # chat_prompt_config
+        if not config.get("chat_prompt_config"):
+            config["chat_prompt_config"] = {}
+
+        if not isinstance(config["chat_prompt_config"], dict):
+            raise ValueError("chat_prompt_config must be of object type")
+
+        # completion_prompt_config
+        if not config.get("completion_prompt_config"):
+            config["completion_prompt_config"] = {}
+
+        if not isinstance(config["completion_prompt_config"], dict):
+            raise ValueError("completion_prompt_config must be of object type")
+
+        if config['prompt_type'] == PromptTemplateEntity.PromptType.ADVANCED.value:
+            if not config['chat_prompt_config'] and not config['completion_prompt_config']:
+                raise ValueError("chat_prompt_config or completion_prompt_config is required "
+                                 "when prompt_type is advanced")
+
+            model_mode_vals = [mode.value for mode in ModelMode]
+            if config['model']["mode"] not in model_mode_vals:
+                raise ValueError(f"model.mode must be in {model_mode_vals} when prompt_type is advanced")
+
+            if app_mode == AppMode.CHAT and config['model']["mode"] == ModelMode.COMPLETION.value:
+                user_prefix = config['completion_prompt_config']['conversation_histories_role']['user_prefix']
+                assistant_prefix = config['completion_prompt_config']['conversation_histories_role']['assistant_prefix']
+
+                if not user_prefix:
+                    config['completion_prompt_config']['conversation_histories_role']['user_prefix'] = 'Human'
+
+                if not assistant_prefix:
+                    config['completion_prompt_config']['conversation_histories_role']['assistant_prefix'] = 'Assistant'
+
+            if config['model']["mode"] == ModelMode.CHAT.value:
+                prompt_list = config['chat_prompt_config']['prompt']
+
+                if len(prompt_list) > 10:
+                    raise ValueError("prompt messages must be less than 10")
+        else:
+            # pre_prompt, for simple mode
+            if not config.get("pre_prompt"):
+                config["pre_prompt"] = ""
+
+            if not isinstance(config["pre_prompt"], str):
+                raise ValueError("pre_prompt must be of string type")
+
+        return config, ["prompt_type", "pre_prompt", "chat_prompt_config", "completion_prompt_config"]
+
+    @classmethod
+    def validate_post_prompt_and_set_defaults(cls, config: dict) -> dict:
+        """
+        Validate post_prompt and set defaults for prompt feature
+
+        :param config: app model config args
+        """
+        # post_prompt
+        if not config.get("post_prompt"):
+            config["post_prompt"] = ""
+
+        if not isinstance(config["post_prompt"], str):
+            raise ValueError("post_prompt must be of string type")
+
+        return config
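
A sketch of the simple prompt template conversion, using the PromptType enum value directly so no string literal is assumed:

    from core.app.app_config.easy_ui_based_app.prompt_template.manager import PromptTemplateConfigManager
    from core.app.app_config.entities import PromptTemplateEntity

    template = PromptTemplateConfigManager.convert({
        'prompt_type': PromptTemplateEntity.PromptType.SIMPLE.value,
        'pre_prompt': 'You are a helpful assistant.',
    })
    # template.prompt_type == PromptTemplateEntity.PromptType.SIMPLE
    # template.simple_prompt_template == 'You are a helpful assistant.'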

+ 0 - 0
api/core/app/app_config/easy_ui_based_app/variables/__init__.py


+ 186 - 0
api/core/app/app_config/easy_ui_based_app/variables/manager.py

@@ -0,0 +1,186 @@
+import re
+
+from core.app.app_config.entities import ExternalDataVariableEntity, VariableEntity
+from core.external_data_tool.factory import ExternalDataToolFactory
+
+
+class BasicVariablesConfigManager:
+    @classmethod
+    def convert(cls, config: dict) -> tuple[list[VariableEntity], list[ExternalDataVariableEntity]]:
+        """
+        Convert variables config to variable and external data variable entities
+
+        :param config: model config args
+        """
+        external_data_variables = []
+        variables = []
+
+        # old external_data_tools
+        external_data_tools = config.get('external_data_tools', [])
+        for external_data_tool in external_data_tools:
+            if 'enabled' not in external_data_tool or not external_data_tool['enabled']:
+                continue
+
+            external_data_variables.append(
+                ExternalDataVariableEntity(
+                    variable=external_data_tool['variable'],
+                    type=external_data_tool['type'],
+                    config=external_data_tool['config']
+                )
+            )
+
+        # variables and external_data_tools
+        for variable in config.get('user_input_form', []):
+            typ = list(variable.keys())[0]
+            if typ == 'external_data_tool':
+                val = variable[typ]
+                if 'config' not in val:
+                    continue
+
+                external_data_variables.append(
+                    ExternalDataVariableEntity(
+                        variable=val['variable'],
+                        type=val['type'],
+                        config=val['config']
+                    )
+                )
+            elif typ in [
+                VariableEntity.Type.TEXT_INPUT.value,
+                VariableEntity.Type.PARAGRAPH.value,
+                VariableEntity.Type.NUMBER.value,
+            ]:
+                variables.append(
+                    VariableEntity(
+                        type=VariableEntity.Type.value_of(typ),
+                        variable=variable[typ].get('variable'),
+                        description=variable[typ].get('description'),
+                        label=variable[typ].get('label'),
+                        required=variable[typ].get('required', False),
+                        max_length=variable[typ].get('max_length'),
+                        default=variable[typ].get('default'),
+                    )
+                )
+            elif typ == VariableEntity.Type.SELECT.value:
+                variables.append(
+                    VariableEntity(
+                        type=VariableEntity.Type.SELECT,
+                        variable=variable[typ].get('variable'),
+                        description=variable[typ].get('description'),
+                        label=variable[typ].get('label'),
+                        required=variable[typ].get('required', False),
+                        options=variable[typ].get('options'),
+                        default=variable[typ].get('default'),
+                    )
+                )
+
+        return variables, external_data_variables
+
+    @classmethod
+    def validate_and_set_defaults(cls, tenant_id: str, config: dict) -> tuple[dict, list[str]]:
+        """
+        Validate and set defaults for user input form
+
+        :param tenant_id: workspace id
+        :param config: app model config args
+        """
+        related_config_keys = []
+        config, current_related_config_keys = cls.validate_variables_and_set_defaults(config)
+        related_config_keys.extend(current_related_config_keys)
+
+        config, current_related_config_keys = cls.validate_external_data_tools_and_set_defaults(tenant_id, config)
+        related_config_keys.extend(current_related_config_keys)
+
+        return config, related_config_keys
+
+    @classmethod
+    def validate_variables_and_set_defaults(cls, config: dict) -> tuple[dict, list[str]]:
+        """
+        Validate and set defaults for user input form
+
+        :param config: app model config args
+        """
+        if not config.get("user_input_form"):
+            config["user_input_form"] = []
+
+        if not isinstance(config["user_input_form"], list):
+            raise ValueError("user_input_form must be a list of objects")
+
+        variables = []
+        for item in config["user_input_form"]:
+            key = list(item.keys())[0]
+            if key not in ["text-input", "select", "paragraph", "number", "external_data_tool"]:
+                raise ValueError("Keys in user_input_form list can only be 'text-input', 'paragraph'  or 'select'")
+
+            form_item = item[key]
+            if 'label' not in form_item:
+                raise ValueError("label is required in user_input_form")
+
+            if not isinstance(form_item["label"], str):
+                raise ValueError("label in user_input_form must be of string type")
+
+            if 'variable' not in form_item:
+                raise ValueError("variable is required in user_input_form")
+
+            if not isinstance(form_item["variable"], str):
+                raise ValueError("variable in user_input_form must be of string type")
+
+            pattern = re.compile(r"^(?!\d)[\u4e00-\u9fa5A-Za-z0-9_\U0001F300-\U0001F64F\U0001F680-\U0001F6FF]{1,100}$")
+            if pattern.match(form_item["variable"]) is None:
+                raise ValueError("variable in user_input_form must be a string, "
+                                 "and cannot start with a number")
+
+            variables.append(form_item["variable"])
+
+            if 'required' not in form_item or not form_item["required"]:
+                form_item["required"] = False
+
+            if not isinstance(form_item["required"], bool):
+                raise ValueError("required in user_input_form must be of boolean type")
+
+            if key == "select":
+                if 'options' not in form_item or not form_item["options"]:
+                    form_item["options"] = []
+
+                if not isinstance(form_item["options"], list):
+                    raise ValueError("options in user_input_form must be a list of strings")
+
+                if "default" in form_item and form_item['default'] \
+                        and form_item["default"] not in form_item["options"]:
+                    raise ValueError("default value in user_input_form must be in the options list")
+
+        return config, ["user_input_form"]
+
+    @classmethod
+    def validate_external_data_tools_and_set_defaults(cls, tenant_id: str, config: dict) -> tuple[dict, list[str]]:
+        """
+        Validate and set defaults for external data fetch feature
+
+        :param tenant_id: workspace id
+        :param config: app model config args
+        """
+        if not config.get("external_data_tools"):
+            config["external_data_tools"] = []
+
+        if not isinstance(config["external_data_tools"], list):
+            raise ValueError("external_data_tools must be of list type")
+
+        for tool in config["external_data_tools"]:
+            if "enabled" not in tool or not tool["enabled"]:
+                tool["enabled"] = False
+
+            if not tool["enabled"]:
+                continue
+
+            if "type" not in tool or not tool["type"]:
+                raise ValueError("external_data_tools[].type is required")
+
+            typ = tool["type"]
+            config = tool["config"]
+
+            ExternalDataToolFactory.validate_config(
+                name=typ,
+                tenant_id=tenant_id,
+                config=config
+            )
+
+        return config, ["external_data_tools"]

+ 70 - 137
api/core/entities/application_entities.py → api/core/app/app_config/entities.py

@@ -1,12 +1,10 @@
 from enum import Enum
-from typing import Any, Literal, Optional, Union
+from typing import Any, Optional
 
 from pydantic import BaseModel
 
-from core.entities.provider_configuration import ProviderModelBundle
-from core.file.file_obj import FileObj
 from core.model_runtime.entities.message_entities import PromptMessageRole
-from core.model_runtime.entities.model_entities import AIModelEntity
+from models.model import AppMode
 
 
 class ModelConfigEntity(BaseModel):
@@ -15,10 +13,7 @@ class ModelConfigEntity(BaseModel):
     """
     provider: str
     model: str
-    model_schema: AIModelEntity
-    mode: str
-    provider_model_bundle: ProviderModelBundle
-    credentials: dict[str, Any] = {}
+    mode: Optional[str] = None
     parameters: dict[str, Any] = {}
     stop: list[str] = []
 
@@ -86,6 +81,40 @@ class PromptTemplateEntity(BaseModel):
     advanced_completion_prompt_template: Optional[AdvancedCompletionPromptTemplateEntity] = None
 
 
+class VariableEntity(BaseModel):
+    """
+    Variable Entity.
+    """
+    class Type(Enum):
+        TEXT_INPUT = 'text-input'
+        SELECT = 'select'
+        PARAGRAPH = 'paragraph'
+        NUMBER = 'number'
+
+        @classmethod
+        def value_of(cls, value: str) -> 'VariableEntity.Type':
+            """
+            Get value of given mode.
+
+            :param value: mode value
+            :return: mode
+            """
+            for mode in cls:
+                if mode.value == value:
+                    return mode
+            raise ValueError(f'invalid variable type value {value}')
+
+    variable: str
+    label: str
+    description: Optional[str] = None
+    type: Type
+    required: bool = False
+    max_length: Optional[int] = None
+    options: Optional[list[str]] = None
+    default: Optional[str] = None
+    hint: Optional[str] = None
+
+
 class ExternalDataVariableEntity(BaseModel):
     """
     External Data Variable Entity.
@@ -124,7 +153,6 @@ class DatasetRetrieveConfigEntity(BaseModel):
     query_variable: Optional[str] = None  # Only when app mode is completion
 
     retrieve_strategy: RetrieveStrategy
-    single_strategy: Optional[str] = None  # for temp
     top_k: Optional[int] = None
     score_threshold: Optional[float] = None
     reranking_model: Optional[dict] = None
@@ -155,155 +183,60 @@ class TextToSpeechEntity(BaseModel):
     language: Optional[str] = None
 
 
-class FileUploadEntity(BaseModel):
+class FileExtraConfig(BaseModel):
     """
     File Upload Entity.
     """
     image_config: Optional[dict[str, Any]] = None
 
 
-class AgentToolEntity(BaseModel):
-    """
-    Agent Tool Entity.
-    """
-    provider_type: Literal["builtin", "api"]
-    provider_id: str
-    tool_name: str
-    tool_parameters: dict[str, Any] = {}
-
-
-class AgentPromptEntity(BaseModel):
-    """
-    Agent Prompt Entity.
-    """
-    first_prompt: str
-    next_iteration: str
+class AppAdditionalFeatures(BaseModel):
+    file_upload: Optional[FileExtraConfig] = None
+    opening_statement: Optional[str] = None
+    suggested_questions: list[str] = []
+    suggested_questions_after_answer: bool = False
+    show_retrieve_source: bool = False
+    more_like_this: bool = False
+    speech_to_text: bool = False
+    text_to_speech: Optional[TextToSpeechEntity] = None
 
 
-class AgentScratchpadUnit(BaseModel):
+class AppConfig(BaseModel):
     """
-    Agent First Prompt Entity.
+    Application Config Entity.
     """
-
-    class Action(BaseModel):
-        """
-        Action Entity.
-        """
-        action_name: str
-        action_input: Union[dict, str]
-
-    agent_response: Optional[str] = None
-    thought: Optional[str] = None
-    action_str: Optional[str] = None
-    observation: Optional[str] = None
-    action: Optional[Action] = None
+    tenant_id: str
+    app_id: str
+    app_mode: AppMode
+    additional_features: AppAdditionalFeatures
+    variables: list[VariableEntity] = []
+    sensitive_word_avoidance: Optional[SensitiveWordAvoidanceEntity] = None
 
 
-class AgentEntity(BaseModel):
+class EasyUIBasedAppModelConfigFrom(Enum):
     """
-    Agent Entity.
+    App Model Config From.
     """
+    ARGS = 'args'
+    APP_LATEST_CONFIG = 'app-latest-config'
+    CONVERSATION_SPECIFIC_CONFIG = 'conversation-specific-config'
 
-    class Strategy(Enum):
-        """
-        Agent Strategy.
-        """
-        CHAIN_OF_THOUGHT = 'chain-of-thought'
-        FUNCTION_CALLING = 'function-calling'
-
-    provider: str
-    model: str
-    strategy: Strategy
-    prompt: Optional[AgentPromptEntity] = None
-    tools: list[AgentToolEntity] = None
-    max_iteration: int = 5
 
-
-class AppOrchestrationConfigEntity(BaseModel):
+class EasyUIBasedAppConfig(AppConfig):
     """
-    App Orchestration Config Entity.
+    Easy UI Based App Config Entity.
     """
-    model_config: ModelConfigEntity
+    app_model_config_from: EasyUIBasedAppModelConfigFrom
+    app_model_config_id: str
+    app_model_config_dict: dict
+    model: ModelConfigEntity
     prompt_template: PromptTemplateEntity
-    external_data_variables: list[ExternalDataVariableEntity] = []
-    agent: Optional[AgentEntity] = None
-
-    # features
     dataset: Optional[DatasetEntity] = None
-    file_upload: Optional[FileUploadEntity] = None
-    opening_statement: Optional[str] = None
-    suggested_questions_after_answer: bool = False
-    show_retrieve_source: bool = False
-    more_like_this: bool = False
-    speech_to_text: bool = False
-    text_to_speech: dict = {}
-    sensitive_word_avoidance: Optional[SensitiveWordAvoidanceEntity] = None
-
-
-class InvokeFrom(Enum):
-    """
-    Invoke From.
-    """
-    SERVICE_API = 'service-api'
-    WEB_APP = 'web-app'
-    EXPLORE = 'explore'
-    DEBUGGER = 'debugger'
-
-    @classmethod
-    def value_of(cls, value: str) -> 'InvokeFrom':
-        """
-        Get value of given mode.
-
-        :param value: mode value
-        :return: mode
-        """
-        for mode in cls:
-            if mode.value == value:
-                return mode
-        raise ValueError(f'invalid invoke from value {value}')
-
-    def to_source(self) -> str:
-        """
-        Get source of invoke from.
-
-        :return: source
-        """
-        if self == InvokeFrom.WEB_APP:
-            return 'web_app'
-        elif self == InvokeFrom.DEBUGGER:
-            return 'dev'
-        elif self == InvokeFrom.EXPLORE:
-            return 'explore_app'
-        elif self == InvokeFrom.SERVICE_API:
-            return 'api'
-
-        return 'dev'
+    external_data_variables: list[ExternalDataVariableEntity] = []
 
 
-class ApplicationGenerateEntity(BaseModel):
+class WorkflowUIBasedAppConfig(AppConfig):
     """
-    Application Generate Entity.
+    Workflow UI Based App Config Entity.
     """
-    task_id: str
-    tenant_id: str
-
-    app_id: str
-    app_model_config_id: str
-    # for save
-    app_model_config_dict: dict
-    app_model_config_override: bool
-
-    # Converted from app_model_config to Entity object, or directly covered by external input
-    app_orchestration_config_entity: AppOrchestrationConfigEntity
-
-    conversation_id: Optional[str] = None
-    inputs: dict[str, str]
-    query: Optional[str] = None
-    files: list[FileObj] = []
-    user_id: str
-    # extras
-    stream: bool
-    invoke_from: InvokeFrom
-
-    # extra parameters, like: auto_generate_conversation_name
-    extras: dict[str, Any] = {}
+    workflow_id: str
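
A sketch of the new shared config entity hierarchy (tenant and app ids are placeholders):

    from core.app.app_config.entities import AppAdditionalFeatures, AppConfig
    from models.model import AppMode

    app_config = AppConfig(
        tenant_id='tenant-id',               # placeholder id
        app_id='app-id',                     # placeholder id
        app_mode=AppMode.CHAT,
        additional_features=AppAdditionalFeatures(),
    )
    # EasyUIBasedAppConfig and WorkflowUIBasedAppConfig both extend this base entity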

+ 0 - 0
api/core/app/app_config/features/__init__.py


+ 0 - 0
api/core/app/app_config/features/file_upload/__init__.py


+ 68 - 0
api/core/app/app_config/features/file_upload/manager.py

@@ -0,0 +1,68 @@
+from typing import Optional
+
+from core.app.app_config.entities import FileExtraConfig
+
+
+class FileUploadConfigManager:
+    @classmethod
+    def convert(cls, config: dict, is_vision: bool = True) -> Optional[FileExtraConfig]:
+        """
+        Convert file upload config to a file extra config entity
+
+        :param config: app model config args
+        :param is_vision: if True, the model supports vision and image detail is included
+        """
+        file_upload_dict = config.get('file_upload')
+        if file_upload_dict:
+            if 'image' in file_upload_dict and file_upload_dict['image']:
+                if 'enabled' in file_upload_dict['image'] and file_upload_dict['image']['enabled']:
+                    image_config = {
+                        'number_limits': file_upload_dict['image']['number_limits'],
+                        'transfer_methods': file_upload_dict['image']['transfer_methods']
+                    }
+
+                    if is_vision:
+                        image_config['detail'] = file_upload_dict['image']['detail']
+
+                    return FileExtraConfig(
+                        image_config=image_config
+                    )
+
+        return None
+
+    @classmethod
+    def validate_and_set_defaults(cls, config: dict, is_vision: bool = True) -> tuple[dict, list[str]]:
+        """
+        Validate and set defaults for file upload feature
+
+        :param config: app model config args
+        :param is_vision: if True, the feature is vision feature
+        """
+        if not config.get("file_upload"):
+            config["file_upload"] = {}
+
+        if not isinstance(config["file_upload"], dict):
+            raise ValueError("file_upload must be of dict type")
+
+        # check image config
+        if not config["file_upload"].get("image"):
+            config["file_upload"]["image"] = {"enabled": False}
+
+        if config['file_upload']['image']['enabled']:
+            number_limits = config['file_upload']['image']['number_limits']
+            if number_limits < 1 or number_limits > 6:
+                raise ValueError("number_limits must be in [1, 6]")
+
+            if is_vision:
+                detail = config['file_upload']['image']['detail']
+                if detail not in ['high', 'low']:
+                    raise ValueError("detail must be in ['high', 'low']")
+
+            transfer_methods = config['file_upload']['image']['transfer_methods']
+            if not isinstance(transfer_methods, list):
+                raise ValueError("transfer_methods must be of list type")
+            for method in transfer_methods:
+                if method not in ['remote_url', 'local_file']:
+                    raise ValueError("transfer_methods must be in ['remote_url', 'local_file']")
+
+        return config, ["file_upload"]

+ 0 - 0
api/core/app/app_config/features/more_like_this/__init__.py


+ 38 - 0
api/core/app/app_config/features/more_like_this/manager.py

@@ -0,0 +1,38 @@
+class MoreLikeThisConfigManager:
+    @classmethod
+    def convert(cls, config: dict) -> bool:
+        """
+        Convert more_like_this config to a bool
+
+        :param config: model config args
+        """
+        more_like_this = False
+        more_like_this_dict = config.get('more_like_this')
+        if more_like_this_dict:
+            if 'enabled' in more_like_this_dict and more_like_this_dict['enabled']:
+                more_like_this = True
+
+        return more_like_this
+
+    @classmethod
+    def validate_and_set_defaults(cls, config: dict) -> tuple[dict, list[str]]:
+        """
+        Validate and set defaults for more like this feature
+
+        :param config: app model config args
+        """
+        if not config.get("more_like_this"):
+            config["more_like_this"] = {
+                "enabled": False
+            }
+
+        if not isinstance(config["more_like_this"], dict):
+            raise ValueError("more_like_this must be of dict type")
+
+        if "enabled" not in config["more_like_this"] or not config["more_like_this"]["enabled"]:
+            config["more_like_this"]["enabled"] = False
+
+        if not isinstance(config["more_like_this"]["enabled"], bool):
+            raise ValueError("enabled in more_like_this must be of boolean type")
+
+        return config, ["more_like_this"]

+ 0 - 0
api/core/app/app_config/features/opening_statement/__init__.py


+ 43 - 0
api/core/app/app_config/features/opening_statement/manager.py

@@ -0,0 +1,43 @@
+
+
+class OpeningStatementConfigManager:
+    @classmethod
+    def convert(cls, config: dict) -> tuple[str, list]:
+        """
+        Convert opening statement config to an opening statement and suggested questions
+
+        :param config: model config args
+        """
+        # opening statement
+        opening_statement = config.get('opening_statement')
+
+        # suggested questions
+        suggested_questions_list = config.get('suggested_questions')
+
+        return opening_statement, suggested_questions_list
+
+    @classmethod
+    def validate_and_set_defaults(cls, config: dict) -> tuple[dict, list[str]]:
+        """
+        Validate and set defaults for opening statement feature
+
+        :param config: app model config args
+        """
+        if not config.get("opening_statement"):
+            config["opening_statement"] = ""
+
+        if not isinstance(config["opening_statement"], str):
+            raise ValueError("opening_statement must be of string type")
+
+        # suggested_questions
+        if not config.get("suggested_questions"):
+            config["suggested_questions"] = []
+
+        if not isinstance(config["suggested_questions"], list):
+            raise ValueError("suggested_questions must be of list type")
+
+        for question in config["suggested_questions"]:
+            if not isinstance(question, str):
+                raise ValueError("Elements in suggested_questions list must be of string type")
+
+        return config, ["opening_statement", "suggested_questions"]

+ 0 - 0
api/core/app/app_config/features/retrieval_resource/__init__.py


+ 33 - 0
api/core/app/app_config/features/retrieval_resource/manager.py

@@ -0,0 +1,33 @@
+class RetrievalResourceConfigManager:
+    @classmethod
+    def convert(cls, config: dict) -> bool:
+        show_retrieve_source = False
+        retriever_resource_dict = config.get('retriever_resource')
+        if retriever_resource_dict:
+            if 'enabled' in retriever_resource_dict and retriever_resource_dict['enabled']:
+                show_retrieve_source = True
+
+        return show_retrieve_source
+
+    @classmethod
+    def validate_and_set_defaults(cls, config: dict) -> tuple[dict, list[str]]:
+        """
+        Validate and set defaults for retriever resource feature
+
+        :param config: app model config args
+        """
+        if not config.get("retriever_resource"):
+            config["retriever_resource"] = {
+                "enabled": False
+            }
+
+        if not isinstance(config["retriever_resource"], dict):
+            raise ValueError("retriever_resource must be of dict type")
+
+        if "enabled" not in config["retriever_resource"] or not config["retriever_resource"]["enabled"]:
+            config["retriever_resource"]["enabled"] = False
+
+        if not isinstance(config["retriever_resource"]["enabled"], bool):
+            raise ValueError("enabled in retriever_resource must be of boolean type")
+
+        return config, ["retriever_resource"]

+ 0 - 0
api/core/app/app_config/features/speech_to_text/__init__.py


+ 38 - 0
api/core/app/app_config/features/speech_to_text/manager.py

@@ -0,0 +1,38 @@
+class SpeechToTextConfigManager:
+    @classmethod
+    def convert(cls, config: dict) -> bool:
+        """
+        Convert speech_to_text config to a bool
+
+        :param config: model config args
+        """
+        speech_to_text = False
+        speech_to_text_dict = config.get('speech_to_text')
+        if speech_to_text_dict:
+            if 'enabled' in speech_to_text_dict and speech_to_text_dict['enabled']:
+                speech_to_text = True
+
+        return speech_to_text
+
+    @classmethod
+    def validate_and_set_defaults(cls, config: dict) -> tuple[dict, list[str]]:
+        """
+        Validate and set defaults for speech to text feature
+
+        :param config: app model config args
+        """
+        if not config.get("speech_to_text"):
+            config["speech_to_text"] = {
+                "enabled": False
+            }
+
+        if not isinstance(config["speech_to_text"], dict):
+            raise ValueError("speech_to_text must be of dict type")
+
+        if "enabled" not in config["speech_to_text"] or not config["speech_to_text"]["enabled"]:
+            config["speech_to_text"]["enabled"] = False
+
+        if not isinstance(config["speech_to_text"]["enabled"], bool):
+            raise ValueError("enabled in speech_to_text must be of boolean type")
+
+        return config, ["speech_to_text"]

+ 0 - 0
api/core/app/app_config/features/suggested_questions_after_answer/__init__.py


+ 39 - 0
api/core/app/app_config/features/suggested_questions_after_answer/manager.py

@@ -0,0 +1,39 @@
+class SuggestedQuestionsAfterAnswerConfigManager:
+    @classmethod
+    def convert(cls, config: dict) -> bool:
+        """
+        Convert model config to suggested_questions_after_answer feature flag
+
+        :param config: model config args
+        """
+        suggested_questions_after_answer = False
+        suggested_questions_after_answer_dict = config.get('suggested_questions_after_answer')
+        if suggested_questions_after_answer_dict:
+            if 'enabled' in suggested_questions_after_answer_dict and suggested_questions_after_answer_dict['enabled']:
+                suggested_questions_after_answer = True
+
+        return suggested_questions_after_answer
+
+    @classmethod
+    def validate_and_set_defaults(cls, config: dict) -> tuple[dict, list[str]]:
+        """
+        Validate and set defaults for suggested questions feature
+
+        :param config: app model config args
+        """
+        if not config.get("suggested_questions_after_answer"):
+            config["suggested_questions_after_answer"] = {
+                "enabled": False
+            }
+
+        if not isinstance(config["suggested_questions_after_answer"], dict):
+            raise ValueError("suggested_questions_after_answer must be of dict type")
+
+        if "enabled" not in config["suggested_questions_after_answer"] or not \
+        config["suggested_questions_after_answer"]["enabled"]:
+            config["suggested_questions_after_answer"]["enabled"] = False
+
+        if not isinstance(config["suggested_questions_after_answer"]["enabled"], bool):
+            raise ValueError("enabled in suggested_questions_after_answer must be of boolean type")
+
+        return config, ["suggested_questions_after_answer"]

+ 0 - 0
api/core/app/app_config/features/text_to_speech/__init__.py


+ 49 - 0
api/core/app/app_config/features/text_to_speech/manager.py

@@ -0,0 +1,49 @@
+from core.app.app_config.entities import TextToSpeechEntity
+
+
+class TextToSpeechConfigManager:
+    @classmethod
+    def convert(cls, config: dict) -> bool | TextToSpeechEntity:
+        """
+        Convert model config to text_to_speech config entity (False when the feature is disabled)
+
+        :param config: model config args
+        """
+        text_to_speech = False
+        text_to_speech_dict = config.get('text_to_speech')
+        if text_to_speech_dict:
+            if 'enabled' in text_to_speech_dict and text_to_speech_dict['enabled']:
+                text_to_speech = TextToSpeechEntity(
+                    enabled=text_to_speech_dict.get('enabled'),
+                    voice=text_to_speech_dict.get('voice'),
+                    language=text_to_speech_dict.get('language'),
+                )
+
+        return text_to_speech
+
+    @classmethod
+    def validate_and_set_defaults(cls, config: dict) -> tuple[dict, list[str]]:
+        """
+        Validate and set defaults for text to speech feature
+
+        :param config: app model config args
+        """
+        if not config.get("text_to_speech"):
+            config["text_to_speech"] = {
+                "enabled": False,
+                "voice": "",
+                "language": ""
+            }
+
+        if not isinstance(config["text_to_speech"], dict):
+            raise ValueError("text_to_speech must be of dict type")
+
+        if "enabled" not in config["text_to_speech"] or not config["text_to_speech"]["enabled"]:
+            config["text_to_speech"]["enabled"] = False
+            config["text_to_speech"]["voice"] = ""
+            config["text_to_speech"]["language"] = ""
+
+        if not isinstance(config["text_to_speech"]["enabled"], bool):
+            raise ValueError("enabled in text_to_speech must be of boolean type")
+
+        return config, ["text_to_speech"]

+ 0 - 0
api/core/app/app_config/workflow_ui_based_app/__init__.py


+ 0 - 0
api/core/app/app_config/workflow_ui_based_app/variables/__init__.py


+ 22 - 0
api/core/app/app_config/workflow_ui_based_app/variables/manager.py

@@ -0,0 +1,22 @@
+from core.app.app_config.entities import VariableEntity
+from models.workflow import Workflow
+
+
+class WorkflowVariablesConfigManager:
+    @classmethod
+    def convert(cls, workflow: Workflow) -> list[VariableEntity]:
+        """
+        Convert workflow start variables to variables
+
+        :param workflow: workflow instance
+        """
+        variables = []
+
+        # find start node
+        user_input_form = workflow.user_input_form()
+
+        # variables
+        for variable in user_input_form:
+            variables.append(VariableEntity(**variable))
+
+        return variables
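
A minimal sketch of the call pattern, using a stand-in object rather than a real Workflow row and an empty user_input_form, so it makes no assumptions about the VariableEntity field names defined in core.app.app_config.entities:

    from core.app.app_config.workflow_ui_based_app.variables.manager import (
        WorkflowVariablesConfigManager,
    )

    class _FakeWorkflow:
        def user_input_form(self):
            # A real Workflow would return one dict per variable declared on the start node.
            return []

    print(WorkflowVariablesConfigManager.convert(_FakeWorkflow()))  # []

With a real workflow, each dict returned by user_input_form() is expanded into a VariableEntity via VariableEntity(**variable).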

Some files were not shown because too many files changed in this diff