
chore(lint): fix quotes for f-string formatting by bumping ruff to 0.9.x (#12702)

Bowen Liang 3 months ago
parent
commit
166221d784
46 changed files with 120 additions and 131 deletions
  1. +2 -0  api/.ruff.toml
  2. +1 -1  api/configs/feature/__init__.py
  3. +1 -1  api/configs/feature/hosted_service/__init__.py
  4. +1 -1  api/controllers/console/admin.py
  5. +1 -1  api/controllers/console/datasets/datasets.py
  6. +2 -4  api/controllers/console/datasets/datasets_document.py
  7. +4 -8  api/controllers/console/datasets/datasets_segments.py
  8. +3 -6  api/controllers/service_api/dataset/segment.py
  9. +1 -2  api/core/app/apps/base_app_queue_manager.py
  10. +1 -1  api/core/app/task_pipeline/message_cycle_manage.py
  11. +3 -2  api/core/external_data_tool/api/api.py
  12. +1 -1  api/core/file/models.py
  13. +1 -1  api/core/model_runtime/model_providers/azure_openai/llm/llm.py
  14. +1 -1  api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py
  15. +2 -2  api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py
  16. +1 -2  api/core/model_runtime/model_providers/tongyi/llm/llm.py
  17. +1 -1  api/core/model_runtime/model_providers/triton_inference_server/llm/llm.py
  18. +5 -5  api/core/model_runtime/model_providers/wenxin/_common.py
  19. +2 -2  api/core/model_runtime/model_providers/xinference/llm/llm.py
  20. +1 -1  api/core/rag/extractor/firecrawl/firecrawl_app.py
  21. +1 -2  api/core/rag/extractor/notion_extractor.py
  22. +11 -11  api/core/tools/provider/builtin/aippt/tools/aippt.py
  23. +1 -2  api/core/tools/provider/builtin/aws/tools/nova_reel.py
  24. +1 -1  api/core/tools/provider/builtin/baidu_translate/tools/fieldtranslate.py
  25. +1 -1  api/core/tools/provider/builtin/baidu_translate/tools/language.py
  26. +1 -1  api/core/tools/provider/builtin/baidu_translate/tools/translate.py
  27. +17 -17  api/core/tools/provider/builtin/bing/tools/bing_web_search.py
  28. +1 -1  api/core/tools/provider/builtin/did/did_appx.py
  29. +2 -2  api/core/tools/provider/builtin/firecrawl/firecrawl_appx.py
  30. +3 -2  api/core/tools/provider/builtin/gaode/tools/gaode_weather.py
  31. +1 -1  api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py
  32. +1 -1  api/core/tools/provider/builtin/stability/tools/base.py
  33. +1 -1  api/core/tools/provider/builtin/vanna/vanna.py
  34. +2 -2  api/core/tools/tool/api_tool.py
  35. +2 -2  api/core/tools/utils/message_transformer.py
  36. +1 -1  api/core/tools/utils/parser.py
  37. +4 -4  api/core/workflow/nodes/http_request/executor.py
  38. +1 -1  api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py
  39. +20 -20  api/poetry.lock
  40. +1 -1  api/pyproject.toml
  41. +1 -2  api/services/dataset_service.py
  42. +1 -1  api/services/external_knowledge_service.py
  43. +3 -3  api/tests/artifact_tests/dependencies/test_dependencies_sorted.py
  44. +3 -3  api/tests/integration_tests/vdb/opensearch/test_opensearch.py
  45. +3 -3  api/tests/unit_tests/core/workflow/nodes/llm/test_node.py
  46. +1 -2  api/tests/unit_tests/services/workflow/test_workflow_converter.py

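Almost everything below is mechanical output of the newer formatter rather than hand-written changes. As the hunks show, ruff 0.9.x normalizes f-strings to double outer quotes (flipping string literals inside the replacement fields to single quotes), joins short implicitly concatenated string parts onto one line, and parenthesizes long assert messages instead of wrapping the condition (see the test hunks near the end). A minimal before/after sketch of the first two changes, using throwaway example variables rather than repository code:

url = "https://example.com/files/report.pdf"

# What the 0.8.x-formatted code looked like: the f-string keeps single outer
# quotes because the expression inside already uses double quotes.
extension_before = f'.{url.split(".")[-1]}'

# What ruff 0.9.x produces: the nested quotes are flipped to single quotes so
# the outer quotes can be double, like every other string in the codebase.
extension_after = f".{url.split('.')[-1]}"

# 0.9.x also joins short implicitly concatenated parts into a single literal.
message_before = (
    "No Embedding Model available. Please configure a valid provider "
    "in the Settings -> Model Provider."
)
message_after = "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."

# The rewrites are purely cosmetic; the runtime values are unchanged.
assert extension_before == extension_after == ".pdf"
assert message_before == message_after
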
+ 2 - 0
api/.ruff.toml

@@ -53,10 +53,12 @@ ignore = [
     "FURB152", # math-constant
     "UP007", # non-pep604-annotation
     "UP032", # f-string
+    "UP045", # non-pep604-annotation-optional
     "B005", # strip-with-multi-characters
     "B006", # mutable-argument-default
     "B007", # unused-loop-control-variable
     "B026", # star-arg-unpacking-after-keyword-arg
+    "B903", # class-as-data-structure
     "B904", # raise-without-from-inside-except
     "B905", # zip-without-explicit-strict
     "N806", # non-lowercase-variable-in-function

+ 1 - 1
api/configs/feature/__init__.py

@@ -146,7 +146,7 @@ class EndpointConfig(BaseSettings):
     )
 
     CONSOLE_WEB_URL: str = Field(
-        description="Base URL for the console web interface," "used for frontend references and CORS configuration",
+        description="Base URL for the console web interface,used for frontend references and CORS configuration",
         default="",
     )
 

+ 1 - 1
api/configs/feature/hosted_service/__init__.py

@@ -181,7 +181,7 @@ class HostedFetchAppTemplateConfig(BaseSettings):
     """
 
     HOSTED_FETCH_APP_TEMPLATES_MODE: str = Field(
-        description="Mode for fetching app templates: remote, db, or builtin" " default to remote,",
+        description="Mode for fetching app templates: remote, db, or builtin default to remote,",
         default="remote",
     )
 

+ 1 - 1
api/controllers/console/admin.py

@@ -56,7 +56,7 @@ class InsertExploreAppListApi(Resource):
 
         app = App.query.filter(App.id == args["app_id"]).first()
         if not app:
-            raise NotFound(f'App \'{args["app_id"]}\' is not found')
+            raise NotFound(f"App '{args['app_id']}' is not found")
 
         site = app.site
         if not site:

+ 1 - 1
api/controllers/console/datasets/datasets.py

@@ -457,7 +457,7 @@ class DatasetIndexingEstimateApi(Resource):
             )
         except LLMBadRequestError:
             raise ProviderNotInitializeError(
-                "No Embedding Model available. Please configure a valid provider " "in the Settings -> Model Provider."
+                "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
             )
         except ProviderTokenNotInitError as ex:
             raise ProviderNotInitializeError(ex.description)

+ 2 - 4
api/controllers/console/datasets/datasets_document.py

@@ -350,8 +350,7 @@ class DatasetInitApi(Resource):
                 )
             except InvokeAuthorizationError:
                 raise ProviderNotInitializeError(
-                    "No Embedding Model available. Please configure a valid provider "
-                    "in the Settings -> Model Provider."
+                    "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
                 )
             except ProviderTokenNotInitError as ex:
                 raise ProviderNotInitializeError(ex.description)
@@ -526,8 +525,7 @@ class DocumentBatchIndexingEstimateApi(DocumentResource):
                 return response.model_dump(), 200
             except LLMBadRequestError:
                 raise ProviderNotInitializeError(
-                    "No Embedding Model available. Please configure a valid provider "
-                    "in the Settings -> Model Provider."
+                    "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
                 )
             except ProviderTokenNotInitError as ex:
                 raise ProviderNotInitializeError(ex.description)

+ 4 - 8
api/controllers/console/datasets/datasets_segments.py

@@ -168,8 +168,7 @@ class DatasetDocumentSegmentApi(Resource):
                 )
             except LLMBadRequestError:
                 raise ProviderNotInitializeError(
-                    "No Embedding Model available. Please configure a valid provider "
-                    "in the Settings -> Model Provider."
+                    "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
                 )
             except ProviderTokenNotInitError as ex:
                 raise ProviderNotInitializeError(ex.description)
@@ -217,8 +216,7 @@ class DatasetDocumentSegmentAddApi(Resource):
                 )
             except LLMBadRequestError:
                 raise ProviderNotInitializeError(
-                    "No Embedding Model available. Please configure a valid provider "
-                    "in the Settings -> Model Provider."
+                    "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
                 )
             except ProviderTokenNotInitError as ex:
                 raise ProviderNotInitializeError(ex.description)
@@ -267,8 +265,7 @@ class DatasetDocumentSegmentUpdateApi(Resource):
                 )
             except LLMBadRequestError:
                 raise ProviderNotInitializeError(
-                    "No Embedding Model available. Please configure a valid provider "
-                    "in the Settings -> Model Provider."
+                    "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
                 )
             except ProviderTokenNotInitError as ex:
                 raise ProviderNotInitializeError(ex.description)
@@ -437,8 +434,7 @@ class ChildChunkAddApi(Resource):
                 )
             except LLMBadRequestError:
                 raise ProviderNotInitializeError(
-                    "No Embedding Model available. Please configure a valid provider "
-                    "in the Settings -> Model Provider."
+                    "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
                 )
             except ProviderTokenNotInitError as ex:
                 raise ProviderNotInitializeError(ex.description)

+ 3 - 6
api/controllers/service_api/dataset/segment.py

@@ -53,8 +53,7 @@ class SegmentApi(DatasetApiResource):
                 )
             except LLMBadRequestError:
                 raise ProviderNotInitializeError(
-                    "No Embedding Model available. Please configure a valid provider "
-                    "in the Settings -> Model Provider."
+                    "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
                 )
             except ProviderTokenNotInitError as ex:
                 raise ProviderNotInitializeError(ex.description)
@@ -95,8 +94,7 @@ class SegmentApi(DatasetApiResource):
                 )
             except LLMBadRequestError:
                 raise ProviderNotInitializeError(
-                    "No Embedding Model available. Please configure a valid provider "
-                    "in the Settings -> Model Provider."
+                    "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
                 )
             except ProviderTokenNotInitError as ex:
                 raise ProviderNotInitializeError(ex.description)
@@ -175,8 +173,7 @@ class DatasetSegmentApi(DatasetApiResource):
                 )
             except LLMBadRequestError:
                 raise ProviderNotInitializeError(
-                    "No Embedding Model available. Please configure a valid provider "
-                    "in the Settings -> Model Provider."
+                    "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
                 )
             except ProviderTokenNotInitError as ex:
                 raise ProviderNotInitializeError(ex.description)

+ 1 - 2
api/core/app/apps/base_app_queue_manager.py

@@ -167,8 +167,7 @@ class AppQueueManager:
         else:
             if isinstance(data, DeclarativeMeta) or hasattr(data, "_sa_instance_state"):
                 raise TypeError(
-                    "Critical Error: Passing SQLAlchemy Model instances "
-                    "that cause thread safety issues is not allowed."
+                    "Critical Error: Passing SQLAlchemy Model instances that cause thread safety issues is not allowed."
                 )
 
 

+ 1 - 1
api/core/app/task_pipeline/message_cycle_manage.py

@@ -145,7 +145,7 @@ class MessageCycleManage:
 
             # get extension
             if "." in message_file.url:
-                extension = f'.{message_file.url.split(".")[-1]}'
+                extension = f".{message_file.url.split('.')[-1]}"
                 if len(extension) > 10:
                     extension = ".bin"
             else:

+ 3 - 2
api/core/external_data_tool/api/api.py

@@ -62,8 +62,9 @@ class ApiExternalDataTool(ExternalDataTool):
 
         if not api_based_extension:
             raise ValueError(
-                "[External data tool] API query failed, variable: {}, "
-                "error: api_based_extension_id is invalid".format(self.variable)
+                "[External data tool] API query failed, variable: {}, error: api_based_extension_id is invalid".format(
+                    self.variable
+                )
             )
 
         # decrypt api_key

+ 1 - 1
api/core/file/models.py

@@ -90,7 +90,7 @@ class File(BaseModel):
     def markdown(self) -> str:
         url = self.generate_url()
         if self.type == FileType.IMAGE:
-            text = f'![{self.filename or ""}]({url})'
+            text = f"![{self.filename or ''}]({url})"
         else:
             text = f"[{self.filename or url}]({url})"
 

+ 1 - 1
api/core/model_runtime/model_providers/azure_openai/llm/llm.py

@@ -108,7 +108,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
         ai_model_entity = self._get_ai_model_entity(base_model_name=base_model_name, model=model)
 
         if not ai_model_entity:
-            raise CredentialsValidateFailedError(f'Base Model Name {credentials["base_model_name"]} is invalid')
+            raise CredentialsValidateFailedError(f"Base Model Name {credentials['base_model_name']} is invalid")
 
         try:
             client = AzureOpenAI(**self._to_credential_kwargs(credentials))

+ 1 - 1
api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py

@@ -130,7 +130,7 @@ class AzureOpenAITextEmbeddingModel(_CommonAzureOpenAI, TextEmbeddingModel):
             raise CredentialsValidateFailedError("Base Model Name is required")
 
         if not self._get_ai_model_entity(credentials["base_model_name"], model):
-            raise CredentialsValidateFailedError(f'Base Model Name {credentials["base_model_name"]} is invalid')
+            raise CredentialsValidateFailedError(f"Base Model Name {credentials['base_model_name']} is invalid")
 
         try:
             credentials_kwargs = self._to_credential_kwargs(credentials)

+ 2 - 2
api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py

@@ -162,9 +162,9 @@ class HuggingfaceHubTextEmbeddingModel(_CommonHuggingfaceHub, TextEmbeddingModel
     @staticmethod
     def _check_endpoint_url_model_repository_name(credentials: dict, model_name: str):
         try:
-            url = f'{HUGGINGFACE_ENDPOINT_API}{credentials["huggingface_namespace"]}'
+            url = f"{HUGGINGFACE_ENDPOINT_API}{credentials['huggingface_namespace']}"
             headers = {
-                "Authorization": f'Bearer {credentials["huggingfacehub_api_token"]}',
+                "Authorization": f"Bearer {credentials['huggingfacehub_api_token']}",
                 "Content-Type": "application/json",
             }
 

+ 1 - 2
api/core/model_runtime/model_providers/tongyi/llm/llm.py

@@ -257,8 +257,7 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
         for index, response in enumerate(responses):
             if response.status_code not in {200, HTTPStatus.OK}:
                 raise ServiceUnavailableError(
-                    f"Failed to invoke model {model}, status code: {response.status_code}, "
-                    f"message: {response.message}"
+                    f"Failed to invoke model {model}, status code: {response.status_code}, message: {response.message}"
                 )
 
             resp_finish_reason = response.output.choices[0].finish_reason

+ 1 - 1
api/core/model_runtime/model_providers/triton_inference_server/llm/llm.py

@@ -146,7 +146,7 @@ class TritonInferenceAILargeLanguageModel(LargeLanguageModel):
             elif credentials["completion_type"] == "completion":
                 completion_type = LLMMode.COMPLETION.value
             else:
-                raise ValueError(f'completion_type {credentials["completion_type"]} is not supported')
+                raise ValueError(f"completion_type {credentials['completion_type']} is not supported")
 
         entity = AIModelEntity(
             model=model,

+ 5 - 5
api/core/model_runtime/model_providers/wenxin/_common.py

@@ -41,15 +41,15 @@ class BaiduAccessToken:
         resp = response.json()
         if "error" in resp:
             if resp["error"] == "invalid_client":
-                raise InvalidAPIKeyError(f'Invalid API key or secret key: {resp["error_description"]}')
+                raise InvalidAPIKeyError(f"Invalid API key or secret key: {resp['error_description']}")
             elif resp["error"] == "unknown_error":
-                raise InternalServerError(f'Internal server error: {resp["error_description"]}')
+                raise InternalServerError(f"Internal server error: {resp['error_description']}")
             elif resp["error"] == "invalid_request":
-                raise BadRequestError(f'Bad request: {resp["error_description"]}')
+                raise BadRequestError(f"Bad request: {resp['error_description']}")
             elif resp["error"] == "rate_limit_exceeded":
-                raise RateLimitReachedError(f'Rate limit reached: {resp["error_description"]}')
+                raise RateLimitReachedError(f"Rate limit reached: {resp['error_description']}")
             else:
-                raise Exception(f'Unknown error: {resp["error_description"]}')
+                raise Exception(f"Unknown error: {resp['error_description']}")
 
         return resp["access_token"]
 

+ 2 - 2
api/core/model_runtime/model_providers/xinference/llm/llm.py

@@ -406,7 +406,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
             elif credentials["completion_type"] == "completion":
                 completion_type = LLMMode.COMPLETION.value
             else:
-                raise ValueError(f'completion_type {credentials["completion_type"]} is not supported')
+                raise ValueError(f"completion_type {credentials['completion_type']} is not supported")
         else:
             extra_args = XinferenceHelper.get_xinference_extra_parameter(
                 server_url=credentials["server_url"],
@@ -472,7 +472,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
         api_key = credentials.get("api_key") or "abc"
 
         client = OpenAI(
-            base_url=f'{credentials["server_url"]}/v1',
+            base_url=f"{credentials['server_url']}/v1",
             api_key=api_key,
             max_retries=int(credentials.get("max_retries") or DEFAULT_MAX_RETRIES),
             timeout=int(credentials.get("invoke_timeout") or DEFAULT_INVOKE_TIMEOUT),

+ 1 - 1
api/core/rag/extractor/firecrawl/firecrawl_app.py

@@ -31,7 +31,7 @@ class FirecrawlApp:
                     "markdown": data.get("markdown"),
                 }
             else:
-                raise Exception(f'Failed to scrape URL. Error: {response_data["error"]}')
+                raise Exception(f"Failed to scrape URL. Error: {response_data['error']}")
 
         elif response.status_code in {402, 409, 500}:
             error_message = response.json().get("error", "Unknown error occurred")

+ 1 - 2
api/core/rag/extractor/notion_extractor.py

@@ -358,8 +358,7 @@ class NotionExtractor(BaseExtractor):
 
         if not data_source_binding:
             raise Exception(
-                f"No notion data source binding found for tenant {tenant_id} "
-                f"and notion workspace {notion_workspace_id}"
+                f"No notion data source binding found for tenant {tenant_id} and notion workspace {notion_workspace_id}"
             )
 
         return cast(str, data_source_binding.access_token)

+ 11 - 11
api/core/tools/provider/builtin/aippt/tools/aippt.py

@@ -127,7 +127,7 @@ class AIPPTGenerateToolAdapter:
 
         response = response.json()
         if response.get("code") != 0:
-            raise Exception(f'Failed to create task: {response.get("msg")}')
+            raise Exception(f"Failed to create task: {response.get('msg')}")
 
         return response.get("data", {}).get("id")
 
@@ -222,7 +222,7 @@ class AIPPTGenerateToolAdapter:
         elif model == "wenxin":
             response = response.json()
             if response.get("code") != 0:
-                raise Exception(f'Failed to generate content: {response.get("msg")}')
+                raise Exception(f"Failed to generate content: {response.get('msg')}")
 
             return response.get("data", "")
 
@@ -254,7 +254,7 @@ class AIPPTGenerateToolAdapter:
 
         response = response.json()
         if response.get("code") != 0:
-            raise Exception(f'Failed to generate ppt: {response.get("msg")}')
+            raise Exception(f"Failed to generate ppt: {response.get('msg')}")
 
         id = response.get("data", {}).get("id")
         cover_url = response.get("data", {}).get("cover_url")
@@ -270,7 +270,7 @@ class AIPPTGenerateToolAdapter:
 
         response = response.json()
         if response.get("code") != 0:
-            raise Exception(f'Failed to generate ppt: {response.get("msg")}')
+            raise Exception(f"Failed to generate ppt: {response.get('msg')}")
 
         export_code = response.get("data")
         if not export_code:
@@ -290,7 +290,7 @@ class AIPPTGenerateToolAdapter:
 
             response = response.json()
             if response.get("code") != 0:
-                raise Exception(f'Failed to generate ppt: {response.get("msg")}')
+                raise Exception(f"Failed to generate ppt: {response.get('msg')}")
 
             if response.get("msg") == "导出中":
                 current_iteration += 1
@@ -343,7 +343,7 @@ class AIPPTGenerateToolAdapter:
             raise Exception(f"Failed to connect to aippt: {response.text}")
         response = response.json()
         if response.get("code") != 0:
-            raise Exception(f'Failed to connect to aippt: {response.get("msg")}')
+            raise Exception(f"Failed to connect to aippt: {response.get('msg')}")
 
         token = response.get("data", {}).get("token")
         expire = response.get("data", {}).get("time_expire")
@@ -379,7 +379,7 @@ class AIPPTGenerateToolAdapter:
                 if cls._style_cache[key]["expire"] < now:
                     del cls._style_cache[key]
 
-            key = f'{credentials["aippt_access_key"]}#@#{user_id}'
+            key = f"{credentials['aippt_access_key']}#@#{user_id}"
             if key in cls._style_cache:
                 return cls._style_cache[key]["colors"], cls._style_cache[key]["styles"]
 
@@ -396,11 +396,11 @@ class AIPPTGenerateToolAdapter:
         response = response.json()
 
         if response.get("code") != 0:
-            raise Exception(f'Failed to connect to aippt: {response.get("msg")}')
+            raise Exception(f"Failed to connect to aippt: {response.get('msg')}")
 
         colors = [
             {
-                "id": f'id-{item.get("id")}',
+                "id": f"id-{item.get('id')}",
                 "name": item.get("name"),
                 "en_name": item.get("en_name", item.get("name")),
             }
@@ -408,7 +408,7 @@ class AIPPTGenerateToolAdapter:
         ]
         styles = [
             {
-                "id": f'id-{item.get("id")}',
+                "id": f"id-{item.get('id')}",
                 "name": item.get("title"),
             }
             for item in response.get("data", {}).get("suit_style") or []
@@ -454,7 +454,7 @@ class AIPPTGenerateToolAdapter:
         response = response.json()
 
         if response.get("code") != 0:
-            raise Exception(f'Failed to connect to aippt: {response.get("msg")}')
+            raise Exception(f"Failed to connect to aippt: {response.get('msg')}")
 
         if len(response.get("data", {}).get("list") or []) > 0:
             return response.get("data", {}).get("list")[0].get("id")

+ 1 - 2
api/core/tools/provider/builtin/aws/tools/nova_reel.py

@@ -229,8 +229,7 @@ class NovaReelTool(BuiltinTool):
 
         if async_mode:
             return self.create_text_message(
-                f"Video generation started.\nInvocation ARN: {invocation_arn}\n"
-                f"Video will be available at: {video_uri}"
+                f"Video generation started.\nInvocation ARN: {invocation_arn}\nVideo will be available at: {video_uri}"
             )
 
         return self._wait_for_completion(bedrock, s3_client, invocation_arn)

+ 1 - 1
api/core/tools/provider/builtin/baidu_translate/tools/fieldtranslate.py

@@ -65,7 +65,7 @@ class BaiduFieldTranslateTool(BuiltinTool, BaiduTranslateToolBase):
             if "trans_result" in result:
                 result_text = result["trans_result"][0]["dst"]
             else:
-                result_text = f'{result["error_code"]}: {result["error_msg"]}'
+                result_text = f"{result['error_code']}: {result['error_msg']}"
 
             return self.create_text_message(str(result_text))
         except requests.RequestException as e:

+ 1 - 1
api/core/tools/provider/builtin/baidu_translate/tools/language.py

@@ -52,7 +52,7 @@ class BaiduLanguageTool(BuiltinTool, BaiduTranslateToolBase):
 
             result_text = ""
             if result["error_code"] != 0:
-                result_text = f'{result["error_code"]}: {result["error_msg"]}'
+                result_text = f"{result['error_code']}: {result['error_msg']}"
             else:
                 result_text = result["data"]["src"]
                 result_text = self.mapping_result(description_language, result_text)

+ 1 - 1
api/core/tools/provider/builtin/baidu_translate/tools/translate.py

@@ -58,7 +58,7 @@ class BaiduTranslateTool(BuiltinTool, BaiduTranslateToolBase):
             if "trans_result" in result:
                 result_text = result["trans_result"][0]["dst"]
             else:
-                result_text = f'{result["error_code"]}: {result["error_msg"]}'
+                result_text = f"{result['error_code']}: {result['error_msg']}"
 
             return self.create_text_message(str(result_text))
         except requests.RequestException as e:

+ 17 - 17
api/core/tools/provider/builtin/bing/tools/bing_web_search.py

@@ -30,7 +30,7 @@ class BingSearchTool(BuiltinTool):
         headers = {"Ocp-Apim-Subscription-Key": subscription_key, "Accept-Language": accept_language}
 
         query = quote(query)
-        server_url = f'{server_url}?q={query}&mkt={market_code}&count={limit}&responseFilter={",".join(filters)}'
+        server_url = f"{server_url}?q={query}&mkt={market_code}&count={limit}&responseFilter={','.join(filters)}"
         response = get(server_url, headers=headers)
 
         if response.status_code != 200:
@@ -47,23 +47,23 @@ class BingSearchTool(BuiltinTool):
             results = []
             if search_results:
                 for result in search_results:
-                    url = f': {result["url"]}' if "url" in result else ""
-                    results.append(self.create_text_message(text=f'{result["name"]}{url}'))
+                    url = f": {result['url']}" if "url" in result else ""
+                    results.append(self.create_text_message(text=f"{result['name']}{url}"))
 
             if entities:
                 for entity in entities:
-                    url = f': {entity["url"]}' if "url" in entity else ""
-                    results.append(self.create_text_message(text=f'{entity.get("name", "")}{url}'))
+                    url = f": {entity['url']}" if "url" in entity else ""
+                    results.append(self.create_text_message(text=f"{entity.get('name', '')}{url}"))
 
             if news:
                 for news_item in news:
-                    url = f': {news_item["url"]}' if "url" in news_item else ""
-                    results.append(self.create_text_message(text=f'{news_item.get("name", "")}{url}'))
+                    url = f": {news_item['url']}" if "url" in news_item else ""
+                    results.append(self.create_text_message(text=f"{news_item.get('name', '')}{url}"))
 
             if related_searches:
                 for related in related_searches:
-                    url = f': {related["displayText"]}' if "displayText" in related else ""
-                    results.append(self.create_text_message(text=f'{related.get("displayText", "")}{url}'))
+                    url = f": {related['displayText']}" if "displayText" in related else ""
+                    results.append(self.create_text_message(text=f"{related.get('displayText', '')}{url}"))
 
             return results
         elif result_type == "json":
@@ -106,29 +106,29 @@ class BingSearchTool(BuiltinTool):
             text = ""
             if search_results:
                 for i, result in enumerate(search_results):
-                    text += f'{i + 1}: {result.get("name", "")} - {result.get("snippet", "")}\n'
+                    text += f"{i + 1}: {result.get('name', '')} - {result.get('snippet', '')}\n"
 
             if computation and "expression" in computation and "value" in computation:
                 text += "\nComputation:\n"
-                text += f'{computation["expression"]} = {computation["value"]}\n'
+                text += f"{computation['expression']} = {computation['value']}\n"
 
             if entities:
                 text += "\nEntities:\n"
                 for entity in entities:
-                    url = f'- {entity["url"]}' if "url" in entity else ""
-                    text += f'{entity.get("name", "")}{url}\n'
+                    url = f"- {entity['url']}" if "url" in entity else ""
+                    text += f"{entity.get('name', '')}{url}\n"
 
             if news:
                 text += "\nNews:\n"
                 for news_item in news:
-                    url = f'- {news_item["url"]}' if "url" in news_item else ""
-                    text += f'{news_item.get("name", "")}{url}\n'
+                    url = f"- {news_item['url']}" if "url" in news_item else ""
+                    text += f"{news_item.get('name', '')}{url}\n"
 
             if related_searches:
                 text += "\n\nRelated Searches:\n"
                 for related in related_searches:
-                    url = f'- {related["webSearchUrl"]}' if "webSearchUrl" in related else ""
-                    text += f'{related.get("displayText", "")}{url}\n'
+                    url = f"- {related['webSearchUrl']}" if "webSearchUrl" in related else ""
+                    text += f"{related.get('displayText', '')}{url}\n"
 
             return self.create_text_message(text=self.summary(user_id=user_id, content=text))
 

+ 1 - 1
api/core/tools/provider/builtin/did/did_appx.py

@@ -83,5 +83,5 @@ class DIDApp:
             if status["status"] == "done":
                 return status
             elif status["status"] == "error" or status["status"] == "rejected":
-                raise HTTPError(f'Talks {id} failed: {status["status"]} {status.get("error", {}).get("description")}')
+                raise HTTPError(f"Talks {id} failed: {status['status']} {status.get('error', {}).get('description')}")
             time.sleep(poll_interval)

+ 2 - 2
api/core/tools/provider/builtin/firecrawl/firecrawl_appx.py

@@ -74,7 +74,7 @@ class FirecrawlApp:
         if response is None:
             raise HTTPError("Failed to initiate crawl after multiple retries")
         elif response.get("success") == False:
-            raise HTTPError(f'Failed to crawl: {response.get("error")}')
+            raise HTTPError(f"Failed to crawl: {response.get('error')}")
         job_id: str = response["id"]
         if wait:
             return self._monitor_job_status(job_id=job_id, poll_interval=poll_interval)
@@ -100,7 +100,7 @@ class FirecrawlApp:
             if status["status"] == "completed":
                 return status
             elif status["status"] == "failed":
-                raise HTTPError(f'Job {job_id} failed: {status["error"]}')
+                raise HTTPError(f"Job {job_id} failed: {status['error']}")
             time.sleep(poll_interval)
 
 

+ 3 - 2
api/core/tools/provider/builtin/gaode/tools/gaode_weather.py

@@ -37,8 +37,9 @@ class GaodeRepositoriesTool(BuiltinTool):
                     CityCode = City_data["districts"][0]["adcode"]
                     weatherInfo_response = s.request(
                         method="GET",
-                        url="{url}/weather/weatherInfo?city={citycode}&extensions=all&key={apikey}&output=json"
-                        "".format(url=api_domain, citycode=CityCode, apikey=self.runtime.credentials.get("api_key")),
+                        url="{url}/weather/weatherInfo?city={citycode}&extensions=all&key={apikey}&output=json".format(
+                            url=api_domain, citycode=CityCode, apikey=self.runtime.credentials.get("api_key")
+                        ),
                     )
                     weatherInfo_data = weatherInfo_response.json()
                     if weatherInfo_response.status_code == 200 and weatherInfo_data.get("info") == "OK":

+ 1 - 1
api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py

@@ -110,7 +110,7 @@ class ListWorksheetRecordsTool(BuiltinTool):
                             result["rows"].append(self.get_row_field_value(row, schema))
                         return self.create_text_message(json.dumps(result, ensure_ascii=False))
                     else:
-                        result_text = f"Found {result['total']} rows in worksheet \"{worksheet_name}\"."
+                        result_text = f'Found {result["total"]} rows in worksheet "{worksheet_name}".'
                         if result["total"] > 0:
                             result_text += (
                                 f" The following are {min(limit, result['total'])}"

+ 1 - 1
api/core/tools/provider/builtin/stability/tools/base.py

@@ -28,4 +28,4 @@ class BaseStabilityAuthorization:
         """
         This method is responsible for generating the authorization headers.
         """
-        return {"Authorization": f'Bearer {credentials.get("api_key", "")}'}
+        return {"Authorization": f"Bearer {credentials.get('api_key', '')}"}

+ 1 - 1
api/core/tools/provider/builtin/vanna/vanna.py

@@ -38,7 +38,7 @@ class VannaProvider(BuiltinToolProviderController):
                 tool_parameters={
                     "model": "chinook",
                     "db_type": "SQLite",
-                    "url": f'{self._get_protocol_and_main_domain(credentials["base_url"])}/Chinook.sqlite',
+                    "url": f"{self._get_protocol_and_main_domain(credentials['base_url'])}/Chinook.sqlite",
                     "query": "What are the top 10 customers by sales?",
                 },
             )

+ 2 - 2
api/core/tools/tool/api_tool.py

@@ -84,9 +84,9 @@ class ApiTool(Tool):
             if "api_key_header_prefix" in credentials:
                 api_key_header_prefix = credentials["api_key_header_prefix"]
                 if api_key_header_prefix == "basic" and credentials["api_key_value"]:
-                    credentials["api_key_value"] = f'Basic {credentials["api_key_value"]}'
+                    credentials["api_key_value"] = f"Basic {credentials['api_key_value']}"
                 elif api_key_header_prefix == "bearer" and credentials["api_key_value"]:
-                    credentials["api_key_value"] = f'Bearer {credentials["api_key_value"]}'
+                    credentials["api_key_value"] = f"Bearer {credentials['api_key_value']}"
                 elif api_key_header_prefix == "custom":
                     pass
 

+ 2 - 2
api/core/tools/utils/message_transformer.py

@@ -29,7 +29,7 @@ class ToolFileMessageTransformer:
                         user_id=user_id, tenant_id=tenant_id, conversation_id=conversation_id, file_url=message.message
                     )
 
-                    url = f'/files/tools/{file.id}{guess_extension(file.mimetype) or ".png"}'
+                    url = f"/files/tools/{file.id}{guess_extension(file.mimetype) or '.png'}"
 
                     result.append(
                         ToolInvokeMessage(
@@ -122,4 +122,4 @@ class ToolFileMessageTransformer:
 
     @classmethod
     def get_tool_file_url(cls, tool_file_id: str, extension: Optional[str]) -> str:
-        return f'/files/tools/{tool_file_id}{extension or ".bin"}'
+        return f"/files/tools/{tool_file_id}{extension or '.bin'}"

+ 1 - 1
api/core/tools/utils/parser.py

@@ -149,7 +149,7 @@ class ApiBasedToolSchemaParser:
                 if not path:
                     path = str(uuid.uuid4())
 
-                interface["operation"]["operationId"] = f'{path}_{interface["method"]}'
+                interface["operation"]["operationId"] = f"{path}_{interface['method']}"
 
             bundles.append(
                 ApiToolBundle(

+ 4 - 4
api/core/workflow/nodes/http_request/executor.py

@@ -253,9 +253,9 @@ class Executor:
         )
         if executor_response.size > threshold_size:
             raise ResponseSizeError(
-                f'{"File" if executor_response.is_file else "Text"} size is too large,'
-                f' max size is {threshold_size / 1024 / 1024:.2f} MB,'
-                f' but current size is {executor_response.readable_size}.'
+                f"{'File' if executor_response.is_file else 'Text'} size is too large,"
+                f" max size is {threshold_size / 1024 / 1024:.2f} MB,"
+                f" but current size is {executor_response.readable_size}."
             )
 
         return executor_response
@@ -338,7 +338,7 @@ class Executor:
                 if self.auth.config and self.auth.config.header:
                     authorization_header = self.auth.config.header
                 if k.lower() == authorization_header.lower():
-                    raw += f'{k}: {"*" * len(v)}\r\n'
+                    raw += f"{k}: {'*' * len(v)}\r\n"
                     continue
             raw += f"{k}: {v}\r\n"
 

+ 1 - 1
api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py

@@ -26,7 +26,7 @@ def handle(sender, **kwargs):
                     tool_runtime=tool_runtime,
                     provider_name=tool_entity.provider_name,
                     provider_type=tool_entity.provider_type,
-                    identity_id=f'WORKFLOW.{app.id}.{node_data.get("id")}',
+                    identity_id=f"WORKFLOW.{app.id}.{node_data.get('id')}",
                 )
                 manager.delete_tool_parameters_cache()
             except:

+ 20 - 20
api/poetry.lock

@@ -8846,29 +8846,29 @@ pyasn1 = ">=0.1.3"
 
 [[package]]
 name = "ruff"
-version = "0.8.6"
+version = "0.9.2"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "ruff-0.8.6-py3-none-linux_armv6l.whl", hash = "sha256:defed167955d42c68b407e8f2e6f56ba52520e790aba4ca707a9c88619e580e3"},
-    {file = "ruff-0.8.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:54799ca3d67ae5e0b7a7ac234baa657a9c1784b48ec954a094da7c206e0365b1"},
-    {file = "ruff-0.8.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e88b8f6d901477c41559ba540beeb5a671e14cd29ebd5683903572f4b40a9807"},
-    {file = "ruff-0.8.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0509e8da430228236a18a677fcdb0c1f102dd26d5520f71f79b094963322ed25"},
-    {file = "ruff-0.8.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:91a7ddb221779871cf226100e677b5ea38c2d54e9e2c8ed847450ebbdf99b32d"},
-    {file = "ruff-0.8.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:248b1fb3f739d01d528cc50b35ee9c4812aa58cc5935998e776bf8ed5b251e75"},
-    {file = "ruff-0.8.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:bc3c083c50390cf69e7e1b5a5a7303898966be973664ec0c4a4acea82c1d4315"},
-    {file = "ruff-0.8.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52d587092ab8df308635762386f45f4638badb0866355b2b86760f6d3c076188"},
-    {file = "ruff-0.8.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:61323159cf21bc3897674e5adb27cd9e7700bab6b84de40d7be28c3d46dc67cf"},
-    {file = "ruff-0.8.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ae4478b1471fc0c44ed52a6fb787e641a2ac58b1c1f91763bafbc2faddc5117"},
-    {file = "ruff-0.8.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0c000a471d519b3e6cfc9c6680025d923b4ca140ce3e4612d1a2ef58e11f11fe"},
-    {file = "ruff-0.8.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9257aa841e9e8d9b727423086f0fa9a86b6b420fbf4bf9e1465d1250ce8e4d8d"},
-    {file = "ruff-0.8.6-py3-none-musllinux_1_2_i686.whl", hash = "sha256:45a56f61b24682f6f6709636949ae8cc82ae229d8d773b4c76c09ec83964a95a"},
-    {file = "ruff-0.8.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:496dd38a53aa173481a7d8866bcd6451bd934d06976a2505028a50583e001b76"},
-    {file = "ruff-0.8.6-py3-none-win32.whl", hash = "sha256:e169ea1b9eae61c99b257dc83b9ee6c76f89042752cb2d83486a7d6e48e8f764"},
-    {file = "ruff-0.8.6-py3-none-win_amd64.whl", hash = "sha256:f1d70bef3d16fdc897ee290d7d20da3cbe4e26349f62e8a0274e7a3f4ce7a905"},
-    {file = "ruff-0.8.6-py3-none-win_arm64.whl", hash = "sha256:7d7fc2377a04b6e04ffe588caad613d0c460eb2ecba4c0ccbbfe2bc973cbc162"},
-    {file = "ruff-0.8.6.tar.gz", hash = "sha256:dcad24b81b62650b0eb8814f576fc65cfee8674772a6e24c9b747911801eeaa5"},
+    {file = "ruff-0.9.2-py3-none-linux_armv6l.whl", hash = "sha256:80605a039ba1454d002b32139e4970becf84b5fee3a3c3bf1c2af6f61a784347"},
+    {file = "ruff-0.9.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b9aab82bb20afd5f596527045c01e6ae25a718ff1784cb92947bff1f83068b00"},
+    {file = "ruff-0.9.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fbd337bac1cfa96be615f6efcd4bc4d077edbc127ef30e2b8ba2a27e18c054d4"},
+    {file = "ruff-0.9.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82b35259b0cbf8daa22a498018e300b9bb0174c2bbb7bcba593935158a78054d"},
+    {file = "ruff-0.9.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b6a9701d1e371bf41dca22015c3f89769da7576884d2add7317ec1ec8cb9c3c"},
+    {file = "ruff-0.9.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9cc53e68b3c5ae41e8faf83a3b89f4a5d7b2cb666dff4b366bb86ed2a85b481f"},
+    {file = "ruff-0.9.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:8efd9da7a1ee314b910da155ca7e8953094a7c10d0c0a39bfde3fcfd2a015684"},
+    {file = "ruff-0.9.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3292c5a22ea9a5f9a185e2d131dc7f98f8534a32fb6d2ee7b9944569239c648d"},
+    {file = "ruff-0.9.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a605fdcf6e8b2d39f9436d343d1f0ff70c365a1e681546de0104bef81ce88df"},
+    {file = "ruff-0.9.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c547f7f256aa366834829a08375c297fa63386cbe5f1459efaf174086b564247"},
+    {file = "ruff-0.9.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d18bba3d3353ed916e882521bc3e0af403949dbada344c20c16ea78f47af965e"},
+    {file = "ruff-0.9.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:b338edc4610142355ccf6b87bd356729b62bf1bc152a2fad5b0c7dc04af77bfe"},
+    {file = "ruff-0.9.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:492a5e44ad9b22a0ea98cf72e40305cbdaf27fac0d927f8bc9e1df316dcc96eb"},
+    {file = "ruff-0.9.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:af1e9e9fe7b1f767264d26b1075ac4ad831c7db976911fa362d09b2d0356426a"},
+    {file = "ruff-0.9.2-py3-none-win32.whl", hash = "sha256:71cbe22e178c5da20e1514e1e01029c73dc09288a8028a5d3446e6bba87a5145"},
+    {file = "ruff-0.9.2-py3-none-win_amd64.whl", hash = "sha256:c5e1d6abc798419cf46eed03f54f2e0c3adb1ad4b801119dedf23fcaf69b55b5"},
+    {file = "ruff-0.9.2-py3-none-win_arm64.whl", hash = "sha256:a1b63fa24149918f8b37cef2ee6fff81f24f0d74b6f0bdc37bc3e1f2143e41c6"},
+    {file = "ruff-0.9.2.tar.gz", hash = "sha256:b5eceb334d55fae5f316f783437392642ae18e16dcf4f1858d55d3c2a0f8f5d0"},
 ]
 
 [[package]]
@@ -11384,4 +11384,4 @@ cffi = ["cffi (>=1.11)"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.11,<3.13"
-content-hash = "3bb0ce64c87712cf105c75105a0ca75c0523d6b27001ff6a623bb2a0d1343003"
+content-hash = "3ac10f0687162281a0cd083a52cba5508b086dd42d63dd68175209e88b249142"

+ 1 - 1
api/pyproject.toml

@@ -191,4 +191,4 @@ pytest-mock = "~3.14.0"
 optional = true
 [tool.poetry.group.lint.dependencies]
 dotenv-linter = "~0.5.0"
-ruff = "~0.8.1"
+ruff = "~0.9.2"
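
The constraint bump here is what drives the poetry.lock churn above: the ruff wheel entries and the lockfile content-hash are regenerated when the dependency changes (typically with something like "poetry update ruff" under the Poetry workflow this project uses), and the formatter is then re-run over api/ so the quote fixes land in the same commit.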

+ 1 - 2
api/services/dataset_service.py

@@ -221,8 +221,7 @@ class DatasetService:
                 )
             except LLMBadRequestError:
                 raise ValueError(
-                    "No Embedding Model available. Please configure a valid provider "
-                    "in the Settings -> Model Provider."
+                    "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
                 )
             except ProviderTokenNotInitError as ex:
                 raise ValueError(f"The dataset in unavailable, due to: {ex.description}")

+ 1 - 1
api/services/external_knowledge_service.py

@@ -155,7 +155,7 @@ class ExternalDatasetService:
             if custom_parameters:
                 for parameter in custom_parameters:
                     if parameter.get("required", False) and not process_parameter.get(parameter.get("name")):
-                        raise ValueError(f'{parameter.get("name")} is required')
+                        raise ValueError(f"{parameter.get('name')} is required")
 
     @staticmethod
     def process_external_api(

+ 3 - 3
api/tests/artifact_tests/dependencies/test_dependencies_sorted.py

@@ -44,6 +44,6 @@ def test_duplicated_dependency_crossing_groups() -> None:
         dependency_names = list(dependencies.keys())
         all_dependency_names.extend(dependency_names)
     expected_all_dependency_names = set(all_dependency_names)
-    assert sorted(expected_all_dependency_names) == sorted(
-        all_dependency_names
-    ), "Duplicated dependencies crossing groups are found"
+    assert sorted(expected_all_dependency_names) == sorted(all_dependency_names), (
+        "Duplicated dependencies crossing groups are found"
+    )

+ 3 - 3
api/tests/integration_tests/vdb/opensearch/test_opensearch.py

@@ -89,9 +89,9 @@ class TestOpenSearchVector:
         print("Actual document ID:", hits_by_vector[0].metadata["document_id"] if hits_by_vector else "No hits")
 
         assert len(hits_by_vector) > 0, f"Expected at least one hit, got {len(hits_by_vector)}"
-        assert (
-            hits_by_vector[0].metadata["document_id"] == self.example_doc_id
-        ), f"Expected document ID {self.example_doc_id}, got {hits_by_vector[0].metadata['document_id']}"
+        assert hits_by_vector[0].metadata["document_id"] == self.example_doc_id, (
+            f"Expected document ID {self.example_doc_id}, got {hits_by_vector[0].metadata['document_id']}"
+        )
 
     def test_get_ids_by_metadata_field(self):
         mock_response = {"hits": {"total": {"value": 1}, "hits": [{"_id": "mock_id"}]}}

+ 3 - 3
api/tests/unit_tests/core/workflow/nodes/llm/test_node.py

@@ -438,9 +438,9 @@ def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
 
         # Verify the result
         assert len(prompt_messages) == len(scenario.expected_messages), f"Scenario failed: {scenario.description}"
-        assert (
-            prompt_messages == scenario.expected_messages
-        ), f"Message content mismatch in scenario: {scenario.description}"
+        assert prompt_messages == scenario.expected_messages, (
+            f"Message content mismatch in scenario: {scenario.description}"
+        )
 
 
 def test_handle_list_messages_basic(llm_node):

+ 1 - 2
api/tests/unit_tests/services/workflow/test_workflow_converter.py

@@ -401,8 +401,7 @@ def test__convert_to_llm_node_for_workflow_advanced_completion_model(default_var
     prompt_template = PromptTemplateEntity(
         prompt_type=PromptTemplateEntity.PromptType.ADVANCED,
         advanced_completion_prompt_template=AdvancedCompletionPromptTemplateEntity(
-            prompt="You are a helpful assistant named {{name}}.\n\nContext:\n{{#context#}}\n\n"
-            "Human: hi\nAssistant: ",
+            prompt="You are a helpful assistant named {{name}}.\n\nContext:\n{{#context#}}\n\nHuman: hi\nAssistant: ",
             role_prefix=AdvancedCompletionPromptTemplateEntity.RolePrefixEntity(user="Human", assistant="Assistant"),
         ),
     )