Sfoglia il codice sorgente

chore: fix unnecessary string concatenation in single line (#8311)

Bowen Liang 7 mesi fa
parent
commit
6613b8f2e0
30 ha cambiato i file con 46 aggiunte e 49 eliminazioni
  1. 4 5
      api/commands.py
  2. 3 3
      api/configs/feature/__init__.py
  3. 1 1
      api/controllers/console/app/workflow.py
  4. 1 1
      api/controllers/console/datasets/datasets.py
  5. 1 3
      api/controllers/console/error.py
  6. 1 1
      api/controllers/console/workspace/model_providers.py
  7. 1 1
      api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py
  8. 1 1
      api/core/app/app_config/easy_ui_based_app/variables/manager.py
  9. 1 1
      api/core/app/apps/base_app_runner.py
  10. 3 3
      api/core/app/apps/workflow_logging_callback.py
  11. 1 1
      api/core/model_runtime/model_providers/__base/ai_model.py
  12. 1 1
      api/core/model_runtime/model_providers/cohere/llm/llm.py
  13. 2 2
      api/core/model_runtime/model_providers/huggingface_hub/llm/llm.py
  14. 1 1
      api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py
  15. 2 2
      api/core/model_runtime/model_providers/ollama/llm/llm.py
  16. 1 1
      api/core/model_runtime/model_providers/replicate/llm/llm.py
  17. 1 1
      api/core/model_runtime/model_providers/tongyi/llm/llm.py
  18. 1 1
      api/core/rag/datasource/vdb/relyt/relyt_vector.py
  19. 1 1
      api/core/rag/docstore/dataset_docstore.py
  20. 1 1
      api/core/rag/extractor/notion_extractor.py
  21. 3 3
      api/core/rag/splitter/text_splitter.py
  22. 1 1
      api/core/tools/provider/builtin/gaode/gaode.py
  23. 1 1
      api/core/tools/provider/builtin/gaode/tools/gaode_weather.py
  24. 1 1
      api/core/tools/provider/builtin/github/tools/github_repositories.py
  25. 2 2
      api/core/tools/provider/builtin/pubmed/tools/pubmed_search.py
  26. 1 1
      api/core/tools/provider/builtin/twilio/tools/send_message.py
  27. 1 1
      api/libs/json_in_md_parser.py
  28. 3 3
      api/services/app_dsl_service.py
  29. 3 3
      api/services/dataset_service.py
  30. 1 1
      api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py

+ 4 - 5
api/commands.py

@@ -104,7 +104,7 @@ def reset_email(email, new_email, email_confirm):
 )
 @click.confirmation_option(
     prompt=click.style(
-        "Are you sure you want to reset encrypt key pair?" " this operation cannot be rolled back!", fg="red"
+        "Are you sure you want to reset encrypt key pair? this operation cannot be rolled back!", fg="red"
     )
 )
 def reset_encrypt_key_pair():
@@ -131,7 +131,7 @@ def reset_encrypt_key_pair():
 
         click.echo(
             click.style(
-                "Congratulations! " "the asymmetric key pair of workspace {} has been reset.".format(tenant.id),
+                "Congratulations! The asymmetric key pair of workspace {} has been reset.".format(tenant.id),
                 fg="green",
             )
         )
@@ -275,8 +275,7 @@ def migrate_knowledge_vector_database():
         for dataset in datasets:
             total_count = total_count + 1
             click.echo(
-                f"Processing the {total_count} dataset {dataset.id}. "
-                + f"{create_count} created, {skipped_count} skipped."
+                f"Processing the {total_count} dataset {dataset.id}. {create_count} created, {skipped_count} skipped."
             )
             try:
                 click.echo("Create dataset vdb index: {}".format(dataset.id))
@@ -594,7 +593,7 @@ def create_tenant(email: str, language: Optional[str] = None, name: Optional[str
 
     click.echo(
         click.style(
-            "Congratulations! Account and tenant created.\n" "Account: {}\nPassword: {}".format(email, new_password),
+            "Congratulations! Account and tenant created.\nAccount: {}\nPassword: {}".format(email, new_password),
             fg="green",
         )
     )

+ 3 - 3
api/configs/feature/__init__.py

@@ -129,12 +129,12 @@ class EndpointConfig(BaseSettings):
     )
 
     SERVICE_API_URL: str = Field(
-        description="Service API Url prefix." "used to display Service API Base Url to the front-end.",
+        description="Service API Url prefix. used to display Service API Base Url to the front-end.",
         default="",
     )
 
     APP_WEB_URL: str = Field(
-        description="WebApp Url prefix." "used to display WebAPP API Base Url to the front-end.",
+        description="WebApp Url prefix. used to display WebAPP API Base Url to the front-end.",
         default="",
     )
 
@@ -272,7 +272,7 @@ class LoggingConfig(BaseSettings):
     """
 
     LOG_LEVEL: str = Field(
-        description="Log output level, default to INFO." "It is recommended to set it to ERROR for production.",
+        description="Log output level, default to INFO. It is recommended to set it to ERROR for production.",
         default="INFO",
     )
 

+ 1 - 1
api/controllers/console/app/workflow.py

@@ -465,6 +465,6 @@ api.add_resource(
 api.add_resource(PublishedWorkflowApi, "/apps/<uuid:app_id>/workflows/publish")
 api.add_resource(DefaultBlockConfigsApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs")
 api.add_resource(
-    DefaultBlockConfigApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs" "/<string:block_type>"
+    DefaultBlockConfigApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs/<string:block_type>"
 )
 api.add_resource(ConvertToWorkflowApi, "/apps/<uuid:app_id>/convert-to-workflow")

+ 1 - 1
api/controllers/console/datasets/datasets.py

@@ -399,7 +399,7 @@ class DatasetIndexingEstimateApi(Resource):
             )
         except LLMBadRequestError:
             raise ProviderNotInitializeError(
-                "No Embedding Model available. Please configure a valid provider " "in the Settings -> Model Provider."
+                "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
             )
         except ProviderTokenNotInitError as ex:
             raise ProviderNotInitializeError(ex.description)

+ 1 - 3
api/controllers/console/error.py

@@ -18,9 +18,7 @@ class NotSetupError(BaseHTTPException):
 
 class NotInitValidateError(BaseHTTPException):
     error_code = "not_init_validated"
-    description = (
-        "Init validation has not been completed yet. " "Please proceed with the init validation process first."
-    )
+    description = "Init validation has not been completed yet. Please proceed with the init validation process first."
     code = 401
 
 

+ 1 - 1
api/controllers/console/workspace/model_providers.py

@@ -218,7 +218,7 @@ api.add_resource(ModelProviderCredentialApi, "/workspaces/current/model-provider
 api.add_resource(ModelProviderValidateApi, "/workspaces/current/model-providers/<string:provider>/credentials/validate")
 api.add_resource(ModelProviderApi, "/workspaces/current/model-providers/<string:provider>")
 api.add_resource(
-    ModelProviderIconApi, "/workspaces/current/model-providers/<string:provider>/" "<string:icon_type>/<string:lang>"
+    ModelProviderIconApi, "/workspaces/current/model-providers/<string:provider>/<string:icon_type>/<string:lang>"
 )
 
 api.add_resource(

+ 1 - 1
api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py

@@ -86,7 +86,7 @@ class PromptTemplateConfigManager:
         if config["prompt_type"] == PromptTemplateEntity.PromptType.ADVANCED.value:
             if not config["chat_prompt_config"] and not config["completion_prompt_config"]:
                 raise ValueError(
-                    "chat_prompt_config or completion_prompt_config is required " "when prompt_type is advanced"
+                    "chat_prompt_config or completion_prompt_config is required when prompt_type is advanced"
                 )
 
             model_mode_vals = [mode.value for mode in ModelMode]

+ 1 - 1
api/core/app/app_config/easy_ui_based_app/variables/manager.py

@@ -115,7 +115,7 @@ class BasicVariablesConfigManager:
 
             pattern = re.compile(r"^(?!\d)[\u4e00-\u9fa5A-Za-z0-9_\U0001F300-\U0001F64F\U0001F680-\U0001F6FF]{1,100}$")
             if pattern.match(form_item["variable"]) is None:
-                raise ValueError("variable in user_input_form must be a string, " "and cannot start with a number")
+                raise ValueError("variable in user_input_form must be a string, and cannot start with a number")
 
             variables.append(form_item["variable"])
 

+ 1 - 1
api/core/app/apps/base_app_runner.py

@@ -379,7 +379,7 @@ class AppRunner:
                 queue_manager=queue_manager,
                 app_generate_entity=application_generate_entity,
                 prompt_messages=prompt_messages,
-                text="I apologize for any confusion, " "but I'm an AI assistant to be helpful, harmless, and honest.",
+                text="I apologize for any confusion, but I'm an AI assistant to be helpful, harmless, and honest.",
                 stream=application_generate_entity.stream,
             )
 

+ 3 - 3
api/core/app/apps/workflow_logging_callback.py

@@ -84,7 +84,7 @@ class WorkflowLoggingCallback(WorkflowCallback):
         if route_node_state.node_run_result:
             node_run_result = route_node_state.node_run_result
             self.print_text(
-                f"Inputs: " f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
+                f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
                 color="green",
             )
             self.print_text(
@@ -116,7 +116,7 @@ class WorkflowLoggingCallback(WorkflowCallback):
             node_run_result = route_node_state.node_run_result
             self.print_text(f"Error: {node_run_result.error}", color="red")
             self.print_text(
-                f"Inputs: " f"" f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
+                f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
                 color="red",
             )
             self.print_text(
@@ -125,7 +125,7 @@ class WorkflowLoggingCallback(WorkflowCallback):
                 color="red",
             )
             self.print_text(
-                f"Outputs: " f"{jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}",
+                f"Outputs: {jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}",
                 color="red",
             )
 

+ 1 - 1
api/core/model_runtime/model_providers/__base/ai_model.py

@@ -200,7 +200,7 @@ class AIModel(ABC):
             except Exception as e:
                 model_schema_yaml_file_name = os.path.basename(model_schema_yaml_path).rstrip(".yaml")
                 raise Exception(
-                    f"Invalid model schema for {provider_name}.{model_type}.{model_schema_yaml_file_name}:" f" {str(e)}"
+                    f"Invalid model schema for {provider_name}.{model_type}.{model_schema_yaml_file_name}: {str(e)}"
                 )
 
             # cache model schema

+ 1 - 1
api/core/model_runtime/model_providers/cohere/llm/llm.py

@@ -621,7 +621,7 @@ class CohereLargeLanguageModel(LargeLanguageModel):
 
                 desc = p_val["description"]
                 if "enum" in p_val:
-                    desc += f"; Only accepts one of the following predefined options: " f"[{', '.join(p_val['enum'])}]"
+                    desc += f"; Only accepts one of the following predefined options: [{', '.join(p_val['enum'])}]"
 
                 parameter_definitions[p_key] = ToolParameterDefinitionsValue(
                     description=desc, type=p_val["type"], required=required

+ 2 - 2
api/core/model_runtime/model_providers/huggingface_hub/llm/llm.py

@@ -96,7 +96,7 @@ class HuggingfaceHubLargeLanguageModel(_CommonHuggingfaceHub, LargeLanguageModel
 
             if credentials["task_type"] not in ("text2text-generation", "text-generation"):
                 raise CredentialsValidateFailedError(
-                    "Huggingface Hub Task Type must be one of text2text-generation, " "text-generation."
+                    "Huggingface Hub Task Type must be one of text2text-generation, text-generation."
                 )
 
             client = InferenceClient(token=credentials["huggingfacehub_api_token"])
@@ -282,7 +282,7 @@ class HuggingfaceHubLargeLanguageModel(_CommonHuggingfaceHub, LargeLanguageModel
 
             valid_tasks = ("text2text-generation", "text-generation")
             if model_info.pipeline_tag not in valid_tasks:
-                raise ValueError(f"Model {model_name} is not a valid task, " f"must be one of {valid_tasks}.")
+                raise ValueError(f"Model {model_name} is not a valid task, must be one of {valid_tasks}.")
         except Exception as e:
             raise CredentialsValidateFailedError(f"{str(e)}")
 

+ 1 - 1
api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py

@@ -121,7 +121,7 @@ class HuggingfaceHubTextEmbeddingModel(_CommonHuggingfaceHub, TextEmbeddingModel
 
             valid_tasks = "feature-extraction"
             if model_info.pipeline_tag not in valid_tasks:
-                raise ValueError(f"Model {model_name} is not a valid task, " f"must be one of {valid_tasks}.")
+                raise ValueError(f"Model {model_name} is not a valid task, must be one of {valid_tasks}.")
         except Exception as e:
             raise CredentialsValidateFailedError(f"{str(e)}")
 

+ 2 - 2
api/core/model_runtime/model_providers/ollama/llm/llm.py

@@ -572,7 +572,7 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
                     label=I18nObject(en_US="Size of context window"),
                     type=ParameterType.INT,
                     help=I18nObject(
-                        en_US="Sets the size of the context window used to generate the next token. " "(Default: 2048)"
+                        en_US="Sets the size of the context window used to generate the next token. (Default: 2048)"
                     ),
                     default=2048,
                     min=1,
@@ -650,7 +650,7 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
                     label=I18nObject(en_US="Format"),
                     type=ParameterType.STRING,
                     help=I18nObject(
-                        en_US="the format to return a response in." " Currently the only accepted value is json."
+                        en_US="the format to return a response in. Currently the only accepted value is json."
                     ),
                     options=["json"],
                 ),

+ 1 - 1
api/core/model_runtime/model_providers/replicate/llm/llm.py

@@ -86,7 +86,7 @@ class ReplicateLargeLanguageModel(_CommonReplicate, LargeLanguageModel):
 
         if model.count("/") != 1:
             raise CredentialsValidateFailedError(
-                "Replicate Model Name must be provided, " "format: {user_name}/{model_name}"
+                "Replicate Model Name must be provided, format: {user_name}/{model_name}"
             )
 
         try:

+ 1 - 1
api/core/model_runtime/model_providers/tongyi/llm/llm.py

@@ -472,7 +472,7 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
             for p_key, p_val in properties.items():
                 desc = p_val["description"]
                 if "enum" in p_val:
-                    desc += f"; Only accepts one of the following predefined options: " f"[{', '.join(p_val['enum'])}]"
+                    desc += f"; Only accepts one of the following predefined options: [{', '.join(p_val['enum'])}]"
 
                 properties_definitions[p_key] = {
                     "description": desc,

+ 1 - 1
api/core/rag/datasource/vdb/relyt/relyt_vector.py

@@ -245,7 +245,7 @@ class RelytVector(BaseVector):
         try:
             from sqlalchemy.engine import Row
         except ImportError:
-            raise ImportError("Could not import Row from sqlalchemy.engine. " "Please 'pip install sqlalchemy>=1.4'.")
+            raise ImportError("Could not import Row from sqlalchemy.engine. Please 'pip install sqlalchemy>=1.4'.")
 
         filter_condition = ""
         if filter is not None:

+ 1 - 1
api/core/rag/docstore/dataset_docstore.py

@@ -88,7 +88,7 @@ class DatasetDocumentStore:
             # NOTE: doc could already exist in the store, but we overwrite it
             if not allow_update and segment_document:
                 raise ValueError(
-                    f"doc_id {doc.metadata['doc_id']} already exists. " "Set allow_update to True to overwrite."
+                    f"doc_id {doc.metadata['doc_id']} already exists. Set allow_update to True to overwrite."
                 )
 
             # calc embedding use tokens

+ 1 - 1
api/core/rag/extractor/notion_extractor.py

@@ -50,7 +50,7 @@ class NotionExtractor(BaseExtractor):
                 integration_token = dify_config.NOTION_INTEGRATION_TOKEN
                 if integration_token is None:
                     raise ValueError(
-                        "Must specify `integration_token` or set environment " "variable `NOTION_INTEGRATION_TOKEN`."
+                        "Must specify `integration_token` or set environment variable `NOTION_INTEGRATION_TOKEN`."
                     )
 
                 self._notion_access_token = integration_token

+ 3 - 3
api/core/rag/splitter/text_splitter.py

@@ -60,7 +60,7 @@ class TextSplitter(BaseDocumentTransformer, ABC):
         """
         if chunk_overlap > chunk_size:
             raise ValueError(
-                f"Got a larger chunk overlap ({chunk_overlap}) than chunk size " f"({chunk_size}), should be smaller."
+                f"Got a larger chunk overlap ({chunk_overlap}) than chunk size ({chunk_size}), should be smaller."
             )
         self._chunk_size = chunk_size
         self._chunk_overlap = chunk_overlap
@@ -117,7 +117,7 @@ class TextSplitter(BaseDocumentTransformer, ABC):
             if total + _len + (separator_len if len(current_doc) > 0 else 0) > self._chunk_size:
                 if total > self._chunk_size:
                     logger.warning(
-                        f"Created a chunk of size {total}, " f"which is longer than the specified {self._chunk_size}"
+                        f"Created a chunk of size {total}, which is longer than the specified {self._chunk_size}"
                     )
                 if len(current_doc) > 0:
                     doc = self._join_docs(current_doc, separator)
@@ -153,7 +153,7 @@ class TextSplitter(BaseDocumentTransformer, ABC):
 
         except ImportError:
             raise ValueError(
-                "Could not import transformers python package. " "Please install it with `pip install transformers`."
+                "Could not import transformers python package. Please install it with `pip install transformers`."
             )
         return cls(length_function=_huggingface_tokenizer_length, **kwargs)
 

+ 1 - 1
api/core/tools/provider/builtin/gaode/gaode.py

@@ -14,7 +14,7 @@ class GaodeProvider(BuiltinToolProviderController):
 
             try:
                 response = requests.get(
-                    url="https://restapi.amap.com/v3/geocode/geo?address={address}&key={apikey}" "".format(
+                    url="https://restapi.amap.com/v3/geocode/geo?address={address}&key={apikey}".format(
                         address=urllib.parse.quote("广东省广州市天河区广州塔"), apikey=credentials.get("api_key")
                     )
                 )

+ 1 - 1
api/core/tools/provider/builtin/gaode/tools/gaode_weather.py

@@ -27,7 +27,7 @@ class GaodeRepositoriesTool(BuiltinTool):
             city_response = s.request(
                 method="GET",
                 headers={"Content-Type": "application/json; charset=utf-8"},
-                url="{url}/config/district?keywords={keywords}" "&subdistrict=0&extensions=base&key={apikey}" "".format(
+                url="{url}/config/district?keywords={keywords}&subdistrict=0&extensions=base&key={apikey}".format(
                     url=api_domain, keywords=city, apikey=self.runtime.credentials.get("api_key")
                 ),
             )

+ 1 - 1
api/core/tools/provider/builtin/github/tools/github_repositories.py

@@ -39,7 +39,7 @@ class GithubRepositoriesTool(BuiltinTool):
             response = s.request(
                 method="GET",
                 headers=headers,
-                url=f"{api_domain}/search/repositories?" f"q={quote(query)}&sort=stars&per_page={top_n}&order=desc",
+                url=f"{api_domain}/search/repositories?q={quote(query)}&sort=stars&per_page={top_n}&order=desc",
             )
             response_data = response.json()
             if response.status_code == 200 and isinstance(response_data.get("items"), list):

+ 2 - 2
api/core/tools/provider/builtin/pubmed/tools/pubmed_search.py

@@ -51,7 +51,7 @@ class PubMedAPIWrapper(BaseModel):
         try:
             # Retrieve the top-k results for the query
             docs = [
-                f"Published: {result['pub_date']}\nTitle: {result['title']}\n" f"Summary: {result['summary']}"
+                f"Published: {result['pub_date']}\nTitle: {result['title']}\nSummary: {result['summary']}"
                 for result in self.load(query[: self.ARXIV_MAX_QUERY_LENGTH])
             ]
 
@@ -97,7 +97,7 @@ class PubMedAPIWrapper(BaseModel):
                 if e.code == 429 and retry < self.max_retry:
                     # Too Many Requests error
                     # wait for an exponentially increasing amount of time
-                    print(f"Too Many Requests, " f"waiting for {self.sleep_time:.2f} seconds...")
+                    print(f"Too Many Requests, waiting for {self.sleep_time:.2f} seconds...")
                     time.sleep(self.sleep_time)
                     self.sleep_time *= 2
                     retry += 1

+ 1 - 1
api/core/tools/provider/builtin/twilio/tools/send_message.py

@@ -39,7 +39,7 @@ class TwilioAPIWrapper(BaseModel):
         try:
             from twilio.rest import Client
         except ImportError:
-            raise ImportError("Could not import twilio python package. " "Please install it with `pip install twilio`.")
+            raise ImportError("Could not import twilio python package. Please install it with `pip install twilio`.")
         account_sid = values.get("account_sid")
         auth_token = values.get("auth_token")
         values["from_number"] = values.get("from_number")

+ 1 - 1
api/libs/json_in_md_parser.py

@@ -37,6 +37,6 @@ def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict:
     for key in expected_keys:
         if key not in json_obj:
             raise OutputParserError(
-                f"Got invalid return object. Expected key `{key}` " f"to be present, but got {json_obj}"
+                f"Got invalid return object. Expected key `{key}` to be present, but got {json_obj}"
             )
     return json_obj

+ 3 - 3
api/services/app_dsl_service.py

@@ -238,7 +238,7 @@ class AppDslService:
         :param use_icon_as_answer_icon: use app icon as answer icon
         """
         if not workflow_data:
-            raise ValueError("Missing workflow in data argument " "when app mode is advanced-chat or workflow")
+            raise ValueError("Missing workflow in data argument when app mode is advanced-chat or workflow")
 
         app = cls._create_app(
             tenant_id=tenant_id,
@@ -283,7 +283,7 @@ class AppDslService:
         :param account: Account instance
         """
         if not workflow_data:
-            raise ValueError("Missing workflow in data argument " "when app mode is advanced-chat or workflow")
+            raise ValueError("Missing workflow in data argument when app mode is advanced-chat or workflow")
 
         # fetch draft workflow by app_model
         workflow_service = WorkflowService()
@@ -337,7 +337,7 @@ class AppDslService:
         :param icon_background: app icon background
         """
         if not model_config_data:
-            raise ValueError("Missing model_config in data argument " "when app mode is chat, agent-chat or completion")
+            raise ValueError("Missing model_config in data argument when app mode is chat, agent-chat or completion")
 
         app = cls._create_app(
             tenant_id=tenant_id,

+ 3 - 3
api/services/dataset_service.py

@@ -181,7 +181,7 @@ class DatasetService:
                     "in the Settings -> Model Provider."
                 )
             except ProviderTokenNotInitError as ex:
-                raise ValueError(f"The dataset in unavailable, due to: " f"{ex.description}")
+                raise ValueError(f"The dataset in unavailable, due to: {ex.description}")
 
     @staticmethod
     def check_embedding_model_setting(tenant_id: str, embedding_model_provider: str, embedding_model: str):
@@ -195,10 +195,10 @@ class DatasetService:
             )
         except LLMBadRequestError:
             raise ValueError(
-                "No Embedding Model available. Please configure a valid provider " "in the Settings -> Model Provider."
+                "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
             )
         except ProviderTokenNotInitError as ex:
-            raise ValueError(f"The dataset in unavailable, due to: " f"{ex.description}")
+            raise ValueError(f"The dataset in unavailable, due to: {ex.description}")
 
     @staticmethod
     def update_dataset(dataset_id, data, user):

+ 1 - 1
api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py

@@ -53,7 +53,7 @@ def test__get_completion_model_prompt_messages():
             "#context#": context,
             "#histories#": "\n".join(
                 [
-                    f"{'Human' if prompt.role.value == 'user' else 'Assistant'}: " f"{prompt.content}"
+                    f"{'Human' if prompt.role.value == 'user' else 'Assistant'}: {prompt.content}"
                     for prompt in history_prompt_messages
                 ]
             ),