
chore: apply flake8-pytest-style linter rules (#8307)

Bowen Liang, 7 months ago
commit 8815511ccb

+ 2 - 0
api/pyproject.toml

@@ -18,6 +18,7 @@ select = [
     "FURB", # refurb rules
     "I", # isort rules
     "N", # pep8-naming
+    "PT", # flake8-pytest-style rules
     "RUF019", # unnecessary-key-check
     "RUF100", # unused-noqa
     "RUF101", # redirected-noqa
@@ -50,6 +51,7 @@ ignore = [
     "B905", # zip-without-explicit-strict
     "N806", # non-lowercase-variable-in-function
     "N815", # mixed-case-variable-in-class-scope
+    "PT011", # pytest-raises-too-broad
     "SIM102", # collapsible-if
     "SIM103", # needless-bool
     "SIM105", # suppressible-exception

+ 6 - 6
api/tests/integration_tests/model_runtime/xinference/test_llm.py

@@ -20,7 +20,7 @@ from tests.integration_tests.model_runtime.__mock.openai import setup_openai_moc
 from tests.integration_tests.model_runtime.__mock.xinference import setup_xinference_mock
 
 
-@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["chat", "none"]], indirect=True)
+@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True)
 def test_validate_credentials_for_chat_model(setup_openai_mock, setup_xinference_mock):
     model = XinferenceAILargeLanguageModel()
 
@@ -45,7 +45,7 @@ def test_validate_credentials_for_chat_model(setup_openai_mock, setup_xinference
     )
 
 
-@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["chat", "none"]], indirect=True)
+@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True)
 def test_invoke_chat_model(setup_openai_mock, setup_xinference_mock):
     model = XinferenceAILargeLanguageModel()
 
@@ -75,7 +75,7 @@ def test_invoke_chat_model(setup_openai_mock, setup_xinference_mock):
     assert response.usage.total_tokens > 0
 
 
-@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["chat", "none"]], indirect=True)
+@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True)
 def test_invoke_stream_chat_model(setup_openai_mock, setup_xinference_mock):
     model = XinferenceAILargeLanguageModel()
 
@@ -236,7 +236,7 @@ def test_invoke_stream_chat_model(setup_openai_mock, setup_xinference_mock):
 #     assert response.message.tool_calls[0].function.name == 'get_current_weather'
 
 
-@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["completion", "none"]], indirect=True)
+@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True)
 def test_validate_credentials_for_generation_model(setup_openai_mock, setup_xinference_mock):
     model = XinferenceAILargeLanguageModel()
 
@@ -261,7 +261,7 @@ def test_validate_credentials_for_generation_model(setup_openai_mock, setup_xinf
     )
 
 
-@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["completion", "none"]], indirect=True)
+@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True)
 def test_invoke_generation_model(setup_openai_mock, setup_xinference_mock):
     model = XinferenceAILargeLanguageModel()
 
@@ -286,7 +286,7 @@ def test_invoke_generation_model(setup_openai_mock, setup_xinference_mock):
     assert response.usage.total_tokens > 0
 
 
-@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["completion", "none"]], indirect=True)
+@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True)
 def test_invoke_stream_generation_model(setup_openai_mock, setup_xinference_mock):
     model = XinferenceAILargeLanguageModel()
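
These changes follow PT006 (argnames given as a tuple of strings rather than one comma-separated string) and PT007 (each parameter set given as a tuple rather than a list). A minimal sketch with hypothetical parameter names:

    import pytest

    @pytest.mark.parametrize(
        ("mode", "backend"),   # PT006: tuple of names, not "mode, backend"
        [("chat", "none")],    # PT007: tuple per row, not ["chat", "none"]
        # indirect=True would route these values to same-named fixtures, as in the tests above
    )
    def test_example(mode, backend):
        assert mode == "chat"
        assert backend == "none"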
 

+ 4 - 2
api/tests/integration_tests/utils/test_module_import_helper.py

@@ -9,7 +9,8 @@ def test_loading_subclass_from_source():
     module = load_single_subclass_from_source(
         module_name="ChildClass", script_path=os.path.join(current_path, "child_class.py"), parent_type=ParentClass
     )
-    assert module and module.__name__ == "ChildClass"
+    assert module
+    assert module.__name__ == "ChildClass"
 
 
 def test_load_import_module_from_source():
@@ -17,7 +18,8 @@ def test_load_import_module_from_source():
     module = import_module_from_source(
         module_name="ChildClass", py_file_path=os.path.join(current_path, "child_class.py")
     )
-    assert module and module.__name__ == "ChildClass"
+    assert module
+    assert module.__name__ == "ChildClass"
 
 
 def test_lazy_loading_subclass_from_source():
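
Splitting the compound `assert module and module.__name__ == ...` follows PT018 (composite assertion): separate assertions report which half failed instead of a single opaque expression. A minimal, self-contained sketch:

    def test_split_assertions():
        result = {"__name__": "ChildClass"}  # stand-in for the loaded module in this sketch
        # A combined `assert result and result["__name__"] == "ChildClass"` only reports the
        # whole expression on failure; split assertions pinpoint which check broke.
        assert result
        assert result["__name__"] == "ChildClass"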

+ 1 - 1
api/tests/integration_tests/vdb/opensearch/test_opensearch.py

@@ -34,7 +34,7 @@ class TestOpenSearchVector:
         self.vector._client = MagicMock()
 
     @pytest.mark.parametrize(
-        "search_response, expected_length, expected_doc_id",
+        ("search_response", "expected_length", "expected_doc_id"),
         [
             (
                 {

+ 1 - 1
api/tests/unit_tests/conftest.py

@@ -13,7 +13,7 @@ CACHED_APP = Flask(__name__)
 CACHED_APP.config.update({"TESTING": True})
 
 
-@pytest.fixture()
+@pytest.fixture
 def app() -> Flask:
     return CACHED_APP
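
Dropping the empty parentheses follows PT001 (fixture decorated with parentheses when no arguments are passed). A minimal sketch, assuming Flask is available as in this test suite; `session_app` is a hypothetical second fixture shown only to contrast the two forms:

    import pytest
    from flask import Flask

    @pytest.fixture  # PT001: no parentheses when the decorator takes no arguments
    def app() -> Flask:
        return Flask(__name__)

    @pytest.fixture(scope="session")  # parentheses stay when arguments such as scope are passed
    def session_app() -> Flask:
        return Flask(__name__)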
 

+ 4 - 4
api/tests/unit_tests/core/helper/test_ssrf_proxy.py

@@ -1,6 +1,8 @@
 import random
 from unittest.mock import MagicMock, patch
 
+import pytest
+
 from core.helper.ssrf_proxy import SSRF_DEFAULT_MAX_RETRIES, STATUS_FORCELIST, make_request
 
 
@@ -22,11 +24,9 @@ def test_retry_exceed_max_retries(mock_request):
     side_effects = [mock_response] * SSRF_DEFAULT_MAX_RETRIES
     mock_request.side_effect = side_effects
 
-    try:
+    with pytest.raises(Exception) as e:
         make_request("GET", "http://example.com", max_retries=SSRF_DEFAULT_MAX_RETRIES - 1)
-        raise AssertionError("Expected Exception not raised")
-    except Exception as e:
-        assert str(e) == f"Reached maximum retries ({SSRF_DEFAULT_MAX_RETRIES - 1}) for URL http://example.com"
+    assert str(e.value) == f"Reached maximum retries ({SSRF_DEFAULT_MAX_RETRIES - 1}) for URL http://example.com"
 
 
 @patch("httpx.request")

+ 5 - 9
api/tests/unit_tests/libs/test_email.py

@@ -1,3 +1,5 @@
+import pytest
+
 from libs.helper import email
 
 
@@ -9,17 +11,11 @@ def test_email_with_valid_email():
 
 
 def test_email_with_invalid_email():
-    try:
+    with pytest.raises(ValueError, match="invalid_email is not a valid email."):
         email("invalid_email")
-    except ValueError as e:
-        assert str(e) == "invalid_email is not a valid email."
 
-    try:
+    with pytest.raises(ValueError, match="@example.com is not a valid email."):
         email("@example.com")
-    except ValueError as e:
-        assert str(e) == "@example.com is not a valid email."
 
-    try:
+    with pytest.raises(ValueError, match="()@example.com is not a valid email."):
         email("()@example.com")
-    except ValueError as e:
-        assert str(e) == "()@example.com is not a valid email."
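
The same rewrite applies here, with the expected message passed via `match`. Note that `match` is applied as a regular expression via `re.search`, not compared as a literal string, so metacharacters such as "." and "(" are interpreted as regex syntax unless escaped. A minimal sketch using a hypothetical `reject` helper:

    import pytest

    def reject(value: str) -> None:
        raise ValueError(f"{value} is not a valid email.")

    def test_reject_message():
        # Escaping the dots keeps the pattern strict, since match is a regex, not a literal.
        with pytest.raises(ValueError, match=r"bad@example\.com is not a valid email\."):
            reject("bad@example.com")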