From f58b9c59ae3a6a4137095a8aa7ddb1f0f6efd136 Mon Sep 17 00:00:00 2001 From: George Sittas Date: Mon, 23 Jun 2025 13:51:12 +0300 Subject: [PATCH] Chore: do not log test results if there are no tests --- sqlmesh/core/console.py | 12 ++++++++++++ tests/core/test_test.py | 24 ++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/sqlmesh/core/console.py b/sqlmesh/core/console.py index cc112e6ebd..6b600dcd73 100644 --- a/sqlmesh/core/console.py +++ b/sqlmesh/core/console.py @@ -1954,6 +1954,10 @@ def _prompt_promote(self, plan_builder: PlanBuilder) -> None: plan_builder.apply() def log_test_results(self, result: ModelTextTestResult, target_dialect: str) -> None: + # We don't log the test results if no tests were run + if not result.testsRun: + return + divider_length = 70 self._log_test_details(result) @@ -2827,6 +2831,10 @@ def radio_button_selected(change: t.Dict[str, t.Any]) -> None: self.display(radio) def log_test_results(self, result: ModelTextTestResult, target_dialect: str) -> None: + # We don't log the test results if no tests were run + if not result.testsRun: + return + import ipywidgets as widgets divider_length = 70 @@ -3206,6 +3214,10 @@ def log_success(self, message: str) -> None: self._print(message) def log_test_results(self, result: ModelTextTestResult, target_dialect: str) -> None: + # We don't log the test results if no tests were run + if not result.testsRun: + return + message = f"Ran `{result.testsRun}` Tests Against `{target_dialect}`" if result.wasSuccessful(): diff --git a/tests/core/test_test.py b/tests/core/test_test.py index d803f8bdc9..f4d495801a 100644 --- a/tests/core/test_test.py +++ b/tests/core/test_test.py @@ -2685,6 +2685,10 @@ def test_model_test_text_result_reporting_no_traceback( else: result.addFailure(test, (e.__class__, e, e.__traceback__)) + # Since we're simulating an error/failure, this doesn't go through the + # test runner logic, so we need to manually set how many tests were run + result.testsRun = 1 + 
with capture_output() as captured_output: get_console().log_test_results(result, "duckdb") @@ -2729,3 +2733,23 @@ def test_timestamp_normalization() -> None: context=Context(config=Config(model_defaults=ModelDefaultsConfig(dialect="duckdb"))), ).run() ) + + +@use_terminal_console +def test_disable_test_logging_if_no_tests_found(mocker: MockerFixture, tmp_path: Path) -> None: + init_example_project(tmp_path, dialect="duckdb") + + config = Config( + default_connection=DuckDBConnectionConfig(), + model_defaults=ModelDefaultsConfig(dialect="duckdb"), + default_test_connection=DuckDBConnectionConfig(concurrent_tasks=8), + ) + + rmtree(tmp_path / "tests") + + with capture_output() as captured_output: + context = Context(paths=tmp_path, config=config) + context.plan(no_prompts=True, auto_apply=True) + + output = captured_output.stdout + assert "test" not in output.lower()