Skip to content

Commit 894bc78

Browse files
authored
Fix: use MarkdownConsole in non-interactive contexts (#4306)
1 parent 983aff7 commit 894bc78

7 files changed

Lines changed: 379 additions & 54 deletions

File tree

docs/reference/configuration.md

Lines changed: 33 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -248,32 +248,64 @@ For example, you might have a specific connection where your tests should run re
248248

249249
## Debug mode
250250

251-
To enable debug mode set the `SQLMESH_DEBUG` environment variable to one of the following values: "1", "true", "t", "yes" or "y".
251+
Enable debug mode in one of two ways:
252+
253+
- Pass the `--debug` flag between the CLI command and the subcommand. For example, `sqlmesh --debug plan`.
254+
- Set the `SQLMESH_DEBUG` environment variable to one of the following values: "1", "true", "t", "yes" or "y".
252255

253256
Enabling this mode ensures that full backtraces are printed when using CLI. The default log level is set to `DEBUG` when this mode is enabled.
254257

255258
Example enabling debug mode for the CLI command `sqlmesh plan`:
256259

257260
=== "Bash"
258261

262+
```bash
263+
$ sqlmesh --debug plan
264+
```
265+
259266
```bash
260267
$ SQLMESH_DEBUG=1 sqlmesh plan
261268
```
262269

263270
=== "MS Powershell"
264271

272+
```powershell
273+
PS> sqlmesh --debug plan
274+
```
275+
265276
```powershell
266277
PS> $env:SQLMESH_DEBUG=1
267278
PS> sqlmesh plan
268279
```
269280

270281
=== "MS CMD"
271282

283+
```cmd
284+
C:\> sqlmesh --debug plan
285+
```
286+
272287
```cmd
273288
C:\> set SQLMESH_DEBUG=1
274289
C:\> sqlmesh plan
275290
```
276291

292+
## Runtime Environment
293+
294+
SQLMesh can run in different runtime environments. For example, you might run it in a regular command-line terminal, in a Jupyter notebook, or in GitHub's CI/CD platform.
295+
296+
When it starts up, SQLMesh automatically detects the runtime environment and adjusts its behavior accordingly. For example, it registers `%magic` commands if in a Jupyter notebook and adjusts logging behavior if in a CI/CD environment.
297+
298+
If necessary, you may force SQLMesh to use a specific runtime environment by setting the `SQLMESH_RUNTIME_ENVIRONMENT` environment variable.
299+
300+
It accepts the following values; each causes SQLMesh to behave as if it were running in the runtime environment described in parentheses:
301+
302+
- `terminal` (CLI console)
303+
- `databricks` (Databricks notebook)
304+
- `google_colab` (Google Colab notebook)
305+
- `jupyter` (Jupyter notebook)
306+
- `debugger` (Debugging output)
307+
- `ci` (CI/CD or other non-interactive environment)
308+
277309
## Anonymized usage information
278310

279311
We strive to make SQLMesh the best data transformation tool on the market. Part of accomplishing that is continually fixing bugs, adding features, and improving SQLMesh's performance.

sqlmesh/__init__.py

Lines changed: 32 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@
3333
from sqlmesh.utils import (
3434
debug_mode_enabled as debug_mode_enabled,
3535
enable_debug_mode as enable_debug_mode,
36+
str_to_bool,
3637
)
3738
from sqlmesh.utils.date import DatetimeRanges as DatetimeRanges
3839

@@ -54,6 +55,7 @@ class RuntimeEnv(str, Enum):
5455
GOOGLE_COLAB = "google_colab" # Not currently officially supported
5556
JUPYTER = "jupyter"
5657
DEBUGGER = "debugger"
58+
CI = "ci" # CI or other envs that shouldn't use emojis
5759

5860
@classmethod
5961
def get(cls) -> RuntimeEnv:
@@ -62,6 +64,16 @@ def get(cls) -> RuntimeEnv:
6264
6365
Unlike the rich implementation we try to split out by notebook type instead of treating it all as Jupyter.
6466
"""
67+
runtime_env_var = os.getenv("SQLMESH_RUNTIME_ENVIRONMENT")
68+
if runtime_env_var:
69+
try:
70+
return RuntimeEnv(runtime_env_var)
71+
except ValueError:
72+
valid_values = [f'"{member.value}"' for member in RuntimeEnv]
73+
raise ValueError(
74+
f"Invalid SQLMESH_RUNTIME_ENVIRONMENT value: {runtime_env_var}. Must be one of {', '.join(valid_values)}."
75+
)
76+
6577
try:
6678
shell = get_ipython() # type: ignore
6779
if os.getenv("DATABRICKS_RUNTIME_VERSION"):
@@ -75,6 +87,10 @@ def get(cls) -> RuntimeEnv:
7587

7688
if debug_mode_enabled():
7789
return RuntimeEnv.DEBUGGER
90+
91+
if is_cicd_environment() or not is_interactive_environment():
92+
return RuntimeEnv.CI
93+
7894
return RuntimeEnv.TERMINAL
7995

8096
@property
@@ -93,9 +109,24 @@ def is_jupyter(self) -> bool:
93109
def is_google_colab(self) -> bool:
94110
return self == RuntimeEnv.GOOGLE_COLAB
95111

112+
@property
113+
def is_ci(self) -> bool:
114+
return self == RuntimeEnv.CI
115+
96116
@property
97117
def is_notebook(self) -> bool:
98-
return not self.is_terminal
118+
return not self.is_terminal and not self.is_ci
119+
120+
121+
def is_cicd_environment() -> bool:
122+
for key in ("CI", "GITHUB_ACTIONS", "TRAVIS", "CIRCLECI", "GITLAB_CI", "BUILDKITE"):
123+
if str_to_bool(os.environ.get(key, "false")):
124+
return True
125+
return False
126+
127+
128+
def is_interactive_environment() -> bool:
129+
return sys.stdin.isatty() and sys.stdout.isatty()
99130

100131

101132
if RuntimeEnv.get().is_notebook:

sqlmesh/core/console.py

Lines changed: 82 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -78,9 +78,6 @@
7878

7979
PROGRESS_BAR_WIDTH = 40
8080
LINE_WRAP_WIDTH = 100
81-
CHECK_MARK = "\u2714"
82-
GREEN_CHECK_MARK = f"[green]{CHECK_MARK}[/green]"
83-
RED_X_MARK = "\u274c"
8481

8582

8683
class LinterConsole(abc.ABC):
@@ -770,6 +767,11 @@ class TerminalConsole(Console):
770767

771768
TABLE_DIFF_SOURCE_BLUE = "#0248ff"
772769
TABLE_DIFF_TARGET_GREEN = "green"
770+
AUDIT_PASS_MARK = "\u2714"
771+
GREEN_AUDIT_PASS_MARK = f"[green]{AUDIT_PASS_MARK}[/green]"
772+
AUDIT_FAIL_MARK = "\u274c"
773+
AUDIT_PADDING = 0
774+
CHECK_MARK = f"{AUDIT_PASS_MARK} "
773775

774776
def __init__(
775777
self,
@@ -879,7 +881,9 @@ def start_evaluation_progress(
879881
progress_table.add_row(self.evaluation_total_progress)
880882
progress_table.add_row(self.evaluation_model_progress)
881883

882-
self.evaluation_progress_live = Live(progress_table, refresh_per_second=10)
884+
self.evaluation_progress_live = Live(
885+
progress_table, console=self.console, refresh_per_second=10
886+
)
883887
self.evaluation_progress_live.start()
884888

885889
batch_sizes = {
@@ -891,7 +895,7 @@ def start_evaluation_progress(
891895

892896
# determine column widths
893897
self.evaluation_column_widths["annotation"] = (
894-
_calculate_annotation_str_len(batched_intervals)
898+
_calculate_annotation_str_len(batched_intervals, self.AUDIT_PADDING)
895899
+ 3 # brackets and opening escape backslash
896900
)
897901
self.evaluation_column_widths["name"] = max(
@@ -956,21 +960,24 @@ def update_snapshot_evaluation_progress(
956960
)
957961
audits_str = ""
958962
if num_audits_passed:
959-
audits_str += f" {CHECK_MARK}{num_audits_passed}"
963+
audits_str += f" {self.AUDIT_PASS_MARK}{num_audits_passed}"
960964
if num_audits_failed:
961-
audits_str += f" {RED_X_MARK}{num_audits_failed}"
965+
audits_str += f" {self.AUDIT_FAIL_MARK}{num_audits_failed}"
962966
audits_str = f", audits{audits_str}" if audits_str else ""
963967
annotation_len = self.evaluation_column_widths["annotation"]
968+
# don't adjust the annotation_len if we're using AUDIT_PADDING
964969
annotation = f"\\[{annotation + audits_str}]".ljust(
965-
annotation_len - 1 if num_audits_failed else annotation_len
970+
annotation_len - 1
971+
if num_audits_failed and self.AUDIT_PADDING == 0
972+
else annotation_len
966973
)
967974

968975
duration = f"{(duration_ms / 1000.0):.2f}s".ljust(
969976
self.evaluation_column_widths["duration"]
970977
)
971978

972979
msg = f"{batch} {display_name} {annotation} {duration}".replace(
973-
CHECK_MARK, GREEN_CHECK_MARK
980+
self.AUDIT_PASS_MARK, self.GREEN_AUDIT_PASS_MARK
974981
)
975982

976983
self.evaluation_progress_live.console.print(msg)
@@ -989,7 +996,7 @@ def stop_evaluation_progress(self, success: bool = True) -> None:
989996
if self.evaluation_progress_live:
990997
self.evaluation_progress_live.stop()
991998
if success:
992-
self.log_success(f"{GREEN_CHECK_MARK} Model batches executed")
999+
self.log_success(f"{self.CHECK_MARK}Model batches executed")
9931000

9941001
self.evaluation_progress_live = None
9951002
self.evaluation_total_progress = None
@@ -1053,7 +1060,7 @@ def stop_creation_progress(self, success: bool = True) -> None:
10531060
self.creation_progress.stop()
10541061
self.creation_progress = None
10551062
if success:
1056-
self.log_success(f"\n{GREEN_CHECK_MARK} Physical layer updated")
1063+
self.log_success(f"\n{self.CHECK_MARK}Physical layer updated")
10571064

10581065
self.environment_naming_info = EnvironmentNamingInfo()
10591066
self.default_catalog = None
@@ -1154,7 +1161,7 @@ def stop_promotion_progress(self, success: bool = True) -> None:
11541161
self.promotion_progress.stop()
11551162
self.promotion_progress = None
11561163
if success:
1157-
self.log_success(f"\n{GREEN_CHECK_MARK} Virtual layer updated")
1164+
self.log_success(f"\n{self.CHECK_MARK}Virtual layer updated")
11581165

11591166
self.environment_naming_info = EnvironmentNamingInfo()
11601167
self.default_catalog = None
@@ -2807,6 +2814,12 @@ class MarkdownConsole(CaptureTerminalConsole):
28072814
where you want to display a plan or test results in markdown.
28082815
"""
28092816

2817+
CHECK_MARK = ""
2818+
AUDIT_PASS_MARK = "passed "
2819+
GREEN_AUDIT_PASS_MARK = AUDIT_PASS_MARK
2820+
AUDIT_FAIL_MARK = "failed "
2821+
AUDIT_PADDING = 7
2822+
28102823
def __init__(self, **kwargs: t.Any) -> None:
28112824
super().__init__(**{**kwargs, "console": RichConsole(no_color=True)})
28122825

@@ -2822,23 +2835,28 @@ def show_environment_difference_summary(
28222835
no_diff: Hide the actual environment statements differences.
28232836
"""
28242837
if context_diff.is_new_environment:
2825-
self._print(
2826-
f"**New environment `{context_diff.environment}` will be created from `{context_diff.create_from}`**\n"
2838+
msg = (
2839+
f"\n**`{context_diff.environment}` environment will be initialized**"
2840+
if not context_diff.create_from_env_exists
2841+
else f"\n**New environment `{context_diff.environment}` will be created from `{context_diff.create_from}`**"
28272842
)
2843+
self._print(msg)
28282844
if not context_diff.has_snapshot_changes:
28292845
return
28302846

28312847
if not context_diff.has_changes:
2832-
self._print(f"**No differences when compared to `{context_diff.environment}`**\n")
2848+
self._print(
2849+
f"\n**No changes to plan: project files match the `{context_diff.environment}` environment**\n"
2850+
)
28332851
return
28342852

2835-
self._print(f"**Summary of differences against `{context_diff.environment}`:**\n")
2853+
self._print(f"\n**Summary of differences from `{context_diff.environment}`:**")
28362854

28372855
if context_diff.has_requirement_changes:
2838-
self._print(f"Requirements:\n{context_diff.requirements_diff()}")
2856+
self._print(f"\nRequirements:\n{context_diff.requirements_diff()}")
28392857

28402858
if context_diff.has_environment_statements_changes and not no_diff:
2841-
self._print("[bold]Environment statements:\n")
2859+
self._print("\nEnvironment statements:\n")
28422860
for _, diff in context_diff.environment_statements_diff(
28432861
include_python_env=not context_diff.is_new_environment
28442862
):
@@ -2984,7 +3002,7 @@ def _show_missing_dates(self, plan: Plan, default_catalog: t.Optional[str]) -> N
29843002
dialect=self.dialect,
29853003
)
29863004
snapshots.append(
2987-
f"* `{display_name}`: [{_format_missing_intervals(snapshot, missing)}]{preview_modifier}"
3005+
f"* `{display_name}`: \\[{_format_missing_intervals(snapshot, missing)}]{preview_modifier}"
29883006
)
29893007

29903008
length = len(snapshots)
@@ -3033,6 +3051,21 @@ def _show_categorized_snapshots(self, plan: Plan, default_catalog: t.Optional[st
30333051
self._print(tree)
30343052
self._print("\n```")
30353053

3054+
def stop_evaluation_progress(self, success: bool = True) -> None:
3055+
super().stop_evaluation_progress(success)
3056+
self._print("\n")
3057+
3058+
def stop_creation_progress(self, success: bool = True) -> None:
3059+
super().stop_creation_progress(success)
3060+
self._print("\n")
3061+
3062+
def stop_promotion_progress(self, success: bool = True) -> None:
3063+
super().stop_promotion_progress(success)
3064+
self._print("\n")
3065+
3066+
def log_success(self, message: str) -> None:
3067+
self._print(message)
3068+
30363069
def log_test_results(
30373070
self, result: unittest.result.TestResult, output: t.Optional[str], target_dialect: str
30383071
) -> None:
@@ -3082,6 +3115,12 @@ def log_warning(self, short_message: str, long_message: t.Optional[str] = None)
30823115
logger.warning(long_message or short_message)
30833116
self._print(f"```\n\\[WARNING] {short_message}```\n\n")
30843117

3118+
def _print(self, value: t.Any, **kwargs: t.Any) -> None:
3119+
self.console.print(value, **kwargs)
3120+
with self.console.capture() as capture:
3121+
self.console.print(value, **kwargs)
3122+
self._captured_outputs.append(capture.get())
3123+
30853124

30863125
class DatabricksMagicConsole(CaptureTerminalConsole):
30873126
"""
@@ -3473,6 +3512,7 @@ def create_console(
34733512
RuntimeEnv.TERMINAL: TerminalConsole,
34743513
RuntimeEnv.GOOGLE_COLAB: NotebookMagicConsole,
34753514
RuntimeEnv.DEBUGGER: DebuggerTerminalConsole,
3515+
RuntimeEnv.CI: MarkdownConsole,
34763516
}
34773517
rich_console_kwargs: t.Dict[str, t.Any] = {"theme": srich.theme}
34783518
if runtime_env.is_jupyter or runtime_env.is_google_colab:
@@ -3598,7 +3638,7 @@ def _calculate_interval_str_len(snapshot: Snapshot, intervals: t.List[Interval])
35983638
return interval_str_len
35993639

36003640

3601-
def _calculate_audit_str_len(snapshot: Snapshot) -> int:
3641+
def _calculate_audit_str_len(snapshot: Snapshot, audit_padding: int = 0) -> int:
36023642
# The annotation includes audit results. We cannot build the audits result string
36033643
# until after evaluation occurs, but we must determine the annotation column width here.
36043644
# Therefore, we add enough padding for the longest possible audits result string.
@@ -3619,21 +3659,38 @@ def _calculate_audit_str_len(snapshot: Snapshot) -> int:
36193659
)
36203660
if num_audits == 1:
36213661
# +1 for "1" audit count, +1 for red X
3622-
audit_len = audit_base_str_len + (2 if num_nonblocking_audits else 1)
3662+
# if audit_padding is > 0 we're using "failed" instead of red X
3663+
audit_len = (
3664+
audit_base_str_len
3665+
+ (2 if num_nonblocking_audits else 1)
3666+
+ (
3667+
audit_padding - 1
3668+
if num_nonblocking_audits and audit_padding > 0
3669+
else audit_padding
3670+
)
3671+
)
36233672
else:
3624-
audit_len = audit_base_str_len + len(str(num_audits))
3673+
audit_len = audit_base_str_len + len(str(num_audits)) + audit_padding
36253674
if num_nonblocking_audits:
36263675
# +1 for space, +1 for red X
3627-
audit_len += len(str(num_nonblocking_audits)) + 2
3676+
# if audit_padding is > 0 we're using "failed" instead of red X
3677+
audit_len += (
3678+
len(str(num_nonblocking_audits))
3679+
+ 2
3680+
+ (audit_padding - 1 if audit_padding > 0 else audit_padding)
3681+
)
36283682
audit_str_len = max(audit_str_len, audit_len)
36293683
return audit_str_len
36303684

36313685

3632-
def _calculate_annotation_str_len(batched_intervals: t.Dict[Snapshot, t.List[Interval]]) -> int:
3686+
def _calculate_annotation_str_len(
3687+
batched_intervals: t.Dict[Snapshot, t.List[Interval]], audit_padding: int = 0
3688+
) -> int:
36333689
annotation_str_len = 0
36343690
for snapshot, intervals in batched_intervals.items():
36353691
annotation_str_len = max(
36363692
annotation_str_len,
3637-
_calculate_interval_str_len(snapshot, intervals) + _calculate_audit_str_len(snapshot),
3693+
_calculate_interval_str_len(snapshot, intervals)
3694+
+ _calculate_audit_str_len(snapshot, audit_padding),
36383695
)
36393696
return annotation_str_len

0 commit comments

Comments
 (0)