3535 SnapshotId ,
3636 SnapshotInfoLike ,
3737)
38- from sqlmesh .core .snapshot .definition import Interval , Intervals
38+ from sqlmesh .core .snapshot .definition import Interval
3939from sqlmesh .core .test import ModelTest
4040from sqlmesh .utils import rich as srich
4141from sqlmesh .utils import Verbosity
4242from sqlmesh .utils .concurrency import NodeExecutionFailedError
43- from sqlmesh .utils .date import time_like_to_str , to_date , yesterday_ds , to_ds , to_tstz
43+ from sqlmesh .utils .date import time_like_to_str , to_date , yesterday_ds , to_ds , to_datetime
4444from sqlmesh .utils .errors import (
4545 PythonModelEvalError ,
4646 NodeAuditsErrors ,
7474
# Layout constants shared by the rich progress-bar renderers below.
PROGRESS_BAR_WIDTH = 40
LINE_WRAP_WIDTH = 100

# Unicode marks used to annotate audit/task outcomes in console output.
# Both include a trailing space so they can be concatenated directly.
CHECK_MARK = "\u2714 "
GREEN_CHECK_MARK = "[green]" + CHECK_MARK + "[/green]"
RED_X_MARK = "\u274c "
7980
8081
@@ -533,8 +534,8 @@ class TerminalConsole(Console):
533534
534535 TABLE_DIFF_SOURCE_BLUE = "#0248ff"
535536
536- EVAL_PROGRESS_BAR_COLUMN_WIDTHS : t .Dict [str , int ] = {
537- "batch" : 9 ,
537+ PROGRESS_BAR_COLUMN_WIDTHS : t .Dict [str , int ] = {
538+ "batch" : 7 ,
538539 "name" : 50 ,
539540 "annotation" : 50 ,
540541 "duration" : 8 ,
@@ -602,7 +603,7 @@ def start_evaluation_progress(
602603 """Indicates that a new snapshot evaluation progress has begun."""
603604 if not self .evaluation_progress_live :
604605 self .evaluation_total_progress = make_progress_bar (
605- "Evaluating model batches" , self .console
606+ "Executing model batches" , self .console
606607 )
607608
608609 self .evaluation_model_progress = Progress (
@@ -656,31 +657,42 @@ def update_snapshot_evaluation_progress(
656657 ):
657658 total_batches = self .evaluation_model_batch_sizes [snapshot ]
658659 batch_num = str (batch_idx + 1 ).rjust (len (str (total_batches )))
659- batch = f"[{ batch_num } /{ total_batches } ]" .ljust (
660- self .EVAL_PROGRESS_BAR_COLUMN_WIDTHS ["batch" ]
661- )
660+ batch = f"[{ batch_num } /{ total_batches } ]" .ljust (self .PROGRESS_BAR_COLUMN_WIDTHS ["batch" ])
662661
663662 if duration_ms :
664- display_name = snapshot .display_name (
665- self .environment_naming_info ,
666- self .default_catalog if self .verbosity < Verbosity .VERY_VERBOSE else None ,
667- dialect = self .dialect ,
668- ).ljust (self .EVAL_PROGRESS_BAR_COLUMN_WIDTHS ["name" ])
663+ display_name = _justify_evaluation_model_info (
664+ snapshot .display_name (
665+ self .environment_naming_info ,
666+ self .default_catalog if self .verbosity < Verbosity .VERY_VERBOSE else None ,
667+ dialect = self .dialect ,
668+ ),
669+ self .PROGRESS_BAR_COLUMN_WIDTHS ["name" ],
670+ )
669671
670672 annotation = _create_evaluation_model_annotation (
671673 snapshot , _format_evaluation_model_interval (snapshot , interval )
672674 )
673-
675+ audits_str = ""
674676 if num_audits_passed :
675- annotation += f", { num_audits_passed } audits pass "
677+ audits_str += f" { CHECK_MARK } { num_audits_passed } "
676678 if num_audits_failed :
677- annotation += f", { num_audits_failed } audits fail { RED_X_MARK } "
678- annotation = (annotation + "]" ).ljust (
679- self .EVAL_PROGRESS_BAR_COLUMN_WIDTHS ["annotation" ]
679+ audits_str += f" { RED_X_MARK } { num_audits_failed } "
680+ audits_str = f", audits{ audits_str } " if audits_str else ""
681+
682+ annotation_width = self .PROGRESS_BAR_COLUMN_WIDTHS ["annotation" ]
683+ annotation = _justify_evaluation_model_info (
684+ annotation + audits_str ,
685+ annotation_width
686+ if not num_audits_failed
687+ else annotation_width - 1 , # -1 for RED_X_MARK's extra space
688+ dots_side = "right" ,
689+ prefix = " \[" ,
690+ suffix = "]" ,
680691 )
692+ annotation = annotation .replace (CHECK_MARK , GREEN_CHECK_MARK )
681693
682- duration = f"{ (duration_ms / 1000.0 ):.2f} s" .rjust (
683- self .EVAL_PROGRESS_BAR_COLUMN_WIDTHS ["duration" ]
694+ duration = f"{ (duration_ms / 1000.0 ):.2f} s" .ljust (
695+ self .PROGRESS_BAR_COLUMN_WIDTHS ["duration" ]
684696 )
685697
686698 self .evaluation_progress_live .console .print (
@@ -701,7 +713,7 @@ def stop_evaluation_progress(self, success: bool = True) -> None:
701713 if self .evaluation_progress_live :
702714 self .evaluation_progress_live .stop ()
703715 if success :
704- self .log_success (" Model batches evaluated successfully" )
716+ self .log_success (f" { GREEN_CHECK_MARK } Model batches executed successfully" )
705717
706718 self .evaluation_progress_live = None
707719 self .evaluation_total_progress = None
@@ -720,13 +732,12 @@ def start_creation_progress(
720732 ) -> None :
721733 """Indicates that a new creation progress has begun."""
722734 if self .creation_progress is None :
723- message = "Creating physical table" if total_tasks == 1 else "Creating physical tables"
724- self .creation_progress = make_progress_bar (message , self .console )
735+ self .creation_progress = make_progress_bar ("Updating physical layer" , self .console )
725736
726737 self ._print ("" )
727738 self .creation_progress .start ()
728739 self .creation_task = self .creation_progress .add_task (
729- "Creating physical tables ..." ,
740+ "Updating physical layer ..." ,
730741 total = total_tasks ,
731742 )
732743
@@ -738,7 +749,7 @@ def update_creation_progress(self, snapshot: SnapshotInfoLike) -> None:
738749 if self .creation_progress is not None and self .creation_task is not None :
739750 if self .verbosity >= Verbosity .VERBOSE :
740751 self .creation_progress .live .console .print (
741- f"{ GREEN_CHECK_MARK } { snapshot .display_name (self .environment_naming_info , self .default_catalog if self .verbosity < Verbosity .VERY_VERBOSE else None , dialect = self .dialect )} [green]created[/green]"
752+ f"{ GREEN_CHECK_MARK } { snapshot .display_name (self .environment_naming_info , self .default_catalog if self .verbosity < Verbosity .VERY_VERBOSE else None , dialect = self .dialect ). ljust ( self . PROGRESS_BAR_COLUMN_WIDTHS [ 'name' ]) } [green]created[/green]"
742753 )
743754 self .creation_progress .update (self .creation_task , refresh = True , advance = 1 )
744755
@@ -749,7 +760,7 @@ def stop_creation_progress(self, success: bool = True) -> None:
749760 self .creation_progress .stop ()
750761 self .creation_progress = None
751762 if success :
752- self .log_success (" \n Model versions created successfully" )
763+ self .log_success (f" \n { GREEN_CHECK_MARK } Physical layer updated successfully" )
753764
754765 self .environment_naming_info = EnvironmentNamingInfo ()
755766 self .default_catalog = None
@@ -790,7 +801,7 @@ def start_promotion_progress(
790801 if self .promotion_progress is None :
791802 self .promotion_progress = Progress (
792803 TextColumn (
793- f "[bold blue]Virtually updating ' { environment_naming_info . name } ' environment views " ,
804+ "[bold blue]Updating virtual layer " ,
794805 justify = "right" ,
795806 ),
796807 BarColumn (bar_width = PROGRESS_BAR_WIDTH ),
@@ -816,7 +827,7 @@ def update_promotion_progress(self, snapshot: SnapshotInfoLike, promoted: bool)
816827 check_mark = f"{ GREEN_CHECK_MARK } " if promoted else " "
817828 action_str = "[green]updated[/green]" if promoted else "[yellow]demoted[/yellow]"
818829 self .promotion_progress .live .console .print (
819- f"{ check_mark } { snapshot .display_name (self .environment_naming_info , self .default_catalog if self .verbosity < Verbosity .VERY_VERBOSE else None , dialect = self .dialect )} { action_str } "
830+ f"{ check_mark } { snapshot .display_name (self .environment_naming_info , self .default_catalog if self .verbosity < Verbosity .VERY_VERBOSE else None , dialect = self .dialect ). ljust ( self . PROGRESS_BAR_COLUMN_WIDTHS [ 'name' ]) } { action_str } "
820831 )
821832 self .promotion_progress .update (self .promotion_task , refresh = True , advance = 1 )
822833
@@ -827,7 +838,7 @@ def stop_promotion_progress(self, success: bool = True) -> None:
827838 self .promotion_progress .stop ()
828839 self .promotion_progress = None
829840 if success :
830- self .log_success (" \n Environment views updated successfully" )
841+ self .log_success (f" \n { GREEN_CHECK_MARK } Virtual layer updated successfully" )
831842
832843 self .environment_naming_info = EnvironmentNamingInfo ()
833844 self .default_catalog = None
@@ -2607,7 +2618,8 @@ def show_row_diff(
26072618 self ._write (row_diff )
26082619
26092620
# Default to the no-op console; a real console is installed explicitly via
# set_console() (e.g. by the CLI entry point). Hard-coding TerminalConsole()
# here looks like a debug leftover — it would make every library/embedded use
# of this module emit rich terminal output unconditionally.
_CONSOLE: Console = NoopConsole()
26112623
26122624
26132625def set_console (console : Console ) -> None :
@@ -2723,111 +2735,70 @@ def _format_audits_errors(error: NodeAuditsErrors) -> str:
27232735
27242736
def _format_evaluation_model_interval(snapshot: "Snapshot", interval: "Interval") -> str:
    """Render the interval annotation (e.g. ``insert 2023-01-01 - 2023-01-02``).

    Only incremental-style model kinds (incremental, managed, custom) get an
    interval annotation; every other snapshot kind yields an empty string.

    Args:
        snapshot: The snapshot being evaluated.
        interval: The evaluated (start, end) interval as epoch timestamps.
            NOTE(review): assumed to be in milliseconds — the sub-day check
            below scales ``total_seconds()`` by 1000; confirm against callers.

    Returns:
        The formatted interval string, or "" when no annotation applies.
    """
    if not snapshot.is_model:
        return ""

    kind = snapshot.model.kind
    if not (kind.is_incremental or kind.is_managed or kind.is_custom):
        return ""

    start, end = interval
    one_day_ms = datetime.timedelta(days=1).total_seconds() * 1000
    if (end - start) < one_day_ms:
        # Sub-day interval: show a single date followed by a start-end time range.
        start_time = to_datetime(start).strftime("%H:%M:%S")
        end_time = to_datetime(end).strftime("%H:%M:%S")
        return f"insert {to_ds(start)} {start_time}-{end_time}"
    return f"insert {to_ds(start)} - {to_ds(end)}"
27412748
27422749
2743- def _create_evaluation_model_info (
2744- batched_intervals : t .Dict [Snapshot , Intervals ],
2745- model_batch_sizes : t .Dict [Snapshot , int ],
2746- environment_naming_info : EnvironmentNamingInfo ,
2747- default_catalog : t .Optional [str ],
2748- dialect : t .Optional [DialectType ],
2749- ) -> t .Tuple [t .Dict [Snapshot , t .Dict [str , t .Any ]], t .Dict [str , int ]]:
2750- """Creates model information dictionaries for model evaluation progress bar.
2750+ def _justify_evaluation_model_info (
2751+ text : str ,
2752+ length : int ,
2753+ justify_direction : str = "left" ,
2754+ dots_side : str = "left" ,
2755+ prefix : str = "" ,
2756+ suffix : str = "" ,
2757+ ) -> str :
2758+ """Format a model evaluation info string by justifying and truncating if needed.
27512759
2752- Parameters:
2753- batched_intervals: Dictionary mapping snapshot batches to their evaluation intervals
2754- model_batch_sizes: Dictionary mapping snapshots to their batch sizes
2755- environment_naming_info: Information about environment naming, needed to render model name
2756- default_catalog: Optional default catalog name for rendering model name
2757- dialect: Optional SQL dialect for rendering model name
2760+ Args:
2761+ text: The string to format
2762+ length: The desired number of characters in the returned string
2763+ justify_direction: The justification direction ("left" or "l" or "right" or "r")
2764+ dots_side: The side of the dots if truncation is needed ("left" or "l" or "right" or "r")
2765+ prefix: A prefix to add to the returned string
2766+ suffix: A suffix to add to the returned string
27582767
27592768 Returns:
2760- A tuple containing:
2761- - Dictionary mapping snapshots to their model's display information
2762- - Dictionary of output field names to column widths
2769+ The justified string, truncated with "..." if needed
27632770 """
2764- model_info : t .Dict [Snapshot , t .Dict [str , t .Any ]] = {}
2765- model_column_widths = {}
2766- model_column_widths ["display_name" ] = 0
2767- model_column_widths ["annotation" ] = 0
2768-
2769- for snapshot in batched_intervals :
2770- model_info [snapshot ] = {}
2771- model_info [snapshot ]["display_name" ] = snapshot .display_name (
2772- environment_naming_info , default_catalog , dialect = dialect
2773- )
2774- model_column_widths ["display_name" ] = max (
2775- model_column_widths ["display_name" ], len (model_info [snapshot ]["display_name" ])
2776- )
2777-
2778- # The annotation includes audit results. We cannot build the audits result string
2779- # until after evaluation occurs, but we must determine the annotation column width here.
2780- # Therefore, we add enough padding for the longest possible audits result string.
2781- audit_pad = 0
2782- if snapshot .is_model and snapshot .model .audits :
2783- num_audits = len (snapshot .model .audits_with_args )
2784- num_nonblocking_audits = sum (
2785- 1
2786- for audit in snapshot .model .audits_with_args
2787- if not audit [0 ].blocking
2788- or ("blocking" in audit [1 ] and audit [1 ]["blocking" ] == exp .false ())
2789- )
2790- # make enough room for all audits to pass
2791- audit_pad = len (f", { str (num_audits )} audits passed" )
2792- if num_nonblocking_audits :
2793- # and add enough room for all nonblocking audits to fail
2794- audit_pad += len (f", { str (num_nonblocking_audits )} audits failed X" ) # red X
2795- audit_pad += 1 # closing bracket
2796-
2797- model_info [snapshot ]["annotation" ] = [
2798- _create_evaluation_model_annotation (
2799- snapshot ,
2800- _format_evaluation_model_interval (snapshot , interval ),
2801- )
2802- for interval in batched_intervals [snapshot ]
2803- ]
2804- model_column_widths ["annotation" ] = max (
2805- model_column_widths ["annotation" ],
2806- max (len (annotation ) for annotation in model_info [snapshot ]["annotation" ]) + audit_pad ,
2771+ full_text = f"{ prefix } { text } { suffix } "
2772+ if len (full_text ) <= length :
2773+ return (
2774+ full_text .ljust (length )
2775+ if justify_direction .startswith ("l" )
2776+ else full_text .rjust (length )
28072777 )
28082778
2809- model_column_widths ["batch" ] = 5 # number characters in default "[1/1]"
2810- # do we need space for more than one digit?
2811- if any (size > 9 for size in model_batch_sizes .values ()):
2812- model_column_widths ["batch" ] = (
2813- max (len (str (size )) for size in model_batch_sizes .values ()) * 2
2814- ) + 3 # brackets and slash
2815-
2816- return model_info , model_column_widths
2779+ trunc_length = length - len (prefix ) - len (suffix )
2780+ truncated_text = (
2781+ "..." + text [- (trunc_length - 3 ) :]
2782+ if dots_side .startswith ("l" )
2783+ else text [: (trunc_length - 3 )] + "..."
2784+ )
2785+ return f"{ prefix } { truncated_text } { suffix } "
28172786
28182787
28192788def _create_evaluation_model_annotation (snapshot : Snapshot , interval_info : t .Optional [str ]) -> str :
2820- if snapshot .is_audit or (snapshot .is_model and snapshot .model .kind .is_external ):
2821- return " \[run audits"
2789+ if snapshot .is_audit :
2790+ return "run standalone audit"
2791+ if snapshot .is_model and snapshot .model .kind .is_external :
2792+ return "run external model audits"
28222793 if snapshot .model .kind .is_seed :
2823- return " \[ insert from seed file"
2794+ return "insert from seed file"
28242795 if snapshot .model .kind .is_full :
2825- return " \[ full refresh"
2796+ return "full refresh"
28262797 if snapshot .model .kind .is_view :
2827- return " \[ recreate view"
2798+ return "recreate view"
28282799 if snapshot .model .kind .is_incremental_by_unique_key :
2829- return " \[ insert or update rows"
2800+ return "insert or update rows"
28302801 if snapshot .model .kind .is_incremental_by_partition :
2831- return " \[ insert partition"
2802+ return "insert partition"
28322803
2833- return f" \[ { interval_info } " if interval_info else ""
2804+ return interval_info if interval_info else ""
0 commit comments