@@ -95,7 +95,7 @@ def stop_plan_evaluation(self) -> None:
9595 @abc .abstractmethod
9696 def start_evaluation_progress (
9797 self ,
98- batches : t .Dict [Snapshot , Intervals ],
98+ batch_sizes : t .Dict [Snapshot , int ],
9999 environment_naming_info : EnvironmentNamingInfo ,
100100 default_catalog : t .Optional [str ],
101101 ) -> None :
@@ -109,6 +109,7 @@ def start_snapshot_evaluation_progress(self, snapshot: Snapshot) -> None:
109109 def update_snapshot_evaluation_progress (
110110 self ,
111111 snapshot : Snapshot ,
112+ interval : Interval ,
112113 batch_idx : int ,
113114 duration_ms : t .Optional [int ],
114115 num_audits_passed : int ,
@@ -350,7 +351,7 @@ def stop_plan_evaluation(self) -> None:
350351
351352 def start_evaluation_progress (
352353 self ,
353- batches : t .Dict [Snapshot , Intervals ],
354+ batch_sizes : t .Dict [Snapshot , int ],
354355 environment_naming_info : EnvironmentNamingInfo ,
355356 default_catalog : t .Optional [str ],
356357 ) -> None :
@@ -362,6 +363,7 @@ def start_snapshot_evaluation_progress(self, snapshot: Snapshot) -> None:
362363 def update_snapshot_evaluation_progress (
363364 self ,
364365 snapshot : Snapshot ,
366+ interval : Interval ,
365367 batch_idx : int ,
366368 duration_ms : t .Optional [int ],
367369 num_audits_passed : int ,
@@ -531,6 +533,13 @@ class TerminalConsole(Console):
531533
532534 TABLE_DIFF_SOURCE_BLUE = "#0248ff"
533535
536+ EVAL_PROGRESS_BAR_COLUMN_WIDTHS : t .Dict [str , int ] = {
537+ "batch" : 9 ,
538+ "name" : 50 ,
539+ "annotation" : 50 ,
540+ "duration" : 8 ,
541+ }
542+
534543 def __init__ (
535544 self ,
536545 console : t .Optional [RichConsole ] = None ,
@@ -546,9 +555,6 @@ def __init__(
546555 self .evaluation_total_task : t .Optional [TaskID ] = None
547556 self .evaluation_model_progress : t .Optional [Progress ] = None
548557 self .evaluation_model_tasks : t .Dict [str , TaskID ] = {}
549- self .evaluation_model_batch_sizes : t .Dict [Snapshot , int ] = {}
550- self .evaluation_model_info : t .Dict [Snapshot , t .Dict [str , t .Any ]] = {}
551- self .evaluation_model_column_widths : t .Dict [str , int ] = {}
552558
553559 # Put in temporary values that are replaced when evaluating
554560 self .environment_naming_info = EnvironmentNamingInfo ()
@@ -589,28 +595,12 @@ def stop_plan_evaluation(self) -> None:
589595
590596 def start_evaluation_progress (
591597 self ,
592- batched_intervals : t .Dict [Snapshot , Intervals ],
598+ batch_sizes : t .Dict [Snapshot , int ],
593599 environment_naming_info : EnvironmentNamingInfo ,
594600 default_catalog : t .Optional [str ],
595601 ) -> None :
596602 """Indicates that a new snapshot evaluation progress has begun."""
597603 if not self .evaluation_progress_live :
598- self .evaluation_model_batch_sizes = {
599- snapshot : len (intervals ) for snapshot , intervals in batched_intervals .items ()
600- }
601- self .environment_naming_info = environment_naming_info
602- self .default_catalog = default_catalog
603-
604- self .evaluation_model_info , self .evaluation_model_column_widths = (
605- _create_evaluation_model_info (
606- batched_intervals ,
607- self .evaluation_model_batch_sizes ,
608- environment_naming_info ,
609- default_catalog ,
610- self .dialect ,
611- )
612- )
613-
614604 self .evaluation_total_progress = make_progress_bar (
615605 "Evaluating model batches" , self .console
616606 )
@@ -629,9 +619,13 @@ def start_evaluation_progress(
629619 self .evaluation_progress_live .start ()
630620
631621 self .evaluation_total_task = self .evaluation_total_progress .add_task (
632- "Evaluating models..." , total = sum (self . evaluation_model_batch_sizes .values ())
622+ "Evaluating models..." , total = sum (batch_sizes .values ())
633623 )
634624
625+ self .evaluation_model_batch_sizes = batch_sizes
626+ self .environment_naming_info = environment_naming_info
627+ self .default_catalog = default_catalog
628+
635629 def start_snapshot_evaluation_progress (self , snapshot : Snapshot ) -> None :
636630 if self .evaluation_model_progress and snapshot .name not in self .evaluation_model_tasks :
637631 display_name = snapshot .display_name (
@@ -648,6 +642,7 @@ def start_snapshot_evaluation_progress(self, snapshot: Snapshot) -> None:
648642 def update_snapshot_evaluation_progress (
649643 self ,
650644 snapshot : Snapshot ,
645+ interval : Interval ,
651646 batch_idx : int ,
652647 duration_ms : t .Optional [int ],
653648 num_audits_passed : int ,
@@ -661,26 +656,32 @@ def update_snapshot_evaluation_progress(
661656 ):
662657 total_batches = self .evaluation_model_batch_sizes [snapshot ]
663658 batch_num = str (batch_idx + 1 ).rjust (len (str (total_batches )))
664- batch = f"[{ batch_num } /{ total_batches } ] "
659+ batch = f"[{ batch_num } /{ total_batches } ]" .ljust (
660+ self .EVAL_PROGRESS_BAR_COLUMN_WIDTHS ["batch" ]
661+ )
665662
666663 if duration_ms :
667- display_name = self .evaluation_model_info [snapshot ]["display_name" ].ljust (
668- self .evaluation_model_column_widths ["display_name" ]
664+ display_name = snapshot .display_name (
665+ self .environment_naming_info ,
666+ self .default_catalog if self .verbosity < Verbosity .VERY_VERBOSE else None ,
667+ dialect = self .dialect ,
668+ ).ljust (self .EVAL_PROGRESS_BAR_COLUMN_WIDTHS ["name" ])
669+
670+ annotation = _create_evaluation_model_annotation (
671+ snapshot , _format_evaluation_model_interval (snapshot , interval )
669672 )
670673
671- annotation = self .evaluation_model_info [snapshot ]["annotation" ][batch_idx ]
672674 if num_audits_passed :
673675 annotation += f", { num_audits_passed } audits pass"
674676 if num_audits_failed :
675677 annotation += f", { num_audits_failed } audits fail { RED_X_MARK } "
676678 annotation = (annotation + "]" ).ljust (
677- self .evaluation_model_column_widths ["annotation" ]
679+ self .EVAL_PROGRESS_BAR_COLUMN_WIDTHS ["annotation" ]
678680 )
679681
680- # 8 characters for duration
681- # if the failed audit red X is present, the console adds an extra space
682- duration_width = 7 if num_audits_failed else 8
683- duration = f"{ (duration_ms / 1000.0 ):.2f} s" .rjust (duration_width )
682+ duration = f"{ (duration_ms / 1000.0 ):.2f} s" .rjust (
683+ self .EVAL_PROGRESS_BAR_COLUMN_WIDTHS ["duration" ]
684+ )
684685
685686 self .evaluation_progress_live .console .print (
686687 f"{ GREEN_CHECK_MARK } { batch } { display_name } { annotation } { duration } "
@@ -708,8 +709,6 @@ def stop_evaluation_progress(self, success: bool = True) -> None:
708709 self .evaluation_model_progress = None
709710 self .evaluation_model_tasks = {}
710711 self .evaluation_model_batch_sizes = {}
711- self .evaluation_model_info = {}
712- self .evaluation_model_column_widths = {}
713712 self .environment_naming_info = EnvironmentNamingInfo ()
714713 self .default_catalog = None
715714
@@ -2303,13 +2302,11 @@ def _confirm(self, message: str, **kwargs: t.Any) -> bool:
23032302
23042303 def start_evaluation_progress (
23052304 self ,
2306- batched_intervals : t .Dict [Snapshot , Intervals ],
2305+ batch_sizes : t .Dict [Snapshot , int ],
23072306 environment_naming_info : EnvironmentNamingInfo ,
23082307 default_catalog : t .Optional [str ],
23092308 ) -> None :
2310- self .evaluation_model_batch_sizes = {
2311- snapshot : len (intervals ) for snapshot , intervals in batched_intervals .items ()
2312- }
2309+ self .evaluation_model_batch_sizes = batch_sizes
23132310 self .evaluation_environment_naming_info = environment_naming_info
23142311 self .default_catalog = default_catalog
23152312
@@ -2328,6 +2325,7 @@ def start_snapshot_evaluation_progress(self, snapshot: Snapshot) -> None:
23282325 def update_snapshot_evaluation_progress (
23292326 self ,
23302327 snapshot : Snapshot ,
2328+ interval : Interval ,
23312329 batch_idx : int ,
23322330 duration_ms : t .Optional [int ],
23332331 num_audits_passed : int ,
@@ -2472,18 +2470,19 @@ def stop_plan_evaluation(self) -> None:
24722470
24732471 def start_evaluation_progress (
24742472 self ,
2475- batched_intervals : t .Dict [Snapshot , Intervals ],
2473+ batch_sizes : t .Dict [Snapshot , int ],
24762474 environment_naming_info : EnvironmentNamingInfo ,
24772475 default_catalog : t .Optional [str ],
24782476 ) -> None :
2479- self ._write (f"Starting evaluation for { len ( batched_intervals )} snapshots" )
2477+ self ._write (f"Starting evaluation for { sum ( batch_sizes . values () )} snapshots" )
24802478
24812479 def start_snapshot_evaluation_progress (self , snapshot : Snapshot ) -> None :
24822480 self ._write (f"Evaluating { snapshot .name } " )
24832481
24842482 def update_snapshot_evaluation_progress (
24852483 self ,
24862484 snapshot : Snapshot ,
2485+ interval : Interval ,
24872486 batch_idx : int ,
24882487 duration_ms : t .Optional [int ],
24892488 num_audits_passed : int ,