@@ -760,39 +760,9 @@ def start_evaluation_progress(
760760 )
761761
762762 # determine column widths
763- annotation_width = 0
764- for snapshot , intervals in batched_intervals .items ():
765- interval_str_len = 0
766- for interval in intervals :
767- interval_str_len = max (
768- interval_str_len ,
769- len (
770- _create_evaluation_model_annotation (
771- snapshot , _format_evaluation_model_interval (snapshot , interval )
772- )
773- ),
774- )
775-
776- # The annotation includes audit results. We cannot build the audits result string
777- # until after evaluation occurs, but we must determine the annotation column width here.
778- # Therefore, we add enough padding for the longest possible audits result string.
779- audit_str_len = 0
780- if snapshot .is_model and snapshot .model .audits :
781- num_audits = len (snapshot .model .audits_with_args )
782- num_nonblocking_audits = sum (
783- 1
784- for audit in snapshot .model .audits_with_args
785- if not audit [0 ].blocking
786- or ("blocking" in audit [1 ] and audit [1 ]["blocking" ] == exp .false ())
787- )
788- # make enough room for all audits to pass
789- audit_str_len = len (f", audits { CHECK_MARK } { str (num_audits )} " )
790- if num_nonblocking_audits :
791- # and add enough room for all nonblocking audits to fail
792- audit_str_len += len (f" { RED_X_MARK } { str (num_nonblocking_audits )} " ) + 1
793- annotation_width = max (annotation_width , interval_str_len + audit_str_len )
794763 self .evaluation_column_widths ["annotation" ] = (
795- annotation_width + 3 # brackets and opening escape backslash
764+ _calculate_annotation_str_len (batched_intervals )
765+ + 3 # brackets and opening escape backslash
796766 )
797767 self .evaluation_column_widths ["name" ] = max (
798768 len (
@@ -3261,3 +3231,47 @@ def _create_evaluation_model_annotation(snapshot: Snapshot, interval_info: t.Opt
32613231 return "insert partitions"
32623232
32633233 return interval_info if interval_info else ""
3234+
3235+
def _calculate_interval_str_len(batched_intervals: t.Dict[Snapshot, t.List[Interval]]) -> int:
    """Return the length of the longest rendered interval annotation.

    Formats every (snapshot, interval) pair the same way the progress display
    will and tracks the widest result, so the annotation column can be sized
    before evaluation begins. Returns 0 when there are no intervals.
    """
    longest = 0
    for snapshot, intervals in batched_intervals.items():
        rendered_lengths = (
            len(
                _create_evaluation_model_annotation(
                    snapshot, _format_evaluation_model_interval(snapshot, interval)
                )
            )
            for interval in intervals
        )
        # carry the running maximum across snapshots; `default` keeps it
        # unchanged for a snapshot with an empty interval list
        longest = max(rendered_lengths, default=longest)
    return longest
3249+
3250+
def _calculate_audit_str_len(batched_intervals: t.Dict[Snapshot, t.List[Interval]]) -> int:
    """Return the widest possible audit-result suffix across all snapshots.

    The annotation includes audit results. We cannot build the audits result string
    until after evaluation occurs, but we must determine the annotation column width
    here. Therefore, we reserve enough padding for the longest possible audits
    result string: every audit passing, plus every nonblocking audit also failing.
    Returns 0 when no snapshot is a model with audits.
    """
    widest = 0
    for snapshot in batched_intervals:
        if not (snapshot.is_model and snapshot.model.audits):
            continue
        audits_with_args = snapshot.model.audits_with_args
        num_audits = len(audits_with_args)
        # an audit is nonblocking if declared so, or overridden via a
        # `blocking = false` argument at the call site
        num_nonblocking = sum(
            1
            for audit, args in audits_with_args
            if not audit.blocking or ("blocking" in args and args["blocking"] == exp.false())
        )
        # make enough room for all audits to pass
        candidate = len(f", audits {CHECK_MARK} {num_audits}")
        if num_nonblocking:
            # and add enough room for all nonblocking audits to fail
            candidate += len(f" {RED_X_MARK} {num_nonblocking}") + 1
        widest = max(widest, candidate)
    return widest
3272+
3273+
def _calculate_annotation_str_len(batched_intervals: t.Dict[Snapshot, t.List[Interval]]) -> int:
    """Return the total annotation column width for the evaluation progress display.

    The width is the longest rendered interval text plus the widest possible
    audit-result suffix, since both share the same column.
    """
    interval_len = _calculate_interval_str_len(batched_intervals)
    audit_len = _calculate_audit_str_len(batched_intervals)
    return interval_len + audit_len
0 commit comments