logger.notice(f"Default performance parameters are set to recall@1_pct and bias metric to tpr_disparity!")
968
+
logger.notice("==> In case your experiment doesn't have those parameters Triage will use one of the available. <==")
925
969
stats=self.experiment_stats()
926
970
927
971
ifstats['implemented_fewer_splits'] ==1:
928
-
print(f"Temporal config suggests {stats['timesplits_from_temporal_config']} temporal splits, but experiment implemented only {stats['validation_splits']} splits. Was this intentional?")
972
+
logger.notice(f"Temporal config suggests {stats['timesplits_from_temporal_config']} temporal splits, but experiment implemented only {stats['validation_splits']} splits. Was this intentional?")
-    print(f'On average, your cohorts contained around {round(cohorts.cohort_size.mean())} entities with a baserate of {round(cohorts.baserate.mean(), 3)}')
+    logger.notice(f'On average, your cohorts contained around {round(cohorts.cohort_size.mean())} entities with a baserate of {round(cohorts.baserate.mean(), 3)}')
-    print(f"You built {stats['features']} features organized into {stats['feature_groups']} groups/blocks")
+    logger.notice(f"You built {stats['features']} features organized into {stats['feature_groups']} groups/blocks")
-    print(f"Your model grid specification contained {stats['grid_size']} model types with {stats['models_needed']} individual models")
+    logger.notice(f"Your model grid specification contained {stats['grid_size']} model types with {stats['models_needed']} individual models")

     ## Models
     num_models = len(self.models())

     if num_models < stats['models_needed']:
-        print(f"However, the experiment only built {num_models} models. You are missing {stats['models_needed'] - num_models} models")
+        logger.notice(f"However, the experiment only built {num_models} models. You are missing {stats['models_needed'] - num_models} models")
     else:
-        print(f"You successfully built all the {num_models} models")
+        logger.notice(f"You successfully built all the {num_models} models")
print(f"Your models acheived a best average {metric}{parameter} of {round(best_performance, 3)} over the {stats['validation_splits']} validation splits, with the Model Group {best_model_group},{best_model_type}. Note that model selection is more nuanced than average predictive performance over time. You could use Audition for model selection.")
998
+
999
+
# because we could change the value of the default parameter in case it doesn't exist,
1000
+
# it is safer to take it from the object itself.
1001
+
logger.notice(f"Your models achieved a best average {self.performance_metric}{self.threshold} of {round(best_performance, 3)} over the {stats['validation_splits']} validation splits, with the Model Group {best_model_group},{best_model_type}. Note that model selection is more nuanced than average predictive performance over time. You could use Audition for model selection.")
print(f"You created {len(res)} subsets of your cohort -- {', '.join([x['subset'] forxinres])}")
1017
+
logger.notice(f"You created {len(res)} subsets of your cohort -- {', '.join([x['subset'] forxinres])}")
973
1018
fordinres:
974
-
print(f"For subset '{d['subset'] }', Model Group {d['best_mod'][0]}, {d['best_mod'][1]} achieved the best average {metric}{parameter} of {d['best_perf']}")
1019
+
logger.notice(f"For subset '{d['subset'] }', Model Group {d['best_mod'][0]}, {d['best_mod'][1]} achieved the best average {metric}{parameter} of {d['best_perf']}")