def print_training_summary(self, gs):
    """Print a human-readable summary of a fitted grid search to stdout.

    Args:
        gs: a fitted sklearn GridSearchCV-like object exposing ``best_score_``,
            ``best_params_`` and (for verbose output) ``grid_scores_``.

    Returns:
        None. This function only writes to stdout; it must not mutate ``gs``.
    """
    print('The best CV score from GridSearchCV (by default averaging across k-fold CV) for ' + self.output_column + ' is:')
    if self.took_log_of_y:
        print('    Note that this score is calculated using the natural logs of the y values.')
    print(gs.best_score_)
    print('The best params were')

    # Remove 'final_model__model' from what we print- it's redundant with model name,
    # and is difficult to read quickly in a list since it's a python object.
    if 'model' in gs.best_params_:
        printing_copy = {
            k: (utils_models.get_name_from_model(v) if k == 'model' else v)
            for k, v in gs.best_params_.items()
        }
    else:
        printing_copy = gs.best_params_

    print(printing_copy)

    if self.verbose:
        print('Here are all the hyperparameters that were tried:')
        # NOTE(review): grid_scores_ was removed in scikit-learn 0.20 (replaced
        # by cv_results_); this path assumes an older sklearn — confirm.
        raw_scores = gs.grid_scores_
        # Best score first; each entry is (params, mean_score, fold_scores).
        sorted_scores = sorted(raw_scores, key=lambda x: x[1], reverse=True)
        for score in sorted_scores:
            # Bug fix: build a display copy of the params instead of mutating
            # score[0] in place, which previously clobbered the parameter
            # dicts stored on the caller's gs object.
            display_params = {
                k: (utils_models.get_name_from_model(v) if k == 'model' else v)
                for k, v in score[0].items()
            }
            print((display_params,) + tuple(score[1:]))
# NOTE(review): removed non-code page residue ("评论列表" / "文章目录" — scraped
# web-page navigation text) that was not Python and broke the file's syntax.