def generate_metrics(self):
"""Given a model id and a set of thresholds, obtain the y values (true
class and predicted probability) and calculate metrics for the
model at each threshold.
:param batch_timestamp: timestamps of model batches
:type batch_timestamp: list
:returns: None -- always returns None as default
:rtype: None
"""
    # get the y-values
    y_values = self.get_y_values()
    # generate metrics at thresholds
    eval_metrics_pct = self.threshold_pct.apply(self.evaluate_model_at_threshold,
                                                args=(y_values['scores'],
                                                      y_values['y_true'],
                                                      True))
    eval_metrics_abs = self.threshold_abs.apply(self.evaluate_model_at_threshold,
                                                args=(y_values['scores'],
                                                      y_values['y_true'],
                                                      False))
    # build table of metrics
    eval_metrics = pd.concat([eval_metrics_pct, eval_metrics_abs])
    eval_metrics_long = pd.melt(eval_metrics, id_vars=['parameter'],
                                var_name='metric')
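    # Small illustration of the melt step (metric names here are illustrative;
    # the actual columns depend on what evaluate_model_at_threshold returns):
    #   wide:  parameter  precision  recall    long:  parameter  metric     value
    #          0.05       0.42       0.31             0.05       precision  0.42
    #                                                 0.05       recall     0.31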
    eval_metrics_long['unique_timestamp'] = self.model_id
    auc = self.compute_AUC(y_values['y_true'], y_values['scores'])
    # DataFrame.append was removed in pandas 2.0; add the AUC row with pd.concat instead
    auc_row = pd.DataFrame([{'parameter': 'roc',
                             'metric': 'auc',
                             'value': auc,
                             'unique_timestamp': self.model_id}])
    final_metrics = pd.concat([eval_metrics_long, auc_row], ignore_index=True)
    metrics_cols = ['parameter', 'metric', 'value', 'unique_timestamp']
    final_metrics = final_metrics[metrics_cols]
    return final_metrics
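
# --- Hypothetical usage sketch (not part of the original source) ---
# Assumes generate_metrics() is a method of an evaluator class, here called
# `ModelEvaluator`, which stores `model_id`, `threshold_pct`, and
# `threshold_abs` (pandas Series of thresholds) and implements
# get_y_values(), evaluate_model_at_threshold(), and compute_AUC().
#
#   evaluator = ModelEvaluator(model_id='2020-01-01 12:00:00',
#                              threshold_pct=pd.Series([0.01, 0.05, 0.10]),
#                              threshold_abs=pd.Series([100, 500, 1000]))
#   metrics = evaluator.generate_metrics()
#   # columns: parameter | metric | value | unique_timestamp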