def calc_model_characteristics(self, performCV=True):
# Determine key metrics to analyze the classification model. These
# are stored in the classification_output series object belonginf to
# this class.
for metric in [self.scoring_metric]+self.additional_display_metrics:
#Determine for both test and train, except predict:
for key,data in self.dp.items():
if key!='predict':
name = '%s_%s'%(metric,key)
#Case where probabilities to be passed as arguments
if base_classification.metrics_map[metric][2]:
self.classification_output[name] = \
base_classification.metrics_map[metric][0](
data[self.datablock.target],
self.predictions_probabilities[key])
#case where class predictions to be passed as arguments
else:
self.classification_output[name] = \
base_classification.metrics_map[metric][0](
data[self.datablock.target],
self.predictions_class[key])
#Determine confusion matrix:
name = 'ConfusionMatrix_%s'%key
self.classification_output[name] = pd.crosstab(
data[self.datablock.target],
self.predictions_class[key]
).to_string()
if performCV:
cv_score = self.KFold_CrossValidation(
scoring_metric=self.scoring_metric)
else:
cv_score = {
'mean_error': 0.0,
'std_error': 0.0
}
self.classification_output['CVMethod'] = \
'KFold - ' + str(self.cv_folds)
self.classification_output['CVScore_mean'] = cv_score['mean_error']
self.classification_output['CVScore_std'] = cv_score['std_error']
self.classification_output['Predictors'] = str(self.predictors)
评论列表
文章目录