import torch
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, average_precision_score


def train_multilabel(features, targets, classes, train_split, test_split, C=1.0, ignore_hard_examples=True, after_ReLU=False, normalize_L2=False):
    print('\nHyperparameters:\n - C: {}\n - after_ReLU: {}\n - normL2: {}'.format(C, after_ReLU, normalize_L2))
    train_APs = []
    test_APs = []
    for class_id in range(len(classes)):
        # one binary linear SVM per class
        classifier = SVC(C=C, kernel='linear')  # http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
        if ignore_hard_examples:
            # drop samples labeled 0 ("difficult") for this class
            train_masks = (targets[train_split][:, class_id] != 0).view(-1, 1)
            train_features = torch.masked_select(features[train_split], train_masks.expand_as(features[train_split])).view(-1, features[train_split].size(1))
            train_targets = torch.masked_select(targets[train_split], train_masks.expand_as(targets[train_split])).view(-1, targets[train_split].size(1))
            test_masks = (targets[test_split][:, class_id] != 0).view(-1, 1)
            test_features = torch.masked_select(features[test_split], test_masks.expand_as(features[test_split])).view(-1, features[test_split].size(1))
            test_targets = torch.masked_select(targets[test_split], test_masks.expand_as(targets[test_split])).view(-1, targets[test_split].size(1))
        else:
            train_features = features[train_split]
            train_targets = targets[train_split]
            test_features = features[test_split]
            test_targets = targets[test_split]
        if after_ReLU:
            # clamp negative activations to zero, as a ReLU would
            train_features[train_features < 0] = 0
            test_features[test_features < 0] = 0
        if normalize_L2:
            # L2-normalize each feature vector
            train_norm = torch.norm(train_features, p=2, dim=1).unsqueeze(1)
            train_features = train_features.div(train_norm.expand_as(train_features))
            test_norm = torch.norm(test_features, p=2, dim=1).unsqueeze(1)
            test_features = test_features.div(test_norm.expand_as(test_features))
        train_X = train_features.numpy()
        train_y = (train_targets[:, class_id] != -1).numpy()  # positives (and difficult examples, if not ignored) count as the positive class
        test_X = test_features.numpy()
        test_y = (test_targets[:, class_id] != -1).numpy()
        classifier.fit(train_X, train_y)  # train parameters of the classifier
        train_preds = classifier.predict(train_X)
        train_acc = accuracy_score(train_y, train_preds) * 100
        train_AP = average_precision_score(train_y, train_preds) * 100  # AP over hard predictions; decision_function scores would give a finer ranking
        train_APs.append(train_AP)
        test_preds = classifier.predict(test_X)
        test_acc = accuracy_score(test_y, test_preds) * 100
        test_AP = average_precision_score(test_y, test_preds) * 100
        test_APs.append(test_AP)
        print('class "{}" ({}/{}):'.format(classes[class_id], test_y.sum(), test_y.shape[0]))
        print(' - {:8}: acc {:.2f}, AP {:.2f}'.format(train_split, train_acc, train_AP))
        print(' - {:8}: acc {:.2f}, AP {:.2f}'.format(test_split, test_acc, test_AP))
    print('all classes:')
    print(' - {:8}: mAP {:.4f}'.format(train_split, sum(train_APs) / len(classes)))
    print(' - {:8}: mAP {:.4f}'.format(test_split, sum(test_APs) / len(classes)))
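
# Minimal self-contained sanity check (illustrative sketch, not part of the
# original script): builds random features and VOC-style targets in {-1, 0, 1}
# for two dummy classes, just to show the input format train_multilabel expects
# (dicts of torch tensors keyed by split name). The helper name is hypothetical.
def _sanity_check():
    torch.manual_seed(0)
    classes = ['aeroplane', 'bicycle']  # any subset of the 20 VOC2007 classes
    features = {'train': torch.randn(40, 8), 'test': torch.randn(20, 8)}
    targets = {split: (torch.rand(feats.size(0), len(classes)) * 3).long().float() - 1
               for split, feats in features.items()}  # entries in {-1, 0, 1}
    train_multilabel(features, targets, classes, 'train', 'test', C=1.0)
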
##########################################################################
# main
##########################################################################