import argparse
import os
import random

import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

# `parser`, `DatasetFromHdf5`, `Net`, `train`, and `save_checkpoint` are
# defined elsewhere in the original module.
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("data/train.h5")
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads,
                                      batch_size=opt.batchSize, shuffle=True)

    print("===> Building model")
    model = Net()
    # sum-reduced MSE (equivalent to reduction='sum' in newer PyTorch)
    criterion = nn.MSELoss(size_average=False)

    print("===> Setting GPU")
    if cuda:
        model = torch.nn.DataParallel(model).cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.SGD(model.parameters(), lr=opt.lr,
                          momentum=opt.momentum, weight_decay=opt.weight_decay)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
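The `save_checkpoint` helper called above is not shown on this page. A minimal sketch of what it might look like, assuming the checkpoint layout (an `epoch` key plus the full `model` object) that the resume branch above expects:

def save_checkpoint(model, epoch):
    # Hypothetical sketch: persist the whole model object so that the
    # resume branch can call checkpoint["model"].state_dict() later.
    model_out_path = "checkpoint/model_epoch_{}.pth".format(epoch)
    if not os.path.exists("checkpoint/"):
        os.makedirs("checkpoint/")
    torch.save({"epoch": epoch, "model": model}, model_out_path)
    print("Checkpoint saved to {}".format(model_out_path))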
Example source code for Python cudnn.benchmark
def main():
    means = (104, 117, 123)  # only supports VOC means for now
    exp_name = 'CONV-SSD-{}-{}-bs-{}-{}-lr-{:05d}'.format(args.dataset, args.input_type,
                                                          args.batch_size, args.basenet[:-14],
                                                          int(args.lr * 100000))
    args.save_root += args.dataset + '/'
    args.data_root += args.dataset + '/'
    args.listid = '01'  # would be useful for JHMDB-21
    print('Exp name', exp_name, args.listid)

    for iteration in [int(itr) for itr in args.eval_iter.split(',')]:
        log_file = open(args.save_root + 'cache/' + exp_name + "/testing-{:d}.log".format(iteration), "w", 1)
        log_file.write(exp_name + '\n')
        trained_model_path = args.save_root + 'cache/' + exp_name + '/ssd300_ucf24_' + repr(iteration) + '.pth'
        log_file.write(trained_model_path + '\n')
        num_classes = len(CLASSES) + 1  # +1 for the background class
        net = build_ssd(300, num_classes)  # initialize SSD
        net.load_state_dict(torch.load(trained_model_path))
        net.eval()
        if args.cuda:
            net = net.cuda()
            cudnn.benchmark = True
        print('Finished loading model %d !' % iteration)

        # Load dataset
        dataset = UCF24Detection(args.data_root, 'test', BaseTransform(args.ssd_dim, means),
                                 AnnotationTransform(), input_type=args.input_type,
                                 full_test=True)

        # Evaluation
        torch.cuda.synchronize()
        tt0 = time.perf_counter()
        log_file.write('Testing net \n')
        mAP, ap_all, ap_strs = test_net(net, args.save_root, exp_name, args.input_type,
                                        dataset, iteration, num_classes)
        for ap_str in ap_strs:
            print(ap_str)
            log_file.write(ap_str + '\n')
        ptr_str = '\nMEANAP:::=>' + str(mAP) + '\n'
        print(ptr_str)
        log_file.write(ptr_str)
        torch.cuda.synchronize()
        print('Complete set time {:0.2f}'.format(time.perf_counter() - tt0))
        log_file.close()
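Every example on this page follows the same pattern: set `torch.backends.cudnn.benchmark = True` once, before the first forward pass, so cuDNN auto-tunes its convolution algorithms for the input shapes it sees. A minimal, self-contained sketch of that pattern (the model and sizes here are illustrative only):

import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn

# Enable cuDNN autotuning; it pays off when input shapes stay constant,
# because cuDNN benchmarks candidate convolution algorithms on the first
# call and caches the fastest one per shape.
cudnn.benchmark = True

if torch.cuda.is_available():
    net = nn.Conv2d(3, 16, kernel_size=3, padding=1).cuda()
    x = torch.randn(8, 3, 224, 224).cuda()  # fixed-size batch
    y = net(x)  # first call triggers the algorithm search

Note that with varying input sizes the autotuning cost is paid again for each new shape, which can make benchmark mode slower overall.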
Source: OneShotMiniImageNetBuilder.py, from the MatchingNetworks project by gitabcworld.
def build_experiment(self, batch_size, classes_per_set, samples_per_class, channels, fce):
    """
    :param batch_size: The experiment batch size
    :param classes_per_set: An integer indicating the number of classes per support set
    :param samples_per_class: An integer indicating the number of samples per class
    :param channels: The number of image channels
    :param fce: Whether to use full context embeddings or not
    :return: None. The data loaders, the matching network, and the learning
             parameters are stored as attributes on the builder.
    """
    # Data loaders
    self.train_loader = torch.utils.data.DataLoader(self.dataTrain, batch_size=batch_size,
                                                    shuffle=True, num_workers=4)
    self.val_loader = torch.utils.data.DataLoader(self.dataVal, batch_size=batch_size,
                                                  shuffle=True, num_workers=4)
    self.test_loader = torch.utils.data.DataLoader(self.dataTest, batch_size=batch_size,
                                                   shuffle=True, num_workers=4)

    # Initialize parameters
    self.classes_per_set = classes_per_set
    self.samples_per_class = samples_per_class
    self.keep_prob = torch.FloatTensor(1)

    # Initialize model
    self.matchingNet = MatchingNetwork(batch_size=batch_size,
                                       keep_prob=self.keep_prob, num_channels=channels,
                                       fce=fce,
                                       num_classes_per_set=classes_per_set,
                                       num_samples_per_class=samples_per_class,
                                       nClasses=0, image_size=84)
    self.isCudaAvailable = torch.cuda.is_available()
    if self.isCudaAvailable:
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(0)
        self.matchingNet.cuda()

    # Learning parameters
    self.optimizer = 'adam'
    self.lr = 1e-03
    self.current_lr = 1e-03
    self.lr_decay = 1e-6
    self.wd = 1e-4
    self.total_train_iter = 0
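A hedged usage sketch for this builder, assuming the constructor takes the three datasets referenced above (the exact constructor signature is not shown on this page):

# Hypothetical usage; the builder's constructor arguments are assumed here.
builder = OneShotMiniImageNetBuilder(dataTrain, dataVal, dataTest)
builder.build_experiment(batch_size=32, classes_per_set=5,
                         samples_per_class=1, channels=3, fce=True)
# After this call the loaders, the MatchingNetwork, and the learning
# parameters are available as attributes, e.g. builder.matchingNet.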
def __init__(self, num_items,
             embedding_dim=32,
             kernel_width=3,
             dilation=1,
             num_layers=1,
             nonlinearity='tanh',
             residual_connections=True,
             sparse=False,
             benchmark=True,
             item_embedding_layer=None):
    super(CNNNet, self).__init__()

    cudnn.benchmark = benchmark

    self.embedding_dim = embedding_dim
    self.kernel_width = _to_iterable(kernel_width, num_layers)
    self.dilation = _to_iterable(dilation, num_layers)
    if nonlinearity == 'tanh':
        self.nonlinearity = F.tanh
    elif nonlinearity == 'relu':
        self.nonlinearity = F.relu
    else:
        raise ValueError('Nonlinearity must be one of (tanh, relu)')
    self.residual_connections = residual_connections

    if item_embedding_layer is not None:
        self.item_embeddings = item_embedding_layer
    else:
        self.item_embeddings = ScaledEmbedding(num_items, embedding_dim,
                                               padding_idx=PADDING_IDX,
                                               sparse=sparse)

    self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse,
                                     padding_idx=PADDING_IDX)

    self.cnn_layers = [
        nn.Conv2d(embedding_dim,
                  embedding_dim,
                  (_kernel_width, 1),
                  dilation=(_dilation, 1))
        for (_kernel_width, _dilation) in zip(self.kernel_width, self.dilation)
    ]

    # Register each convolution so its parameters are visible to the module
    # (a plain Python list is not tracked by nn.Module).
    for i, layer in enumerate(self.cnn_layers):
        self.add_module('cnn_{}'.format(i), layer)
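`_to_iterable` is used above to broadcast a scalar hyperparameter across all layers. A minimal sketch of what it plausibly does (the real helper lives elsewhere in the project):

def _to_iterable(val, num):
    # Hypothetical sketch: pass tuples/lists through unchanged, and repeat
    # a scalar once per layer so the zip() above pairs widths with dilations.
    try:
        iter(val)
        return val
    except TypeError:
        return (val,) * num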