def test_pytorch_backward(num_classes):
    """Check that PyTorchModel.backward produces a gradient with the
    input's shape whose values match the analytic gradient of a
    spatial-mean network."""
    bounds = (0, 255)
    channels = num_classes

    class MeanNet(nn.Module):
        def __init__(self):
            super(MeanNet, self).__init__()

        def forward(self, x):
            # average over both spatial axes, leaving per-channel logits
            # (old-PyTorch mean keeps the reduced dim, hence the squeezes)
            x = torch.mean(x, 3)
            x = torch.squeeze(x, dim=3)
            x = torch.mean(x, 2)
            x = torch.squeeze(x, dim=2)
            return x

    model = PyTorchModel(
        MeanNet(),
        bounds=bounds,
        num_classes=num_classes,
        cuda=False)

    image = np.random.rand(channels, 5, 5).astype(np.float32)
    grad_pre = np.random.rand(num_classes).astype(np.float32)

    grad = model.backward(grad_pre, image)
    assert grad.shape == image.shape

    # each logit is the mean of 25 pixels, so its gradient spreads
    # grad_pre / 25 uniformly over the 5x5 plane of its channel
    expected = np.repeat(np.repeat(
        (grad_pre / 25.).reshape((-1, 1, 1)),
        5, axis=1), 5, axis=2)
    np.testing.assert_almost_equal(grad, expected)
# Example source code for Python's torch.squeeze()
def bn_model_pytorch():
    """Same as bn_model but with PyTorch."""
    import torch
    import torch.nn as nn

    bounds = (0, 1)
    num_classes = 10

    class MeanNet(nn.Module):
        def forward(self, x):
            assert isinstance(x.data, torch.FloatTensor)
            # collapse both spatial dimensions by averaging
            # (old-PyTorch mean keeps the reduced dim, hence the squeezes)
            x = torch.mean(x, 3)
            x = torch.squeeze(x, dim=3)
            x = torch.mean(x, 2)
            x = torch.squeeze(x, dim=2)
            return x

    return PyTorchModel(
        MeanNet(),
        bounds=bounds,
        num_classes=num_classes,
        cuda=False)
def demo(img_path):
    """Run the crowd-counting network on a grayscale image and display
    the predicted density heat map.

    Parameters
    ----------
    img_path : str
        Path to the input image; loaded via ``read_gray_img``.
    """
    net = predict_net()
    net.load_state_dict(torch.load('checkpoint/crowd_net2.pth'))
    input_img = read_gray_img(img_path)
    # scale pixel values to [0, 1] before feeding the network
    input_img = torch.autograd.Variable(torch.Tensor(input_img / 255.0))
    print(input_img.size())
    heat_map = net.forward(input_img)
    # was `print heat_map.size()` (Python-2 statement): a SyntaxError on
    # Python 3 and inconsistent with the print() call above
    print(heat_map.size())
    heat_map = torch.squeeze(heat_map)
    heat_map = heat_map.data.numpy()
    plt.imshow(heat_map, cmap='hot')
    plt.show()
def build_loss(self, cls_score, bbox_pred, roi_data):
    """Compute the detection-head losses.

    Parameters
    ----------
    cls_score : per-RoI class scores, shape (num_rois, num_classes).
    bbox_pred : per-RoI box-regression predictions.
    roi_data : sequence where roi_data[1] holds the RoI class labels
        (0 = background) and roi_data[2:] unpacks to
        (bbox_targets, bbox_inside_weights, bbox_outside_weights).

    Returns
    -------
    (cross_entropy, loss_box) : weighted classification loss and
        smooth-L1 box-regression loss.
    """
    # classification loss
    label = roi_data[1].squeeze()
    # foreground = non-zero label; background = label 0
    fg_cnt = torch.sum(label.data.ne(0))
    bg_cnt = label.data.numel() - fg_cnt
    # for log
    if self.debug:
        maxv, predict = cls_score.data.max(1)
        # NOTE(review): the [:fg_cnt] / [fg_cnt:] slicing assumes the RoI
        # sampler places all foreground RoIs first -- confirm upstream.
        self.tp = torch.sum(predict[:fg_cnt].eq(label.data[:fg_cnt])) if fg_cnt > 0 else 0
        self.tf = torch.sum(predict[fg_cnt:].eq(label.data[fg_cnt:]))
        self.fg_cnt = fg_cnt
        self.bg_cnt = bg_cnt
    # down-weight the background class by the fg/bg ratio to counter
    # class imbalance; all foreground classes keep weight 1
    ce_weights = torch.ones(cls_score.size()[1])
    ce_weights[0] = float(fg_cnt) / bg_cnt
    ce_weights = ce_weights.cuda()
    cross_entropy = F.cross_entropy(cls_score, label, weight=ce_weights)
    # bounding box regression L1 loss
    # mask predictions and targets so only foreground boxes contribute
    bbox_targets, bbox_inside_weights, bbox_outside_weights = roi_data[2:]
    bbox_targets = torch.mul(bbox_targets, bbox_inside_weights)
    bbox_pred = torch.mul(bbox_pred, bbox_inside_weights)
    # the 1e-4 term guards against division by zero when fg_cnt == 0
    loss_box = F.smooth_l1_loss(bbox_pred, bbox_targets, size_average=False) / (fg_cnt + 1e-4)
    return cross_entropy, loss_box
def forward(self,x):
    """Forward pass of the memory-bank network: a stem convolution,
    then for each of the stages a set of modules that read from and
    write to shared memory banks, followed by global average pooling
    and a log-softmax classifier.

    NOTE(review): the bank bookkeeping assumes out.size(1) is a
    multiple of bs * self.N at each stage -- confirm with the
    constructor that builds self.D / self.bank_sizes.
    """
    # Stem convolution
    out = self.conv1(x)
    # Allocate memory banks
    m = [[None for _ in range(d)] for d in self.D]
    module_index = 0
    # One iteration per stage; trans1/trans2 follow the first two stages,
    # the last stage has no transition module.
    for i,(incoming_channels,outgoing_channels,g_values, bs, trans) in enumerate(zip(
            self.incoming,self.outgoing, self.G, self.bank_sizes, [self.trans1,self.trans2,None])):
        # Write to initial memory banks
        for j in range(out.size(1) // (bs * self.N) ):
            m[i][j] = out[:, j * bs * self.N : (j + 1) * bs * self.N]
        for read,write,g in zip(incoming_channels,outgoing_channels,g_values):
            # Cat read tensors
            inp = torch.cat([m[i][index] for index in read], 1)
            # Apply module and increment op index
            out = self.mod[module_index](inp)
            module_index += 1
            # Scatter slices of the module output into the target banks.
            for j, w in enumerate(write):
                # Allocate dat memory if it's None
                if m[i][w] is None:
                    m[i][w] = out[:, (j % (g // bs)) * (bs * self.N) : (j % (g // bs) + 1) * (bs * self.N)]
                # Else, if already written, add to it.
                else:
                    m[i][w] = m[i][w] + out[:, (j % (g // bs)) * (bs * self.N) : (j % (g // bs) + 1) * (bs * self.N)]
        # Concatenate all banks of this stage; apply the transition
        # module (presumably a downsample) when one exists.
        if trans is not None:
            out = trans(torch.cat(m[i], 1))
        else:
            out = torch.cat(m[i], 1)
    # Global average pool over the remaining spatial extent, then classify.
    out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), out.size(2)))
    out = F.log_softmax(self.fc(out))
    return out
def testModulus(self):
    """Modulus output must equal the L2 norm over the last
    (real/imaginary) axis, with and without JIT."""
    for jit in (True, False):
        modulus = sl.Modulus(jit=jit)
        x = torch.cuda.FloatTensor(100, 10, 4, 2).copy_(torch.rand(100, 10, 4, 2))
        y = modulus(x)
        # reference: sqrt(re^2 + im^2) computed directly
        reference = torch.squeeze(torch.sqrt(torch.sum(x * x, 3)))
        computed = y.narrow(3, 0, 1)
        self.assertLess((reference - computed).abs().max(), 1e-6)
def __bool__(self):
    """Tensor truthiness: empty tensors are False, one-element tensors
    compare their single value against zero, and anything larger is
    ambiguous and raises RuntimeError."""
    count = self.numel()
    if count == 0:
        return False
    if count == 1:
        return torch.squeeze(self)[0] != 0
    raise RuntimeError("bool value of " + torch.typename(self) +
                       " containing more than one value is ambiguous")
def u_intnet(self, h_v, m_v, opt):
    """IntNet node update: concatenate the hidden state, the optional
    node features opt['x_v'] (skipped when zero-dimensional), and the
    squeezed aggregated message, then apply the learned update module."""
    pieces = [h_v]
    if opt['x_v'].ndimension():
        pieces.append(opt['x_v'])
    pieces.append(torch.squeeze(m_v))
    return self.learn_modules[0](torch.cat(pieces, 1))
def u_mpnn(self, h_v, m_v, opt={}):
    """MPNN node update: flatten nodes into a batch, run the learned
    recurrent module (its first output is the new state), and restore
    the original (batch, nodes, features) shape."""
    flat_h = h_v.view(-1, h_v.size(2))
    flat_m = m_v.view(-1, m_v.size(2))
    # the [None, ...] adds the sequence dim the recurrent module expects
    updated = self.learn_modules[0](flat_m[None, ...], flat_h[None, ...])[0]
    return torch.squeeze(updated).view(h_v.size())
def forward(self, g, h_in, e):
    """Message-passing forward pass over a batched graph.

    Parameters (assumed shapes -- confirm against the data loader):
    g    : adjacency/mask tensor, presumably (batch, nodes, nodes).
    h_in : node features, (batch, nodes, in_features).
    e    : edge features with e.size(3) feature channels.

    Returns the readout over all layer states; log-softmax is applied
    for classification models.
    """
    h = []
    # Padding to some larger dimension d
    # zero-pad node features up to self.args['out'] channels
    h_t = torch.cat([h_in, Variable(
        torch.zeros(h_in.size(0), h_in.size(1), self.args['out'] - h_in.size(2)).type_as(h_in.data))], 2)
    h.append(h_t.clone())
    # Layer
    for t in range(0, self.n_layers):
        e_aux = e.view(-1, e.size(3))
        h_aux = h[t].view(-1, h[t].size(2))
        # message function over all (node, neighbour) pairs
        m = self.m[0].forward(h[t], h_aux, e_aux)
        m = m.view(h[0].size(0), h[0].size(1), -1, m.size(1))
        # Nodes without edge set message to 0
        m = torch.unsqueeze(g, 3).expand_as(m) * m
        # aggregate messages by summing over neighbours
        m = torch.squeeze(torch.sum(m, 1))
        # update function produces the next hidden state
        h_t = self.u[0].forward(h[t], m)
        # Delete virtual nodes
        # (nodes whose original features are all zero are masked out)
        h_t = (torch.sum(h_in, 2).expand_as(h_t) > 0).type_as(h_t) * h_t
        h.append(h_t)
    # Readout
    res = self.r.forward(h)
    if self.type == 'classification':
        res = nn.LogSoftmax()(res)
    return res
def validate(val_loader, model, criterion, evaluation, logger=None):
    """Evaluate ``model`` over ``val_loader`` and return average accuracy.

    Parameters
    ----------
    val_loader : iterable yielding (g, h, e, target) graph batches.
    model : network mapping (g, h, e) to class scores.
    criterion : loss function applied to (output, target).
    evaluation : callable returning top-k accuracies for a batch.
    logger : optional; receives epoch-level loss/accuracy values.

    Returns
    -------
    Average top-1 accuracy over the validation set.
    """
    losses = AverageMeter()
    accuracies = AverageMeter()
    # switch to evaluate mode (disables dropout / batch-norm updates)
    model.eval()
    # NOTE: removed dead store `end = time.time()` -- the value was never
    # read anywhere in this function.
    for i, (g, h, e, target) in enumerate(val_loader):
        # Prepare input data: labels arrive with a singleton dim
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)
        # Compute output
        output = model(g, h, e)
        # Logs: accumulate per-batch loss and top-1 accuracy
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])
        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))
    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))
    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)
    return accuracies.avg
def validate(val_loader, model, criterion, evaluation, logger=None):
    """Run one evaluation pass and return the mean top-1 accuracy."""
    losses = AverageMeter()
    accuracies = AverageMeter()
    # inference mode: no dropout, frozen batch-norm statistics
    model.eval()
    for batch_idx, (g, h, e, target) in enumerate(val_loader):
        # labels arrive with an extra singleton dim; squeeze and cast
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)
        output = model(g, h, e)
        batch_loss = criterion(output, target)
        batch_acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])
        losses.update(batch_loss.data[0], g.size(0))
        accuracies.update(batch_acc.data[0], g.size(0))
    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))
    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)
    return accuracies.avg
def validate(val_loader, model, criterion, evaluation, logger=None):
    """Single evaluation epoch; returns the running mean of top-1 accuracy."""
    losses = AverageMeter()
    accuracies = AverageMeter()
    # evaluation mode so dropout/batch-norm behave deterministically
    model.eval()
    for step, batch in enumerate(val_loader):
        g, h, e, target = batch
        # drop the singleton label dim and cast to class indices
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)
        output = model(g, h, e)
        loss_value = criterion(output, target)
        top1 = Variable(evaluation(output.data, target.data, topk=(1,))[0])
        losses.update(loss_value.data[0], g.size(0))
        accuracies.update(top1.data[0], g.size(0))
    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))
    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)
    return accuracies.avg
def validate(val_loader, model, criterion, evaluation, logger=None):
    """Evaluate ``model`` on ``val_loader``; returns the average top-1
    accuracy and optionally logs epoch-level loss/accuracy via ``logger``.

    Parameters
    ----------
    val_loader : iterable yielding (g, h, e, target) graph batches.
    model : network mapping (g, h, e) to class scores.
    criterion : loss function applied to (output, target).
    evaluation : callable returning top-k accuracies for a batch.
    logger : optional epoch-level value logger.
    """
    losses = AverageMeter()
    accuracies = AverageMeter()
    # switch to evaluate mode (disables dropout / batch-norm updates)
    model.eval()
    # NOTE: dropped the dead store `end = time.time()` that the original
    # carried over from a timing loop -- it was never read.
    for i, (g, h, e, target) in enumerate(val_loader):
        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)
        # Compute output
        output = model(g, h, e)
        # Logs: accumulate per-batch loss and top-1 accuracy
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])
        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))
    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))
    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)
    return accuracies.avg
def validate(val_loader, model, criterion, evaluation, logger=None):
    """Validation loop: average loss and top-1 accuracy over the loader."""
    losses = AverageMeter()
    accuracies = AverageMeter()
    # put the network into evaluation mode before scoring
    model.eval()
    for idx, (g, h, e, target) in enumerate(val_loader):
        # normalize label shape/dtype for the loss function
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)
        scores = model(g, h, e)
        current_loss = criterion(scores, target)
        current_acc = Variable(evaluation(scores.data, target.data, topk=(1,))[0])
        losses.update(current_loss.data[0], g.size(0))
        accuracies.update(current_acc.data[0], g.size(0))
    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))
    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)
    return accuracies.avg
def m_mpnn(self, h_v, h_w, opt={}, e_vw=None):
    """placeholder"""
def forward(self, x):
    """DenseNet-style head: stem conv, three dense blocks (the first two
    followed by transitions), BN+ReLU, 8x8 global average pooling, and a
    log-softmax classifier."""
    features = self.conv1(x)
    features = self.trans1(self.dense1(features))
    features = self.trans2(self.dense2(features))
    features = self.dense3(features)
    # 8x8 average pool collapses the spatial dims to 1x1; squeeze drops them
    pooled = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(features)), 8))
    return F.log_softmax(self.fc(pooled))
def forward(self, x):
    """ResNeXt-style head: stem conv with BN+ReLU, three stages, average
    pooling, squeeze, then log-softmax over the class scores."""
    out = self.conv_1_3x3(x)
    out = F.relu(self.bn_1(out), inplace=True)
    # run the three residual stages in sequence
    for stage in (self.stage_1, self.stage_2, self.stage_3):
        out = stage(out)
    out = self.avgpool(out)
    out = torch.squeeze(out)
    return F.log_softmax(out)
def forward(self, x):
    """Classifier forward pass: stem conv + BN/ReLU, stages 1-3,
    average pooling, squeeze to (batch, classes), log-softmax."""
    feats = F.relu(self.bn_1(self.conv_1_3x3(x)), inplace=True)
    feats = self.stage_1(feats)
    feats = self.stage_2(feats)
    feats = self.stage_3(feats)
    # pool away the spatial dims, then drop the resulting singletons
    pooled = torch.squeeze(self.avgpool(feats))
    return F.log_softmax(pooled)
def forward(self, x):
    """Forward pass using explicit .forward calls: stem + BN/ReLU, three
    stages, an 8x8 average pool (stride 1), squeeze, and log-softmax."""
    stem = F.relu(self.bn_1.forward(self.conv_1_3x3.forward(x)), inplace=True)
    feats = stem
    # keep explicit .forward calls, matching the original (bypasses hooks)
    for stage in (self.stage_1, self.stage_2, self.stage_3):
        feats = stage.forward(feats)
    # 8x8 average pool with stride 1 collapses the spatial map to 1x1
    pooled = torch.squeeze(F.avg_pool2d(feats, 8, 1))
    return F.log_softmax(pooled)