def forward(self, x):
    for name, module in self.base._modules.items():
        if name == 'avgpool':
            break
        x = module(x)
    if self.cut_at_pooling:
        return x
    x = F.avg_pool2d(x, x.size()[2:])
    x = x.view(x.size(0), -1)
    if self.has_embedding:
        x = self.feat(x)
        x = self.feat_bn(x)
    if self.norm:
        x = F.normalize(x)
    elif self.has_embedding:
        x = F.relu(x)
    if self.dropout > 0:
        x = self.drop(x)
    if self.num_classes > 0:
        x = self.classifier(x)
    return x
Python relu() usage examples (source code)
def __init__(self, num_points=2500):
    super(STN3d, self).__init__()
    self.num_points = num_points
    self.conv1 = nn.Conv1d(3, 64, 1)
    self.conv2 = nn.Conv1d(64, 128, 1)
    self.conv3 = nn.Conv1d(128, 1024, 1)
    self.mp1 = nn.MaxPool1d(num_points)
    self.fc1 = nn.Linear(1024, 512)
    self.fc2 = nn.Linear(512, 256)
    self.fc3 = nn.Linear(256, 9)
    self.relu = nn.ReLU()
    self.bn1 = nn.BatchNorm1d(64)
    self.bn2 = nn.BatchNorm1d(128)
    self.bn3 = nn.BatchNorm1d(1024)
    self.bn4 = nn.BatchNorm1d(512)
    self.bn5 = nn.BatchNorm1d(256)
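For orientation, here is a minimal usage sketch (not from the original project), assuming this __init__ is combined with the STN3d forward pass shown further down this page: the module takes a point cloud of shape (batch, 3, num_points) and regresses a 3x3 transform.

import torch

# Hypothetical usage; STN3d is assumed to be assembled from the __init__
# above plus the forward pass shown later on this page.
stn = STN3d(num_points=2500)
points = torch.rand(8, 3, 2500)   # (batch, xyz, num_points)
trans = stn(points)
print(trans.size())               # expected: torch.Size([8, 3, 3])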
def forward(self, x):
    residual = x
    bottleneck = self.conv_reduce(x)
    bottleneck = F.relu(self.bn_reduce(bottleneck), inplace=True)
    bottleneck = self.conv_conv(bottleneck)
    bottleneck = F.relu(self.bn(bottleneck), inplace=True)
    bottleneck = self.conv_expand(bottleneck)
    bottleneck = self.bn_expand(bottleneck)
    if self.downsample is not None:
        residual = self.downsample(x)
    return F.relu(residual + bottleneck, inplace=True)
def forward(self, x):
    if isinstance(x, list):
        x, is_list, features = x[0], True, x[1:]
    else:
        is_list, features = False, None
    residual = x
    conv_a = self.conv_a(x)
    bn_a = self.bn_a(conv_a)
    relu_a = F.relu(bn_a, inplace=True)
    conv_b = self.conv_b(relu_a)
    bn_b = self.bn_b(conv_b)
    if self.downsample is not None:
        residual = self.downsample(x)
    output = F.relu(residual + bn_b, inplace=True)
    if is_list:
        return [output] + features + [bn_a, bn_b]
    else:
        return output
def forward(self, x):
    if isinstance(x, list):
        assert len(x) == 1, 'The length of inputs must be one vs {}'.format(len(x))
        x, is_list = x[0], True
    else:
        x, is_list = x, False
    x = self.conv_1_3x3(x)
    x = F.relu(self.bn_1(x), inplace=True)
    if is_list: x = [x]
    x = self.stage_1(x)
    x = self.stage_2(x)
    x = self.stage_3(x)
    if is_list:
        x, features = x[0], x[1:]
    else:
        features = None
    x = self.avgpool(x)
    x = x.view(x.size(0), -1)
    cls = self.classifier(x)
    if is_list: return cls, features
    else: return cls
def __init__(self, mode, anchors=9, classes=80, depth=4,
             base_activation=F.relu,
             output_activation=F.sigmoid):
    super(SubNet, self).__init__()
    self.anchors = anchors
    self.classes = classes
    self.depth = depth
    self.base_activation = base_activation
    self.output_activation = output_activation
    self.subnet_base = nn.ModuleList([conv3x3(256, 256, padding=1)
                                      for _ in range(depth)])
    if mode == 'boxes':
        self.subnet_output = conv3x3(256, 4 * self.anchors, padding=1)
    elif mode == 'classes':
        # add an extra dim for confidence
        self.subnet_output = conv3x3(256, (1 + self.classes) * self.anchors, padding=1)
    self._output_layer_init(self.subnet_output.bias.data)
def forward(self, x1, x2):
    x1 = F.relu(self.bn1(self.conv1(x1)))
    x1 = F.relu(self.bn2(self.conv2(x1)))
    x1 = F.relu(self.bn3(self.conv3(x1)))
    x1 = F.relu(self.bn4(self.conv4(x1)))
    x1 = F.relu(self.bn5(self.conv5(x1)))
    x1 = F.relu(self.bn6(self.conv6(x1)))
    if self.training:
        x2 = x1.clone()
    else:
        x2 = F.relu(self.bn1(self.conv1(x2)))
        x2 = F.relu(self.bn2(self.conv2(x2)))
        x2 = F.relu(self.bn3(self.conv3(x2)))
        x2 = F.relu(self.bn4(self.conv4(x2)))
        x2 = F.relu(self.bn5(self.conv5(x2)))
        x2 = F.relu(self.bn6(self.conv6(x2)))
    return x1, x2
def forward(self, mid_input, global_input):
    w = mid_input.size()[2]
    h = mid_input.size()[3]
    global_input = global_input.unsqueeze(2).unsqueeze(2).expand_as(mid_input)
    fusion_layer = torch.cat((mid_input, global_input), 1)
    fusion_layer = fusion_layer.permute(2, 3, 0, 1).contiguous()
    fusion_layer = fusion_layer.view(-1, 512)
    fusion_layer = self.bn1(self.fc1(fusion_layer))
    fusion_layer = fusion_layer.view(w, h, -1, 256)
    x = fusion_layer.permute(2, 3, 0, 1).contiguous()
    x = F.relu(self.bn2(self.conv1(x)))
    x = self.upsample(x)
    x = F.relu(self.bn3(self.conv2(x)))
    x = F.relu(self.bn4(self.conv3(x)))
    x = self.upsample(x)
    x = F.sigmoid(self.bn5(self.conv4(x)))
    x = self.upsample(self.conv5(x))
    return x
def forward(self, x):
    """
    Compute the forward pass of the composite transformation H(x),
    where x is the concatenation of the current and all preceding
    feature maps.
    """
    if self.bottleneck:
        out = self.conv1(F.relu(self.bn1(x)))
        if self.p > 0:
            out = F.dropout(out, p=self.p, training=self.training)
        out = self.conv2(F.relu(self.bn2(out)))
        if self.p > 0:
            out = F.dropout(out, p=self.p, training=self.training)
    else:
        out = self.conv2(F.relu(self.bn2(x)))
        if self.p > 0:
            out = F.dropout(out, p=self.p, training=self.training)
    return torch.cat((x, out), 1)
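The final torch.cat is what makes the block "dense": each layer's output is concatenated onto its input, so the channel count fed into later layers grows by the growth rate at every step. A small sketch of that growth, using made-up channel numbers rather than anything from the original model:

import torch

# Assumed numbers: 24 input channels, growth rate 12, three dense layers.
x = torch.rand(2, 24, 32, 32)
for _ in range(3):
    out = torch.rand(2, 12, 32, 32)   # stands in for the H(x) output above
    x = torch.cat((x, out), 1)        # channels: 24 -> 36 -> 48 -> 60
print(x.size())                       # torch.Size([2, 60, 32, 32])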
def forward(self, x):
    n_idx = 0
    c_idx = 1
    h_idx = 2
    w_idx = 3
    x = self.lookup_table(x)
    x = x.unsqueeze(c_idx)
    enc_outs = []
    for encoder in self.encoders:
        enc_ = F.relu(encoder(x))
        k_h = enc_.size()[h_idx]
        enc_ = F.max_pool2d(enc_, kernel_size=(k_h, 1))
        enc_ = enc_.squeeze(w_idx)
        enc_ = enc_.squeeze(h_idx)
        enc_outs.append(enc_)
    encoding = self.dropout(torch.cat(enc_outs, 1))
    return F.log_softmax(self.logistic(encoding))
def forward(self, X):
    h = F.relu(self.conv1_1(X))
    h = F.relu(self.conv1_2(h))
    relu1_2 = h
    h = F.max_pool2d(h, kernel_size=2, stride=2)
    h = F.relu(self.conv2_1(h))
    h = F.relu(self.conv2_2(h))
    relu2_2 = h
    h = F.max_pool2d(h, kernel_size=2, stride=2)
    h = F.relu(self.conv3_1(h))
    h = F.relu(self.conv3_2(h))
    h = F.relu(self.conv3_3(h))
    relu3_3 = h
    h = F.max_pool2d(h, kernel_size=2, stride=2)
    h = F.relu(self.conv4_1(h))
    h = F.relu(self.conv4_2(h))
    h = F.relu(self.conv4_3(h))
    relu4_3 = h
    return [relu1_2, relu2_2, relu3_3, relu4_3]
def forward(self, x):
    en0 = self.c0(x)
    en1 = self.bnc1(self.c1(F.leaky_relu(en0, negative_slope=0.2)))
    en2 = self.bnc2(self.c2(F.leaky_relu(en1, negative_slope=0.2)))
    en3 = self.bnc3(self.c3(F.leaky_relu(en2, negative_slope=0.2)))
    en4 = self.bnc4(self.c4(F.leaky_relu(en3, negative_slope=0.2)))
    en5 = self.bnc5(self.c5(F.leaky_relu(en4, negative_slope=0.2)))
    en6 = self.bnc6(self.c6(F.leaky_relu(en5, negative_slope=0.2)))
    en7 = self.c7(F.leaky_relu(en6, negative_slope=0.2))
    de7 = self.bnd7(self.d7(F.relu(en7)))
    de6 = F.dropout(self.bnd6(self.d6(F.relu(torch.cat((en6, de7), 1)))))
    de5 = F.dropout(self.bnd5(self.d5(F.relu(torch.cat((en5, de6), 1)))))
    de4 = F.dropout(self.bnd4(self.d4(F.relu(torch.cat((en4, de5), 1)))))
    de3 = self.bnd3(self.d3(F.relu(torch.cat((en3, de4), 1))))
    de2 = self.bnd2(self.d2(F.relu(torch.cat((en2, de3), 1))))
    de1 = self.bnd1(self.d1(F.relu(torch.cat((en1, de2), 1))))
    de0 = F.tanh(self.d0(F.relu(torch.cat((en0, de1), 1))))
    return de0
def vgg(inputs, model):
    '''VGG definition with style and content outputs.
    '''
    style, content = [], []

    def block(x, ids):
        for i in ids:
            x = F.relu(F.conv2d(x, Variable(model.features[i].weight.data.cuda()),
                                Variable(model.features[i].bias.data.cuda()), 1, 1),
                       inplace=True)
            if i in style_layers:
                style.append(gram(x))
            if i in content_layers:
                content.append(x)
        return F.max_pool2d(x, 2, 2)

    o = block(inputs, [0, 2])
    o = block(o, [5, 7])
    o = block(o, [10, 12, 14])
    o = block(o, [17, 19, 21])
    o = block(o, [24, 26, 28])
    return style, content
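This function depends on a gram(x) helper and on style_layers / content_layers index lists defined elsewhere in that project. As a reference point only, a Gram matrix for style features is typically computed like the sketch below; this is an assumption about the helper, not the project's own code.

import torch

def gram(x):
    # x: (batch, channels, height, width) feature map
    b, c, h, w = x.size()
    feats = x.view(b, c, h * w)
    # batched channel-by-channel inner products, normalized by feature size
    return torch.bmm(feats, feats.transpose(1, 2)) / (c * h * w)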
def forward(self, x):
    x = self.embed(x)  # (N,W,D)
    if self.args.static:
        x = Variable(x)
    x = x.unsqueeze(1)  # (N,Ci,W,D)
    x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
    x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
    x = torch.cat(x, 1)
    '''
    x1 = self.conv_and_pool(x, self.conv13)  # (N,Co)
    x2 = self.conv_and_pool(x, self.conv14)  # (N,Co)
    x3 = self.conv_and_pool(x, self.conv15)  # (N,Co)
    x = torch.cat((x1, x2, x3), 1)  # (N,len(Ks)*Co)
    '''
    x = self.dropout(x)  # (N,len(Ks)*Co)
    logit = self.fc1(x)  # (N,C)
    return logit
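The commented-out branch calls a conv_and_pool helper that is not shown in this snippet. Judging from the active lines above, it presumably wraps the same convolve / ReLU / max-pool / squeeze sequence; a hypothetical sketch of such a method:

import torch.nn.functional as F

def conv_and_pool(self, x, conv):
    # x: (N, Ci, W, D); after conv and squeeze(3): (N, Co, W')
    x = F.relu(conv(x)).squeeze(3)
    # global max pooling over the remaining width dimension -> (N, Co)
    x = F.max_pool1d(x, x.size(2)).squeeze(2)
    return x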
def reset_parameters(self):
    self.apply(weights_init)
    relu_gain = nn.init.calculate_gain('relu')
    self.conv1.weight.data.mul_(relu_gain)
    self.conv2.weight.data.mul_(relu_gain)
    self.conv3.weight.data.mul_(relu_gain)
    self.linear1.weight.data.mul_(relu_gain)
    if hasattr(self, 'gru'):
        orthogonal(self.gru.weight_ih.data)
        orthogonal(self.gru.weight_hh.data)
        self.gru.bias_ih.data.fill_(0)
        self.gru.bias_hh.data.fill_(0)
    if self.dist.__class__.__name__ == "DiagGaussian":
        self.dist.fc_mean.weight.data.mul_(0.01)
model_CNN.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    x = self.embed(x)  # (N,W,D)
    x = self.dropout_embed(x)
    x = x.unsqueeze(1)  # (N,Ci,W,D)
    if self.args.batch_normalizations is True:
        x = [self.convs1_bn(F.tanh(conv(x))).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
    else:
        # x = [self.dropout(F.relu(conv(x)).squeeze(3)) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        # x = [self.dropout(F.tanh(conv(x)).squeeze(3)) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        # x = [F.tanh(conv(x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
    x = torch.cat(x, 1)
    x = self.dropout(x)  # (N,len(Ks)*Co)
    if self.args.batch_normalizations is True:
        x = self.fc1_bn(self.fc1(x))
        logit = self.fc2_bn(self.fc2(F.tanh(x)))
    else:
        logit = self.fc(x)
    return logit
model.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    # print("aa", x)
    x = self.embed(x)  # (N,W,D)
    # print("embed", x)
    if self.args.static:
        x = Variable(x.data)
        # print("var", x)
    x = x.unsqueeze(1)  # (N,Ci,W,D)
    x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
    x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
    x = torch.cat(x, 1)
    '''
    x1 = self.conv_and_pool(x, self.conv13)  # (N,Co)
    x2 = self.conv_and_pool(x, self.conv14)  # (N,Co)
    x3 = self.conv_and_pool(x, self.conv15)  # (N,Co)
    x = torch.cat((x1, x2, x3), 1)  # (N,len(Ks)*Co)
    '''
    x = self.dropout(x)  # (N,len(Ks)*Co)
    logit = self.fc1(x)  # (N,C)
    return logit
model_CLSTM.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    embed = self.embed(x)
    # CNN
    cnn_x = embed
    cnn_x = self.dropout(cnn_x)
    cnn_x = cnn_x.unsqueeze(1)
    cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
    cnn_x = torch.cat(cnn_x, 0)
    cnn_x = torch.transpose(cnn_x, 1, 2)
    # LSTM
    lstm_out, self.hidden = self.lstm(cnn_x, self.hidden)
    lstm_out = torch.transpose(lstm_out, 0, 1)
    lstm_out = torch.transpose(lstm_out, 1, 2)
    lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)
    # linear
    cnn_lstm_out = self.hidden2label1(F.tanh(lstm_out))
    cnn_lstm_out = self.hidden2label2(F.tanh(cnn_lstm_out))
    # output
    logit = cnn_lstm_out
    return logit
model_CBiLSTM.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    embed = self.embed(x)
    # CNN
    embed = self.dropout(embed)
    cnn_x = embed
    cnn_x = cnn_x.unsqueeze(1)
    cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
    cnn_x = torch.cat(cnn_x, 0)
    cnn_x = torch.transpose(cnn_x, 1, 2)
    # BiLSTM
    bilstm_out, self.hidden = self.bilstm(cnn_x, self.hidden)
    bilstm_out = torch.transpose(bilstm_out, 0, 1)
    bilstm_out = torch.transpose(bilstm_out, 1, 2)
    bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
    # linear
    cnn_bilstm_out = self.hidden2label1(F.tanh(bilstm_out))
    cnn_bilstm_out = self.hidden2label2(F.tanh(cnn_bilstm_out))
    # dropout
    logit = self.dropout(cnn_bilstm_out)
    return logit
model_DeepCNN.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    one_layer = self.embed(x)  # (N,W,D)  # torch.Size([64, 43, 300])
    # one_layer = self.dropout(one_layer)
    one_layer = one_layer.unsqueeze(1)  # (N,Ci,W,D)  # torch.Size([64, 1, 43, 300])
    # one layer
    one_layer = [torch.transpose(F.relu(conv(one_layer)).squeeze(3), 1, 2) for conv in self.convs1]  # torch.Size([64, 100, 36])
    # two layer
    two_layer = [F.relu(conv(one_layer.unsqueeze(1))).squeeze(3) for (conv, one_layer) in zip(self.convs2, one_layer)]
    print("two_layer {}".format(two_layer[0].size()))
    # pooling
    output = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in two_layer]  # torch.Size([64, 100]) torch.Size([64, 100])
    output = torch.cat(output, 1)  # torch.Size([64, 300])
    # dropout
    output = self.dropout(output)
    # linear
    output = self.fc1(F.relu(output))
    logit = self.fc2(F.relu(output))
    return logit
model_CGRU.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    embed = self.embed(x)
    # CNN
    cnn_x = embed
    cnn_x = self.dropout(cnn_x)
    cnn_x = cnn_x.unsqueeze(1)
    cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
    cnn_x = torch.cat(cnn_x, 0)
    cnn_x = torch.transpose(cnn_x, 1, 2)
    # GRU
    lstm_out, self.hidden = self.gru(cnn_x, self.hidden)
    lstm_out = torch.transpose(lstm_out, 0, 1)
    lstm_out = torch.transpose(lstm_out, 1, 2)
    lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)
    # linear
    cnn_lstm_out = self.hidden2label1(F.tanh(lstm_out))
    cnn_lstm_out = self.hidden2label2(F.tanh(cnn_lstm_out))
    # output
    logit = cnn_lstm_out
    return logit
def forward(self, X):
    h = F.relu(self.conv1_1(X))
    h = F.relu(self.conv1_2(h))
    relu1_2 = h
    h = F.max_pool2d(h, kernel_size=2, stride=2)
    h = F.relu(self.conv2_1(h))
    h = F.relu(self.conv2_2(h))
    relu2_2 = h
    h = F.max_pool2d(h, kernel_size=2, stride=2)
    h = F.relu(self.conv3_1(h))
    h = F.relu(self.conv3_2(h))
    h = F.relu(self.conv3_3(h))
    relu3_3 = h
    h = F.max_pool2d(h, kernel_size=2, stride=2)
    h = F.relu(self.conv4_1(h))
    h = F.relu(self.conv4_2(h))
    h = F.relu(self.conv4_3(h))
    relu4_3 = h
    return [relu1_2, relu2_2, relu3_3, relu4_3]
def forward(self, x):
    # If we're not training this layer, set to eval mode so that we use
    # running batchnorm stats (both for time-saving and to avoid updating
    # said stats).
    if not self.active:
        self.eval()
    out = self.conv1(F.relu(self.bn1(x)))
    out = self.conv2(F.relu(self.bn2(out)))
    out = torch.cat((x, out), 1)
    # If we're not active, return a detached output to prevent backprop.
    if self.active:
        return out
    else:
        return out.detach()
def _make_grouped_conv1x1(self, in_channels, out_channels, groups,
                          batch_norm=True, relu=False):
    modules = OrderedDict()
    conv = conv1x1(in_channels, out_channels, groups=groups)
    modules['conv1x1'] = conv
    if batch_norm:
        modules['batch_norm'] = nn.BatchNorm2d(out_channels)
    if relu:
        modules['relu'] = nn.ReLU()
    if len(modules) > 1:
        return nn.Sequential(modules)
    else:
        return conv
def forward(self, x):
    # save for combining later with output
    residual = x
    if self.combine == 'concat':
        residual = F.avg_pool2d(residual, kernel_size=3,
                                stride=2, padding=1)
    out = self.g_conv_1x1_compress(x)
    out = channel_shuffle(out, self.groups)
    out = self.depthwise_conv3x3(out)
    out = self.bn_after_depthwise(out)
    out = self.g_conv_1x1_expand(out)
    out = self._combine_func(residual, out)
    return F.relu(out)
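The channel_shuffle call is ShuffleNet's trick for mixing information across groups between the two grouped 1x1 convolutions; the helper itself is not included in this snippet. A standard implementation looks like the following sketch (an assumption, the project's own version may differ in detail):

import torch

def channel_shuffle(x, groups):
    # x: (batch, channels, height, width); channels must be divisible by groups
    batch, channels, height, width = x.size()
    channels_per_group = channels // groups
    # reshape to (batch, groups, channels_per_group, h, w), swap the group
    # axes, then flatten back so channels from different groups interleave
    x = x.view(batch, groups, channels_per_group, height, width)
    x = torch.transpose(x, 1, 2).contiguous()
    return x.view(batch, channels, height, width)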
def forward(self, prev_samples, upper_tier_conditioning):
    (batch_size, _, _) = upper_tier_conditioning.size()
    prev_samples = self.embedding(
        prev_samples.contiguous().view(-1)
    ).view(
        batch_size, -1, self.q_levels
    )
    prev_samples = prev_samples.permute(0, 2, 1)
    upper_tier_conditioning = upper_tier_conditioning.permute(0, 2, 1)
    x = F.relu(self.input(prev_samples) + upper_tier_conditioning)
    x = F.relu(self.hidden(x))
    x = self.output(x).permute(0, 2, 1).contiguous()
    return F.log_softmax(x.view(-1, self.q_levels)) \
        .view(batch_size, -1, self.q_levels)
def forward(self, x):
    # x (bz x 3 x 2048) -> conv(3, 64) -> conv(64, 128) -> conv(128, 1024) -> max_pool(2048) -> 1024
    # -> fc(1024, 512) -> fc(512, 256) -> fc(256, 9)
    batchsize = x.size()[0]
    x = F.relu(self.bn1(self.conv1(x)))
    x = F.relu(self.bn2(self.conv2(x)))
    x = F.relu(self.bn3(self.conv3(x)))
    x = self.mp1(x)
    x = x.view(-1, 1024)
    x = F.relu(self.bn4(self.fc1(x)))
    x = F.relu(self.bn5(self.fc2(x)))
    x = self.fc3(x)  # bz x 9
    # identity transform
    # bz x 9
    iden = Variable(torch.from_numpy(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).astype(np.float32))).view(1, 9).repeat(batchsize, 1)
    if x.is_cuda:
        iden = iden.cuda()
    x = x + iden
    x = x.view(-1, 3, 3)  # bz x 3 x 3
    return x
# 128 x 128 transform
def forward(self, x):
    batchsize = x.size()[0]
    x = F.relu(self.bn1(self.conv1(x)))  # bz x 256 x 2048
    x = F.relu(self.bn2(self.conv2(x)))  # bz x 1024 x 2048
    x = self.mp1(x)  # bz x 1024 x 1
    x = x.view(-1, 1024)
    x = F.relu(self.bn3(self.fc1(x)))  # bz x 512
    x = F.relu(self.bn4(self.fc2(x)))  # bz x 256
    x = self.fc3(x)  # bz x (128*128)
    # identity transform
    # bz x (128*128)
    iden = Variable(torch.from_numpy(np.eye(128).astype(np.float32))).view(1, 128 * 128).repeat(batchsize, 1)
    if x.is_cuda:
        iden = iden.cuda()
    x = x + iden
    x = x.view(-1, 128, 128)  # bz x 128 x 128
    return x
def forward(self, x):
    batchsize = x.size()[0]
    trans = self.stn(x)  # regressing the transforming parameters using STN
    x = x.transpose(2, 1)  # bz x 2048 x 3
    x = torch.bmm(x, trans)  # (bz x 2048 x 3) x (bz x 3 x 3)
    x = x.transpose(2, 1)  # bz x 3 x 2048
    x = F.relu(self.bn1(self.conv1(x)))
    pointfeat = x  # bz x 64 x 2048
    x = F.relu(self.bn2(self.conv2(x)))  # bz x 128 x 2048
    x = self.bn3(self.conv3(x))  # bz x 1024 x 2048
    x = self.mp1(x)
    x = x.view(-1, 1024)  # bz x 1024
    if self.global_feat:  # using global feats for classification
        return x, trans
    else:
        x = x.view(-1, 1024, 1).repeat(1, 1, self.num_points)
        return torch.cat([x, pointfeat], 1), trans
def forward(self, x_in):
    out = F.relu(F.max_pool3d(self.conv(x_in), (1, self.max_document_length, 1)))
    out = out.view(out.size(0), -1)
    out = F.relu(self.fc1(out))
    out = F.dropout(out, training=self.training)
    out = self.fc2(out)
    return F.log_softmax(out)