def forward(self, image):
    image = image.cuda()
    # Three conv blocks with LeakyReLU (negative_slope=0.2); the first two are batch-normalized.
    conv_layer1 = self.bn1(leaky_relu(self.conv1(image), negative_slope=0.2))
    conv_layer2 = self.bn2(leaky_relu(self.conv2(conv_layer1), negative_slope=0.2))
    conv_layer3 = leaky_relu(self.conv3(conv_layer2), negative_slope=0.2)
    # Flatten the 4x4x512 feature map and score it with a sigmoid output.
    fc_layer1 = self.linear1(conv_layer3.view(-1, 4*4*512))
    return sigmoid(fc_layer1)
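
This forward pass assumes that the convolution, batch-norm, and linear layers were defined in the module's constructor and that `leaky_relu` and `sigmoid` were imported from `torch.nn.functional`. A minimal sketch of such a constructor is shown below; the layer names and the 4x4x512 bottleneck come from the snippet, while the input size (3x32x32), channel widths, kernel sizes, and strides are illustrative assumptions.

import torch.nn as nn
from torch.nn.functional import leaky_relu, sigmoid

class Discriminator(nn.Module):
    # Hypothetical constructor matching the forward() above; a 3x32x32 input is
    # assumed so that three stride-2 convolutions end at a 512x4x4 feature map.
    def __init__(self):
        super(Discriminator, self).__init__()
        self.conv1 = nn.Conv2d(3, 128, 4, stride=2, padding=1)    # -> 128 x 16 x 16
        self.bn1 = nn.BatchNorm2d(128)
        self.conv2 = nn.Conv2d(128, 256, 4, stride=2, padding=1)  # -> 256 x 8 x 8
        self.bn2 = nn.BatchNorm2d(256)
        self.conv3 = nn.Conv2d(256, 512, 4, stride=2, padding=1)  # -> 512 x 4 x 4
        self.linear1 = nn.Linear(4 * 4 * 512, 1)                  # real/fake score
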
Python example source code for leaky_relu()
def forward(self, z):
    z = z.view(z.size(0), z.size(1), 1, 1)
    out = self.fc(z)                              # (?, 512, 4, 4)
    out = F.leaky_relu(self.deconv1(out), 0.05)   # (?, 256, 8, 8)
    out = F.leaky_relu(self.deconv2(out), 0.05)   # (?, 128, 16, 16)
    out = F.leaky_relu(self.deconv3(out), 0.05)   # (?, 64, 32, 32)
    out = F.tanh(self.deconv4(out))               # (?, 3, 64, 64)
    return out
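
The shape comments above imply a specific set of layers in the generator's constructor. The sketch below is one consistent reconstruction; the latent size `z_dim` and the choice of a kernel-4 transposed convolution for `self.fc` are assumptions, chosen so the annotated shapes work out.

import torch.nn as nn

class Generator(nn.Module):
    # Hypothetical layer definitions consistent with the shape comments in the
    # forward() above; z_dim and all channel widths are illustrative assumptions.
    def __init__(self, z_dim=100):
        super(Generator, self).__init__()
        # A kernel-4 transposed conv maps a (?, z_dim, 1, 1) input to (?, 512, 4, 4).
        self.fc = nn.ConvTranspose2d(z_dim, 512, kernel_size=4)
        self.deconv1 = nn.ConvTranspose2d(512, 256, 4, stride=2, padding=1)  # (?, 256, 8, 8)
        self.deconv2 = nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1)  # (?, 128, 16, 16)
        self.deconv3 = nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1)   # (?, 64, 32, 32)
        self.deconv4 = nn.ConvTranspose2d(64, 3, 4, stride=2, padding=1)     # (?, 3, 64, 64)
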
def forward(self, x):
    out = F.leaky_relu(self.conv1(x), 0.05)    # (?, 64, 32, 32)
    out = F.leaky_relu(self.conv2(out), 0.05)  # (?, 128, 16, 16)
    out = F.leaky_relu(self.conv3(out), 0.05)  # (?, 256, 8, 8)
    out = F.leaky_relu(self.conv4(out), 0.05)  # (?, 512, 4, 4)
    out = F.sigmoid(self.conv5(out)).squeeze()
    return out
def forward(self, x):
    x = F.leaky_relu(self.map1(x), 0.1)
    return F.sigmoid(self.map2(x))
def forward(self, x):
    x = F.leaky_relu(self.map1(x), 0.1)
    x = F.leaky_relu(self.map2(x), 0.1)
    return F.sigmoid(self.map3(x))
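
Both of these small forwards are plain MLPs built from `map*` linear layers. A minimal module that would support the three-layer variant is sketched below; the class name and all layer sizes are illustrative assumptions, not taken from the original project.

import torch.nn as nn

class SmallDiscriminator(nn.Module):
    # Hypothetical three-layer MLP matching the second forward() above.
    def __init__(self, input_size=1, hidden_size=50, output_size=1):
        super(SmallDiscriminator, self).__init__()
        self.map1 = nn.Linear(input_size, hidden_size)
        self.map2 = nn.Linear(hidden_size, hidden_size)
        self.map3 = nn.Linear(hidden_size, output_size)
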
def __init__(self, input_nch, output_nch, kernel_size=3, activation=F.leaky_relu, use_bn=True, same_conv=True):
    super(UNetConvBlock, self).__init__()
    padding = kernel_size // 2 if same_conv else 0  # "same" padding; only odd kernel sizes are supported
    self.conv0 = nn.Conv2d(input_nch, output_nch, kernel_size, padding=padding)
    self.conv1 = nn.Conv2d(output_nch, output_nch, kernel_size, padding=padding)
    self.act = activation
    self.batch_norm = nn.BatchNorm2d(output_nch) if use_bn else None
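
The snippet only shows the UNetConvBlock constructor. A plausible forward() to pair with it is sketched below; it is not part of the original snippet, so the exact placement of batch norm relative to the activations is an assumption.

def forward(self, x):
    # Hypothetical UNetConvBlock.forward: both convolutions pass through the stored
    # activation, and batch normalization (if enabled) is applied to the final output.
    x = self.act(self.conv0(x))
    x = self.act(self.conv1(x))
    return self.batch_norm(x) if self.batch_norm is not None else x
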
def forward(self, x):
    feature = self.features(x)
    output = self.yolo(feature)
    output = self.flatten(output)
    output = F.leaky_relu(self.fc1(output), 0.1)
    output = self.fc2(output)
    return output
From pytorch_MNIST_GAN.py in the project pytorch-MNIST-CelebA-GAN-DCGAN (author: znxlwm):
def forward(self, input):
    x = F.leaky_relu(self.fc1(input), 0.2)
    x = F.leaky_relu(self.fc2(x), 0.2)
    x = F.leaky_relu(self.fc3(x), 0.2)
    x = F.tanh(self.fc4(x))
    return x
From pytorch_MNIST_GAN.py in the same project (pytorch-MNIST-CelebA-GAN-DCGAN, znxlwm):
def forward(self, input):
    x = F.leaky_relu(self.fc1(input), 0.2)
    x = F.dropout(x, 0.3)
    x = F.leaky_relu(self.fc2(x), 0.2)
    x = F.dropout(x, 0.3)
    x = F.leaky_relu(self.fc3(x), 0.2)
    x = F.dropout(x, 0.3)
    x = F.sigmoid(self.fc4(x))
    return x
From pytorch_MNIST_DCGAN.py in the same project:
def forward(self, input):
    x = F.leaky_relu(self.conv1(input), 0.2)
    x = F.leaky_relu(self.conv2_bn(self.conv2(x)), 0.2)
    x = F.leaky_relu(self.conv3_bn(self.conv3(x)), 0.2)
    x = F.leaky_relu(self.conv4_bn(self.conv4(x)), 0.2)
    x = F.sigmoid(self.conv5(x))
    return x
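
The conv1..conv5 and *_bn layers used above come from the discriminator's constructor in the same file, which the snippet omits. The sketch below is one reconstruction consistent with the forward(); the base width d, the single input channel, and the 64x64 input resolution (MNIST resized) are assumptions.

import torch.nn as nn

class discriminator(nn.Module):
    # Sketch of layer definitions consistent with the forward() above; all
    # channel widths and the input size are illustrative assumptions.
    def __init__(self, d=128):
        super(discriminator, self).__init__()
        self.conv1 = nn.Conv2d(1, d, 4, 2, 1)          # 64x64 -> 32x32
        self.conv2 = nn.Conv2d(d, d * 2, 4, 2, 1)      # 32x32 -> 16x16
        self.conv2_bn = nn.BatchNorm2d(d * 2)
        self.conv3 = nn.Conv2d(d * 2, d * 4, 4, 2, 1)  # 16x16 -> 8x8
        self.conv3_bn = nn.BatchNorm2d(d * 4)
        self.conv4 = nn.Conv2d(d * 4, d * 8, 4, 2, 1)  # 8x8 -> 4x4
        self.conv4_bn = nn.BatchNorm2d(d * 8)
        self.conv5 = nn.Conv2d(d * 8, 1, 4, 1, 0)      # 4x4 -> 1x1 real/fake score
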
From pytorch_CelebA_DCGAN.py in the same project:
def forward(self, input):
    x = F.leaky_relu(self.conv1(input), 0.2)
    x = F.leaky_relu(self.conv2_bn(self.conv2(x)), 0.2)
    x = F.leaky_relu(self.conv3_bn(self.conv3(x)), 0.2)
    x = F.leaky_relu(self.conv4_bn(self.conv4(x)), 0.2)
    x = F.sigmoid(self.conv5(x))
    return x
def forward(self, x):
    if self.has_mean:
        batch_size = x.data.size(0)
        x = x - torch.autograd.Variable(self.mean_img.repeat(batch_size, 1, 1, 1))
    ind = -2
    self.loss = None
    outputs = dict()
    for block in self.blocks:
        ind = ind + 1
        #if ind > 14:
        #    return x
        if block['type'] == 'net':
            continue
        elif block['type'] in ('convolutional', 'maxpool', 'reorg', 'avgpool',
                               'softmax', 'connected', 'dropout'):
            x = self.models[ind](x)
            outputs[ind] = x
        elif block['type'] == 'route':
            # Route layers pass through (one index) or concatenate (two indices)
            # earlier outputs; non-positive indices are relative to the current layer.
            layers = block['layers'].split(',')
            layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers]
            if len(layers) == 1:
                x = outputs[layers[0]]
                outputs[ind] = x
            elif len(layers) == 2:
                x1 = outputs[layers[0]]
                x2 = outputs[layers[1]]
                x = torch.cat((x1, x2), 1)
                outputs[ind] = x
        elif block['type'] == 'shortcut':
            # Residual connection from an earlier layer, followed by an optional activation.
            from_layer = int(block['from'])
            activation = block['activation']
            from_layer = from_layer if from_layer > 0 else from_layer + ind
            x1 = outputs[from_layer]
            x2 = outputs[ind - 1]
            x = x1 + x2
            if activation == 'leaky':
                x = F.leaky_relu(x, 0.1, inplace=True)
            elif activation == 'relu':
                x = F.relu(x, inplace=True)
            outputs[ind] = x
        elif block['type'] == 'cost':
            continue
        elif block['type'] == 'region':
            continue
        else:
            print('unknown type %s' % (block['type']))
    return x
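
The route branch above resolves layer references the way Darknet cfg files do: positive entries are absolute layer indices, non-positive entries are relative to the current layer index. The small standalone sketch below mirrors that index handling; the function name and the example values are made up for illustration.

def resolve_route_layers(layers_field, ind):
    """Mirror of the index normalization in the route branch above: positive
    entries are absolute layer indices, non-positive entries are offsets from
    the current layer index `ind`."""
    layers = layers_field.split(',')
    return [int(i) if int(i) > 0 else int(i) + ind for i in layers]

# Example: at layer 24, "layers=-1,8" refers to layers 23 and 8.
print(resolve_route_layers('-1,8', 24))  # [23, 8]
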
def __init__(self,
             embeddings,
             n_labels,
             n_blstm_layers=3,
             lstm_hidden_size=400,
             use_gru=False,
             n_arc_mlp_layers=1,
             n_arc_mlp_units=500,
             n_label_mlp_layers=1,
             n_label_mlp_units=100,
             mlp_activation=F.leaky_relu,
             embeddings_dropout=0.33,
             lstm_dropout=0.33,
             arc_mlp_dropout=0.33,
             label_mlp_dropout=0.33,
             pad_id=0):
    super(DeepBiaffine, self).__init__()
    self._pad_id = pad_id
    blstm_cls = nn.GRU if use_gru else nn.LSTM
    self.embed = Embed(*embeddings, dropout=embeddings_dropout,
                       padding_idx=pad_id)
    embed_size = self.embed.size - self.embed[0].weight.data.shape[1]
    self.blstm = blstm_cls(
        num_layers=n_blstm_layers,
        input_size=embed_size,
        hidden_size=(lstm_hidden_size
                     if lstm_hidden_size is not None else embed_size),
        batch_first=True,
        dropout=lstm_dropout,
        bidirectional=True
    )
    layers = [MLP.Layer(lstm_hidden_size * 2, n_arc_mlp_units,
                        mlp_activation, arc_mlp_dropout)
              for i in range(n_arc_mlp_layers)]
    self.mlp_arc_head = MLP(layers)
    layers = [MLP.Layer(lstm_hidden_size * 2, n_arc_mlp_units,
                        mlp_activation, arc_mlp_dropout)
              for i in range(n_arc_mlp_layers)]
    self.mlp_arc_dep = MLP(layers)
    layers = [MLP.Layer(lstm_hidden_size * 2, n_label_mlp_units,
                        mlp_activation, label_mlp_dropout)
              for i in range(n_label_mlp_layers)]
    self.mlp_label_head = MLP(layers)
    layers = [MLP.Layer(lstm_hidden_size * 2, n_label_mlp_units,
                        mlp_activation, label_mlp_dropout)
              for i in range(n_label_mlp_layers)]
    self.mlp_label_dep = MLP(layers)
    self.arc_biaffine = \
        Biaffine(n_arc_mlp_units, n_arc_mlp_units, 1,
                 bias=(True, False, False))
    self.label_biaffine = \
        Biaffine(n_label_mlp_units, n_label_mlp_units, n_labels,
                 bias=(True, True, True))
def forward(self, premise, hypothesis, training=False):
    '''
    inputs:
        premise    : batch x T
        hypothesis : batch x T
    outputs:
        pred : batch x num_classes
    '''
    self.train(training)
    batch_size = premise.size(0)
    mask_p = torch.ne(premise, 0).type(dtype)
    mask_h = torch.ne(hypothesis, 0).type(dtype)
    encoded_p = self.embedding(premise)   # batch x T x n_embed
    encoded_p = F.dropout(encoded_p, p=self.options['DROPOUT'], training=training)
    encoded_h = self.embedding(hypothesis)  # batch x T x n_embed
    encoded_h = F.dropout(encoded_h, p=self.options['DROPOUT'], training=training)
    encoded_p = encoded_p.transpose(1, 0)  # T x batch x n_embed
    encoded_h = encoded_h.transpose(1, 0)  # T x batch x n_embed
    mask_p = mask_p.transpose(1, 0)  # T x batch
    mask_h = mask_h.transpose(1, 0)  # T x batch
    h_0 = self.init_hidden(batch_size)  # 1 x batch x n_dim
    o_p, h_n = self._gru_forward(self.p_gru, encoded_p, mask_p, h_0)
    # o_p : T x batch x n_dim, h_n : 1 x batch x n_dim
    o_h, h_n = self._gru_forward(self.h_gru, encoded_h, mask_h, h_n)
    # o_h : T x batch x n_dim, h_n : 1 x batch x n_dim
    if self.options['WBW_ATTN']:
        r_0 = self.attn_rnn_init_hidden(batch_size)  # batch x n_dim
        r, alpha_vec = self._attn_rnn_forward(o_h, mask_h, r_0, o_p, mask_p)
        # r : batch x n_dim, alpha_vec : T x batch x T
    else:
        r, alpha = self._attention_forward(o_p, mask_p, o_h[-1])
        # r : batch x n_dim, alpha : batch x T
    h_star = self._combine_last(r, o_h[-1])  # batch x n_dim
    h_star = self.out(h_star)  # batch x num_classes
    if self.options['LAST_NON_LINEAR']:
        h_star = F.leaky_relu(h_star)  # non-linear projection
    pred = F.log_softmax(h_star)
    return pred
def base_test():
    fc1 = nn.Linear(10, 20)
    fc1.weight.data.normal_(0.0, 1.0)
    fc1.bias.data.normal_(0.0, 1.0)
    fc2 = nn.Linear(20, 2)
    fc2.weight.data.normal_(0.0, 1.0)
    fc2.bias.data.normal_(0.0, 1.0)
    fc3 = nn.Linear(10, 2)
    fc3.weight.data.normal_(0.0, 1.0)
    fc3.bias.data.normal_(0.0, 1.0)
    fc4 = nn.Linear(10, 2)
    fc4.weight.data.normal_(0.0, 1.0)
    fc4.bias.data.normal_(0.0, 1.0)
    softmax = nn.Softmax()
    model0 = lambda x: F.log_softmax(fc2(F.relu(fc1(x))))
    model1 = lambda x: F.softmax(F.elu(fc3(x)))
    model2 = lambda x: F.softmax(F.tanh(fc3(x)))
    model3 = lambda x: F.softmax(F.sigmoid(fc3(x)))
    model4 = lambda x: softmax(F.leaky_relu(fc4(x))).clone()
    model5 = lambda x: softmax(F.logsigmoid(fc4(x.transpose(0, 1))))
    model6 = lambda x: fc3(F.max_pool2d(x.unsqueeze(dim=0), 2).squeeze())
    model7 = lambda x: fc3(F.max_pool2d(x.unsqueeze(dim=0), 2).squeeze(dim=0))
    model8 = lambda x: fc3(F.max_pool3d(x.unsqueeze(0), 2).squeeze())
    model9 = lambda x: fc3(F.max_pool1d(x.abs().view(1, 1, -1), 4).squeeze().view(10, 10))
    #model10 = lambda x: fc3(x.double())
    #model10 = lambda x: fc3(x.view(1,10,10).select(0,0))
    model10 = lambda x, y: F.softmax(F.tanh(fc3(torch.cat((x, y), 1))))
    data = Variable(torch.rand(10, 10))
    data2 = Variable(torch.rand(20, 20))
    data1a = Variable(torch.rand(10, 5))
    data1b = Variable(torch.rand(10, 5))
    data3 = Variable(torch.rand(2, 20, 20))
    out = model0(data) + \
        model1(data) * model2(data) / model3(data) / 2.0 + \
        2.0 * model4(data) + model5(data) + 1 - 2.0 + \
        model6(data2) + model7(data2) + model8(data3) + model9(data2) + model10(data1a, data1b)
    out_path = 'out'
    if not os.path.isdir(out_path):
        os.mkdir(out_path)
    uid = str(uuid.uuid4())
    torch2c.compile(out, 'base', os.path.join(out_path, uid), compile_test=True)
def forward(self, x):
    ind = -2
    self.loss = None
    outputs = dict()
    for block in self.blocks:
        ind = ind + 1
        #if ind > 0:
        #    return x
        if block['type'] == 'net':
            continue
        elif block['type'] in ('convolutional', 'maxpool', 'reorg', 'avgpool',
                               'softmax', 'connected'):
            x = self.models[ind](x)
            outputs[ind] = x
        elif block['type'] == 'route':
            layers = block['layers'].split(',')
            layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers]
            if len(layers) == 1:
                x = outputs[layers[0]]
                outputs[ind] = x
            elif len(layers) == 2:
                x1 = outputs[layers[0]]
                x2 = outputs[layers[1]]
                x = torch.cat((x1, x2), 1)
                outputs[ind] = x
        elif block['type'] == 'shortcut':
            from_layer = int(block['from'])
            activation = block['activation']
            from_layer = from_layer if from_layer > 0 else from_layer + ind
            x1 = outputs[from_layer]
            x2 = outputs[ind - 1]
            x = x1 + x2
            if activation == 'leaky':
                x = F.leaky_relu(x, 0.1, inplace=True)
            elif activation == 'relu':
                x = F.relu(x, inplace=True)
            outputs[ind] = x
        elif block['type'] == 'region':
            continue
            # Unreachable in this variant: region-layer loss accumulation is skipped
            # by the `continue` above but kept as in the original source.
            if self.loss:
                self.loss = self.loss + self.models[ind](x)
            else:
                self.loss = self.models[ind](x)
            outputs[ind] = None
        elif block['type'] == 'cost':
            continue
        else:
            print('unknown type %s' % (block['type']))
    return x