def forward(self, x):
    n_idx = 0
    c_idx = 1
    h_idx = 2
    w_idx = 3
    x = self.lookup_table(x)
    x = x.unsqueeze(c_idx)
    enc_outs = []
    for encoder in self.encoders:
        enc_ = F.relu(encoder(x))
        k_h = enc_.size()[h_idx]
        enc_ = F.max_pool2d(enc_, kernel_size=(k_h, 1))
        enc_ = enc_.squeeze(w_idx)
        enc_ = enc_.squeeze(h_idx)
        enc_outs.append(enc_)
    encoding = self.dropout(torch.cat(enc_outs, 1))
    return F.log_softmax(self.logistic(encoding), dim=1)
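This is the max-over-time pooling pattern from convolutional text classifiers: each encoder collapses the whole sequence axis to a single feature per channel. A minimal, self-contained sketch of the shape flow (the sizes below are hypothetical, not taken from the snippet above):

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(4, 1, 50, 128)                            # (batch, 1, seq_len, embed_dim)
conv = nn.Conv2d(1, 100, kernel_size=(3, 128))            # one hypothetical encoder
feat = F.relu(conv(x))                                    # -> (4, 100, 48, 1)
feat = F.max_pool2d(feat, kernel_size=(feat.size(2), 1))  # -> (4, 100, 1, 1)
feat = feat.squeeze(3).squeeze(2)                         # -> (4, 100)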
Python max_pool2d() example source code
def forward(self, X):
    h = F.relu(self.conv1_1(X))
    h = F.relu(self.conv1_2(h))
    relu1_2 = h
    h = F.max_pool2d(h, kernel_size=2, stride=2)
    h = F.relu(self.conv2_1(h))
    h = F.relu(self.conv2_2(h))
    relu2_2 = h
    h = F.max_pool2d(h, kernel_size=2, stride=2)
    h = F.relu(self.conv3_1(h))
    h = F.relu(self.conv3_2(h))
    h = F.relu(self.conv3_3(h))
    relu3_3 = h
    h = F.max_pool2d(h, kernel_size=2, stride=2)
    h = F.relu(self.conv4_1(h))
    h = F.relu(self.conv4_2(h))
    h = F.relu(self.conv4_3(h))
    relu4_3 = h
    return [relu1_2, relu2_2, relu3_3, relu4_3]
def vgg(inputs, model):
    '''VGG definition with style and content outputs.
    '''
    style, content = [], []

    def block(x, ids):
        for i in ids:
            x = F.relu(F.conv2d(x,
                                Variable(model.features[i].weight.data.cuda()),
                                Variable(model.features[i].bias.data.cuda()),
                                1, 1), inplace=True)
            if i in style_layers:
                style.append(gram(x))
            if i in content_layers:
                content.append(x)
        return F.max_pool2d(x, 2, 2)

    o = block(inputs, [0, 2])
    o = block(o, [5, 7])
    o = block(o, [10, 12, 14])
    o = block(o, [17, 19, 21])
    o = block(o, [24, 26, 28])
    return style, content
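Here gram, style_layers, and content_layers are globals defined elsewhere in the repo this snippet comes from. For reference, a Gram-matrix helper commonly looks like the sketch below; the normalization constant is one conventional choice and may differ from the original:

import torch

def gram(x):
    # x: (N, C, H, W) feature maps -> (N, C, C) Gram matrices
    n, c, h, w = x.size()
    f = x.view(n, c, h * w)
    return f.bmm(f.transpose(1, 2)) / (c * h * w)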
def forward(self, x):
    nBatch = x.size(0)
    x = F.max_pool2d(self.conv1(x), 2)
    x = F.max_pool2d(self.conv2(x), 2)
    x = x.view(nBatch, -1)
    L = self.M * self.L
    Q = L.mm(L.t()) + self.eps * Variable(torch.eye(self.nHidden)).cuda()
    Q = Q.unsqueeze(0).expand(nBatch, self.nHidden, self.nHidden)
    G = self.G.unsqueeze(0).expand(nBatch, self.nineq, self.nHidden)
    z0 = self.qp_z0(x)
    s0 = self.qp_s0(x)
    h = z0.mm(self.G.t()) + s0
    e = Variable(torch.Tensor())
    inputs = self.qp_o(x)
    x = QPFunction()(Q, inputs, G, h, e, e)
    x = x[:, :10]
    return F.log_softmax(x, dim=1)
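This is an OptNet-style layer: QPFunction (from the qpth library) solves, for each example in the batch, the quadratic program minimize (1/2) z^T Q z + p^T z subject to G z <= h and A z = b, where p is the inputs tensor above and the empty tensors e mean "no equality constraints". A standalone call looks like this sketch (hypothetical shapes, assumes qpth is installed):

import torch
from qpth.qp import QPFunction

nz, nineq = 5, 3
Q = torch.eye(nz).unsqueeze(0)      # (1, nz, nz), must be positive definite
p = torch.randn(1, nz)
G = torch.randn(1, nineq, nz)
h = torch.ones(1, nineq)            # z = 0 is feasible since G z = 0 <= h
e = torch.Tensor()                  # empty: no equality constraints
z = QPFunction()(Q, p, G, h, e, e)  # -> (1, nz)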
def forward(self, x):
    relu1_1 = F.relu(self.conv1_1(x))
    relu1_2 = F.relu(self.conv1_2(relu1_1))
    maxpool_1 = F.max_pool2d(relu1_2, kernel_size=2, stride=2)
    relu2_1 = F.relu(self.conv2_1(maxpool_1))
    relu2_2 = F.relu(self.conv2_2(relu2_1))
    maxpool_2 = F.max_pool2d(relu2_2, kernel_size=2, stride=2)
    relu3_1 = F.relu(self.conv3_1(maxpool_2))
    relu3_2 = F.relu(self.conv3_2(relu3_1))
    relu3_3 = F.relu(self.conv3_3(relu3_2))
    maxpool_3 = F.max_pool2d(relu3_3, kernel_size=2, stride=2)
    relu4_1 = F.relu(self.conv4_1(maxpool_3))
    relu4_2 = F.relu(self.conv4_2(relu4_1))
    relu4_3 = F.relu(self.conv4_3(relu4_2))
    return [relu1_2, relu2_2, relu3_3, relu4_3]
def forward(self, x):
    # layer1
    h = F.relu(self.conv1(x))
    h = F.max_pool2d(h, 3, stride=2)
    # layer2
    h = F.relu(self.conv2(h))
    h = F.max_pool2d(h, 3, stride=2)
    # layer3-5
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.relu(self.conv5(h))
    h = F.max_pool2d(h, 3, stride=2)
    h = h.view(-1, 256 * 6 * 6)
    # layer6-8
    h = F.dropout(F.relu(self.fc6(h)), training=self.training)
    h = F.dropout(F.relu(self.fc7(h)), training=self.training)
    h = self.fc8(h)
    return h.view(-1, self.Nj, 2)
def forward(self, x):
    # Bottom-up
    c1 = F.relu(self.bn1(self.conv1(x)))
    c1 = F.max_pool2d(c1, kernel_size=3, stride=2, padding=1)
    c2 = self.layer1(c1)
    c3 = self.layer2(c2)
    c4 = self.layer3(c3)
    c5 = self.layer4(c4)
    p6 = self.conv6(c5)
    p7 = self.conv7(F.relu(p6))
    # Top-down
    p5 = self.latlayer1(c5)
    p4 = self._upsample_add(p5, self.latlayer2(c4))
    p4 = self.toplayer1(p4)
    p3 = self._upsample_add(p4, self.latlayer3(c3))
    p3 = self.toplayer2(p3)
    return p3, p4, p5, p6, p7
def forward(self, x):
    x = F.relu(self.conv1_1(x))
    x = F.relu(self.conv1_2(x))
    x = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=False)
    x = F.relu(self.conv2_1(x))
    x = F.relu(self.conv2_2(x))
    x = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=False)
    x = F.relu(self.conv3_1(x))
    x = F.relu(self.conv3_2(x))
    x = F.relu(self.conv3_3(x))
    x = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=False)
    x = F.relu(self.conv4_1(x))
    x = F.relu(self.conv4_2(x))
    x = F.relu(self.conv4_3(x))
    x = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=False)
    x = F.relu(self.conv5_1(x))
    x = F.relu(self.conv5_2(x))
    x = F.relu(self.conv5_3(x))
    x = self.prob(x)
    return x
def forward(self, x):
    x = self.conv1(x)
    x = F.relu(x)
    x = self.bn1(x)
    x = self.conv2(x)
    x = self.bn2(x)
    x = F.max_pool2d(x, kernel_size=(1, 2), stride=(1, 2), padding=(0, 1))
    x = F.relu(x)
    x = self.conv3(x)
    x = self.bn3(x)
    x = F.max_pool2d(x, kernel_size=(1, 2), stride=(1, 2), padding=(0, 1))
    x = F.relu(x)
    x = self.conv4(x)
    x = self.bn4(x)
    x = F.avg_pool2d(x, kernel_size=(1, 2), stride=(1, 2), padding=0)
    x = F.relu(x)
    x = x.view(-1, 192)
    return x
def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
    """Selectable global pooling function with dynamic input kernel size
    """
    if pool_type == 'avgmaxc':
        x = torch.cat([
            F.avg_pool2d(
                x, kernel_size=(x.size(2), x.size(3)), padding=padding,
                count_include_pad=count_include_pad),
            F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
        ], dim=1)
    elif pool_type == 'avgmax':
        x_avg = F.avg_pool2d(
            x, kernel_size=(x.size(2), x.size(3)), padding=padding,
            count_include_pad=count_include_pad)
        x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
        x = 0.5 * (x_avg + x_max)
    elif pool_type == 'max':
        x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
    else:
        if pool_type != 'avg':
            print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
        x = F.avg_pool2d(
            x, kernel_size=(x.size(2), x.size(3)), padding=padding,
            count_include_pad=count_include_pad)
    return x
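Because the kernel size is taken from the input's own spatial dimensions, this behaves as a global pooling layer at any input resolution. A quick usage sketch with dummy sizes:

import torch
import torch.nn.functional as F

x = torch.randn(2, 64, 7, 7)
out = adaptive_avgmax_pool2d(x, pool_type='avgmax')    # -> (2, 64, 1, 1)
outc = adaptive_avgmax_pool2d(x, pool_type='avgmaxc')  # -> (2, 128, 1, 1), avg and max concatenated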
def _crop_pool_layer(self, bottom, rois, max_pool=True):
    # implement it using stn
    # box to affine
    # input (x1, y1, x2, y2)
    """
    theta = [ (x2-x1)/(W-1)        0          (x1+x2-W+1)/(W-1) ]
            [       0        (y2-y1)/(H-1)    (y1+y2-H+1)/(H-1) ]
    """
    rois = rois.detach()
    x1 = rois[:, 1::4] / 16.0
    y1 = rois[:, 2::4] / 16.0
    x2 = rois[:, 3::4] / 16.0
    y2 = rois[:, 4::4] / 16.0
    height = bottom.size(2)
    width = bottom.size(3)
    # affine theta
    theta = Variable(rois.data.new(rois.size(0), 2, 3).zero_())
    theta[:, 0, 0] = (x2 - x1) / (width - 1)
    theta[:, 0, 2] = (x1 + x2 - width + 1) / (width - 1)
    theta[:, 1, 1] = (y2 - y1) / (height - 1)
    theta[:, 1, 2] = (y1 + y2 - height + 1) / (height - 1)
    if max_pool:
        pre_pool_size = cfg.POOLING_SIZE * 2
        grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, pre_pool_size, pre_pool_size)))
        crops = F.grid_sample(bottom.expand(rois.size(0), bottom.size(1), bottom.size(2), bottom.size(3)), grid)
        crops = F.max_pool2d(crops, 2, 2)
    else:
        grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, cfg.POOLING_SIZE, cfg.POOLING_SIZE)))
        crops = F.grid_sample(bottom.expand(rois.size(0), bottom.size(1), bottom.size(2), bottom.size(3)), grid)
    return crops
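The theta above maps the sampling grid's normalized corners (-1, -1) and (+1, +1) onto the ROI corners (x1, y1) and (x2, y2) in normalized feature-map coordinates. A self-contained check with made-up numbers (align_corners=True matches the older PyTorch behavior this snippet was written against):

import torch
import torch.nn.functional as F

W, H = 32, 32
x1, y1, x2, y2 = 4.0, 6.0, 19.0, 25.0
theta = torch.tensor([[[(x2 - x1) / (W - 1), 0.0, (x1 + x2 - W + 1) / (W - 1)],
                       [0.0, (y2 - y1) / (H - 1), (y1 + y2 - H + 1) / (H - 1)]]])
grid = F.affine_grid(theta, (1, 1, 2, 2), align_corners=True)
# top-left grid point lands on the normalized (x1, y1) corner:
assert torch.allclose(grid[0, 0, 0],
                      torch.tensor([2 * x1 / (W - 1) - 1, 2 * y1 / (H - 1) - 1]))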
def forward(self, x):
    x = F.max_pool2d(F.relu(self.conv1(x)), 2)
    x = F.max_pool2d(F.relu(self.conv2(x)), 2)
    x = x.view(-1, self.num_flat_features(x))
    x = F.relu(self.fc1(x))
    x = F.relu(self.fc2(x))
    x = self.softmax(x)
    return x
def forward(self, input):
    bsz, word_len, char_len = input.size()
    encode = input.view(-1, char_len)
    encode = self.char_ebd(encode).unsqueeze(1)
    encode = F.relu(self.char_cnn(encode))
    # max over the character dimension
    encode = F.max_pool2d(encode,
                          kernel_size=(encode.size(2), 1))
    # drop activations only during training
    encode = F.dropout(encode.squeeze(), p=self.dropout, training=self.training)
    return encode.view(bsz, word_len, -1)
def forward(self, x, typ):
    if typ == ForwardType.Content:
        is_style, is_content = False, True
    elif typ == ForwardType.Style:
        is_style, is_content = True, False
    elif typ == ForwardType.Train:
        is_style, is_content = True, True
    else:
        raise Exception('Unknown forward type, {}'.format(typ))
    internals = {}
    x = F.relu(self.conv1_1(x))
    x = F.relu(self.conv1_2(x))
    if is_style:
        internals['conv1_2'] = x
    x = F.max_pool2d(x, 2, stride=2)
    x = F.relu(self.conv2_1(x))
    x = F.relu(self.conv2_2(x))
    if is_style or is_content:
        internals['conv2_2'] = x
    x = F.max_pool2d(x, 2, stride=2)
    x = F.relu(self.conv3_1(x))
    x = F.relu(self.conv3_2(x))
    x = F.relu(self.conv3_3(x))
    if is_style:
        internals['conv3_3'] = x
    x = F.max_pool2d(x, 2, stride=2)
    x = F.relu(self.conv4_1(x))
    x = F.relu(self.conv4_2(x))
    x = F.relu(self.conv4_3(x))
    if is_style:
        internals['conv4_3'] = x
    return internals
def forward(self, x):
    x = F.max_pool2d(F.pad(x, (0, 1, 0, 1), mode='replicate'), 2, stride=1)
    return x
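Padding one pixel on the right and bottom before a stride-1 2x2 max pool keeps the spatial size unchanged (a pattern used, for example, as the final pool of tiny-YOLO-style networks). A quick shape check:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)
y = F.max_pool2d(F.pad(x, (0, 1, 0, 1), mode='replicate'), 2, stride=1)
assert y.shape == x.shape  # still (1, 3, 8, 8)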
def forward(self, x):
    x = F.relu(self.conv1(x))
    x = F.max_pool2d(x, 2)
    x = F.relu(self.conv2(x))
    x = F.max_pool2d(x, 2)
    x = x.view(x.size(0), -1)
    x = F.relu(self.fc1(x))
    x = F.relu(self.fc2(x))
    out = self.fc3(x)
    return out
def forward(self, x):
    branch3x3 = self.branch3x3(x)
    branch3x3dbl = self.branch3x3dbl_1(x)
    branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
    branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
    branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
    outputs = [branch3x3, branch3x3dbl, branch_pool]
    return torch.cat(outputs, 1)
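This is an Inception-style grid-reduction block: every branch halves the spatial resolution (kernel_size=3, stride=2, no padding gives floor((n - 3) / 2) + 1), and the pooled branch keeps the input channel count, so the concatenated output has branch3x3 + branch3x3dbl + in_channels channels. For example, with hypothetical Inception v3 sizes:

import torch
import torch.nn.functional as F

x = torch.randn(1, 288, 35, 35)
pooled = F.max_pool2d(x, kernel_size=3, stride=2)  # -> (1, 288, 17, 17)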
def forward(self, x):
    branch3x3 = self.branch3x3_1(x)
    branch3x3 = self.branch3x3_2(branch3x3)
    branch7x7x3 = self.branch7x7x3_1(x)
    branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
    branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
    branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
    branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
    outputs = [branch3x3, branch7x7x3, branch_pool]
    return torch.cat(outputs, 1)
def forward(self, x):
    out = F.relu(self.conv1(x))
    out = F.max_pool2d(out, 2)
    out = F.relu(self.conv2(out))
    out = F.max_pool2d(out, 2)
    out = out.view(out.size(0), -1)
    out = F.relu(self.fc1(out))
    out = F.relu(self.fc2(out))
    out = self.fc3(out)
    return out
def forward(self, x):
    y1 = self.sep_conv1(x)
    y2 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
    if self.stride == 2:
        y2 = self.bn1(self.conv1(y2))
    return F.relu(y1 + y2)
def forward(self, x):
    # Left branch
    y1 = self.sep_conv1(x)
    y2 = self.sep_conv2(x)
    # Right branch
    y3 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
    if self.stride == 2:
        y3 = self.bn1(self.conv1(y3))
    y4 = self.sep_conv3(x)
    # Concat & reduce channels
    b1 = F.relu(y1 + y2)
    b2 = F.relu(y3 + y4)
    y = torch.cat([b1, b2], 1)
    return F.relu(self.bn2(self.conv2(y)))
def forward(self, x):
    out = F.relu(self.conv1(x))
    out = F.max_pool2d(out, 2)
    out = F.relu(self.conv2(out))
    out = F.max_pool2d(out, 2)
    out = out.view(out.size(0), -1)
    out = F.relu(self.fc1(out))
    out = F.relu(self.fc2(out))
    out = self.fc3(out)
    return out
def forward(self, x):
    x = F.max_pool2d(self.conv1(x), 2)
    x = F.relu(x) + F.relu(-x)
    x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
    x = self.bn(x)
    x = x.view(-1, 320)
    x = F.relu(self.fc1(x))
    x = F.dropout(x, training=self.training)
    x = self.fc2(x)
    x = F.log_softmax(x, dim=1)
    return x
def forward(self, i):
    i = self.cn1(i)
    i = F.relu(i)
    i = F.max_pool2d(i, 2)
    i = self.cn2(i)
    i = F.relu(i)
    i = F.max_pool2d(i, 2)
    i = i.view(len(i), -1)
    i = self.fc1(i)
    i = F.log_softmax(i, dim=1)
    return i
def forward(self, x):
    x = F.relu(F.max_pool2d(self.conv1(x), 2))
    x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
    x = x.view(-1, 320)
    x = F.relu(self.fc1(x))
    x = F.dropout(x, training=self.training)
    x = self.fc2(x)
    return F.log_softmax(x, dim=1)
def forward(self, inputs):
    d0 = self.down0(inputs)
    d1 = self.down1(d0)
    d2 = self.down2(F.max_pool2d(d1, kernel_size=2, stride=2))
    d3 = self.down3(F.max_pool2d(d2, kernel_size=2, stride=2))
    d4 = self.down4(F.max_pool2d(d3, kernel_size=2, stride=2))
    d5 = self.down5(F.max_pool2d(d4, kernel_size=2, stride=2))
    d6 = self.down6(F.max_pool2d(d5, kernel_size=2, stride=2))
    out = self.center(F.max_pool2d(d6, kernel_size=2, stride=2))
    out = self.up6(
        torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d6], dim=1))
    out = self.up5(
        torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d5], dim=1))
    out = self.up4(
        torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d4], dim=1))
    out = self.up3(
        torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d3], dim=1))
    out = self.up2(
        torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d2], dim=1))
    out = self.up1(
        torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d1], dim=1))
    out = self.f1(torch.cat([out, d0], dim=1))
    out = self.f2(torch.cat([out, inputs], dim=1))
    out = self.out(out)
    out = out.squeeze(1)  # remove logits dim
    return out
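With six stride-2 poolings on the encoder path, the skip connections only line up when the input height and width are divisible by 2**6 = 64 (assuming each down block preserves spatial size). Note also that F.upsample is deprecated in current PyTorch in favor of F.interpolate; the core step of each decoder stage is sketched below with dummy sizes:

import torch
import torch.nn.functional as F

d = torch.randn(1, 8, 16, 16)                   # a skip feature map
out = F.max_pool2d(d, kernel_size=2, stride=2)  # encoder half: (1, 8, 8, 8)
up = F.interpolate(out, scale_factor=2, mode='bilinear', align_corners=False)
merged = torch.cat([up, d], dim=1)              # decoder half: (1, 16, 16, 16)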