# The snippets below assume the usual Chainer imports (an assumption; the
# original sources omit them here). Additional imports (e.g. six, numpy)
# are needed where referenced.
import chainer
import chainer.functions as F


def __call__(self, x):
    h = self.st(x)
    h = F.average_pooling_2d(h, 2, 2)  # For TC and RTS datasets
    h = F.relu(self.conv1(h))
    h = F.max_pooling_2d(h, 2, 2)
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(h, 2, 2)
    h = self.fc(h)
    return h
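For reference, F.max_pooling_2d pools over the last two axes of an NCHW input; a minimal shape check, independent of the snippets on this page:

import numpy as np
import chainer.functions as F

x = np.zeros((1, 3, 32, 32), dtype=np.float32)  # (batch, channels, H, W)
y = F.max_pooling_2d(x, ksize=2, stride=2)
print(y.shape)  # (1, 3, 16, 16)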
def affine_matrix(self, x):
    h = F.max_pooling_2d(x, 2, 2)
    h = F.relu(self.conv1(h))
    h = F.max_pooling_2d(h, 2, 2)
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(h, 2, 2)
    theta = F.reshape(self.fc(h), (x.shape[0], 2, 3))
    return theta
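The 2x3 matrices produced by affine_matrix are in the layout expected by Chainer's spatial transformer functions. A minimal, self-contained sketch of applying such a theta (an illustration with dummy data, not code from the original repository):

import numpy as np
import chainer.functions as F

x = np.random.rand(4, 1, 28, 28).astype(np.float32)                       # dummy input batch
theta = np.tile(np.array([[1, 0, 0], [0, 1, 0]], np.float32), (4, 1, 1))  # identity transforms
grid = F.spatial_transformer_grid(theta, (28, 28))                        # sampling grid, shape (4, 2, 28, 28)
warped = F.spatial_transformer_sampler(x, grid)                           # resampled input, shape (4, 1, 28, 28)
print(warped.shape)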
def __call__(self, x):
    h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
    high_resolution_feature = h  # kept for the passthrough (reorg) connection below
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias19(self.bn19(self.conv19(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias20(self.bn20(self.conv20(h), finetune=self.finetune)), slope=0.1)
    h2 = high_resolution_feature
    h2 = F.leaky_relu(self.bias21(self.bn21(self.conv21(h2), finetune=self.finetune)), slope=0.1)
    h2 = reorg(h2)
    h = F.concat((h2, h), axis=1)
    h = F.leaky_relu(self.bias22(self.bn22(self.conv22(h), finetune=self.finetune)), slope=0.1)
    h = self.bias23(self.conv23(h))
    return h
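reorg here is not a Chainer built-in; in the Chainer YOLOv2 ports it is the darknet passthrough layer that turns 2x2 spatial blocks of the high-resolution map into extra channels before the concat. A rough equivalent using Chainer's F.space2depth is sketched below (note: darknet's reorg uses a different channel ordering, so treat this as an illustration only):

import numpy as np
import chainer.functions as F

h2 = np.zeros((1, 64, 26, 26), dtype=np.float32)  # high-resolution feature map
h2 = F.space2depth(h2, 2)                         # -> (1, 256, 13, 13)
print(h2.shape)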
def __call__(self, x):
    h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
    h = F.average_pooling_2d(h, h.shape[-2:])
    h = self.fc19(h)
    return h
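The F.average_pooling_2d(h, h.shape[-2:]) call above is global average pooling: the kernel covers the whole remaining spatial extent. A standalone shape check (the sizes are made up):

import numpy as np
import chainer.functions as F

h = np.zeros((1, 1024, 7, 7), dtype=np.float32)
g = F.average_pooling_2d(h, h.shape[-2:])  # kernel = (7, 7), i.e. the full feature map
print(g.shape)  # (1, 1024, 1, 1)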
def __call__(self, x):
    h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
    high_resolution_feature = h
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias19(self.bn19(self.conv19(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias20(self.bn20(self.conv20(h), finetune=self.finetune)), slope=0.1)
    h2 = high_resolution_feature
    h2 = F.leaky_relu(self.bias21(self.bn21(self.conv21(h2), finetune=self.finetune)), slope=0.1)
    h2 = reorg(h2)
    h = F.concat((h2, h), axis=1)
    h = F.leaky_relu(self.bias22(self.bn22(self.conv22(h), finetune=self.finetune)), slope=0.1)
    h = self.bias23(self.conv23(h))
    return h
def __call__(self, x):
    h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
    h = F.average_pooling_2d(h, h.shape[-2:])
    h = self.fc19(h)
    return h
def __call__(self, x, t=None):
    h = x
    h = F.relu(self.conv1_1(h))
    h = F.relu(self.conv1_2(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv2_1(h))
    h = F.relu(self.conv2_2(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv3_1(h))
    h = F.relu(self.conv3_2(h))
    h = F.relu(self.conv3_3(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv4_1(h))
    h = F.relu(self.conv4_2(h))
    h = F.relu(self.conv4_3(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv5_1(h))
    h = F.relu(self.conv5_2(h))
    h = F.relu(self.conv5_3(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.dropout(F.relu(self.fc6(h)), ratio=.5)
    h = F.dropout(F.relu(self.fc7(h)), ratio=.5)
    h = self.fc8(h)
    fc8 = h
    self.score = fc8
    if t is None:
        assert not chainer.config.train
        return
    self.loss = F.softmax_cross_entropy(fc8, t)
    self.accuracy = F.accuracy(self.score, t)
    return self.loss
def __call__(self, x, t, train=True, finetune=False):
    h = self.l1(x, train, finetune)
    # h = F.dropout(h, self.dr, train)
    h = F.max(h, axis=-3, keepdims=False)
    h = self.l2(h, train, finetune)
    h = F.max(h, axis=-3, keepdims=False)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = self.l3(h, train, finetune)
    h = F.max(h, axis=-3, keepdims=False)
    # h = F.dropout(h, self.dr, train)
    h = self.l4(h, train, finetune)
    h = F.max(h, axis=-3, keepdims=False)
    # h = F.dropout(h, self.dr, train)
    h = self.l5(h, train, finetune)
    h = F.max(h, axis=-3, keepdims=False)
    # h = F.dropout(h, self.dr, train)
    h = self.l6(h, train, finetune)
    h = F.max(h, axis=-3, keepdims=False)
    h = self.top(h)
    h = F.max(h, axis=-3, keepdims=False)
    h = F.max(h, axis=-1, keepdims=False)
    h = F.max(h, axis=-1, keepdims=False)
    return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
def __call__(self, x):
    y1 = F.relu(self.conv1_2(F.relu(self.conv1_1(x))))
    h = F.max_pooling_2d(y1, 2, stride=2)
    y2 = F.relu(self.conv2_2(F.relu(self.conv2_1(h))))
    h = F.max_pooling_2d(y2, 2, stride=2)
    y3 = F.relu(self.conv3_3(F.relu(self.conv3_2(F.relu(self.conv3_1(h))))))
    h = F.max_pooling_2d(y3, 2, stride=2)
    y4 = F.relu(self.conv4_3(F.relu(self.conv4_2(F.relu(self.conv4_1(h))))))
    return [y1, y2, y3, y4]
def __call__(self, x):
    h = F.relu(self.bn1(self.conv1(x)))
    h = F.max_pooling_2d(h, 3, stride=2, pad=0)
    return h
def __call__(self, x, t, before_fc=False):
    self.clear()
    h = self.bn1(self.conv1(x), test=not self.train)
    h = F.max_pooling_2d(F.relu(h), 3, stride=2)
    h = self.res2(h, self.train)
    h = self.res3(h, self.train)
    h = self.res4(h, self.train)
    h = self.res5(h, self.train)
    h = F.average_pooling_2d(h, h.data.shape[2], stride=1)
    self.feature = h
    return h
def __call__(self, x, train=False):
    h = self.conv_bn_relu(x, train=train)
    h = F.max_pooling_2d(h, (3, 3), (2, 2), (1, 1))
    for i, n in enumerate(self.block_num):
        for ii in six.moves.range(n):
            h = self['resnext_block_{}_{}'.format(i, ii)](h, train=train)
    batch, channels, height, width = h.data.shape
    h = F.reshape(F.average_pooling_2d(h, (height, width)), (batch, channels))
    return self.linear(h, train)
def plane_group_spatial_max_pooling(x, ksize, stride=None, pad=0, cover_all=True, use_cudnn=True):
    # Fold the group axis into the channel axis, pool spatially, then restore it.
    xs = x.data.shape
    x = F.reshape(x, (xs[0], xs[1] * xs[2], xs[3], xs[4]))
    x = F.max_pooling_2d(x, ksize, stride, pad, cover_all, use_cudnn)
    x = F.reshape(x, (xs[0], xs[1], xs[2], x.data.shape[2], x.data.shape[3]))
    return x
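A shape-level sketch of what the helper above does, written against a current Chainer API (the deprecated use_cudnn argument is dropped); the batch, channel, and group sizes are made up:

import numpy as np
import chainer.functions as F

x = np.zeros((2, 16, 4, 24, 24), dtype=np.float32)   # (batch, channels, group, H, W)
b, c, g, height, width = x.shape
h = F.reshape(x, (b, c * g, height, width))          # fold the group axis into channels
h = F.max_pooling_2d(h, ksize=2, stride=2)           # pool spatially only
h = F.reshape(h, (b, c, g, h.shape[2], h.shape[3]))  # restore the group axis
print(h.shape)  # (2, 16, 4, 12, 12)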
def __call__(self, x):
    layer_names = ['1_1', '1_2', 'pool', '2_1', '2_2', 'pool', '3_1', '3_2', '3_3', 'pool', '4_1', '4_2', '4_3']
    layers = {}
    h = x
    for layer_name in layer_names:
        if layer_name == 'pool':
            h = F.max_pooling_2d(h, 2, stride=2)
        else:
            h = F.relu(self['conv' + layer_name](h))
            layers[layer_name] = h
    return layers
def __call__(self, x):
    layer_names = ['1_1', '1_2', 'pool', '2_1', '2_2', 'pool', '3_1',
                   '3_2', '3_3', '3_4', 'pool', '4_1', '4_2', '4_3', '4_4',
                   'pool', '5_1', '5_2', '5_3', '5_4']
    layers = {}
    h = x
    for layer_name in layer_names:
        if layer_name == 'pool':
            h = F.max_pooling_2d(h, 2, stride=2)
        else:
            h = F.relu(self['conv' + layer_name](h))
            layers[layer_name] = h
    return layers
def forward(self, ws, cs, ls, dep_ts=None):
    batchsize = len(ws)
    xp = chainer.cuda.get_array_module(ws[0])
    ws = map(self.emb_word, ws)
    cs = [F.squeeze(
        F.max_pooling_2d(
            self.conv_char(
                F.expand_dims(
                    self.emb_char(c), 1)), (int(l[0]), 1)))
          for c, l in zip(cs, ls)]
    xs_f = [F.dropout(F.concat([w, c]),
                      self.dropout_ratio, train=self.train) for w, c in zip(ws, cs)]
    xs_b = [x[::-1] for x in xs_f]
    cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
    _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
    _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
    hs_b = [x[::-1] for x in hs_b]
    hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, hs_b)]
    dep_ys = [self.biaffine_arc(
        F.elu(F.dropout(self.arc_dep(h), 0.32, train=self.train)),
        F.elu(F.dropout(self.arc_head(h), 0.32, train=self.train))) for h in hs]
    if dep_ts is not None:
        heads = dep_ts
    else:
        heads = [F.argmax(y, axis=1) for y in dep_ys]
    cat_ys = [
        self.biaffine_tag(
            F.elu(F.dropout(self.rel_dep(h), 0.32, train=self.train)),
            F.elu(F.dropout(self.rel_head(
                F.embed_id(t, h, ignore_label=IGNORE)), 0.32, train=self.train)))
        for h, t in zip(hs, heads)]
    return cat_ys, dep_ys
def forward(self, ws, cs, ls):
    """
    xs [(w,s,p,y), ..., ]
    w: word, c: char, l: length, y: label
    """
    batchsize = len(ws)
    # cs: [(sentence length, max word length)]
    ws = map(self.emb_word, ws)
    # ls: [(sentence length, char dim)]
    # before conv: (sent len, 1, max word len, char_size)
    # after conv: (sent len, char_size, max word len, 1)
    # after max_pool: (sent len, char_size, 1, 1)
    cs = [F.squeeze(
        F.max_pooling_2d(
            self.conv_char(
                F.expand_dims(
                    self.emb_char(c), 1)), (l, 1)))
          for c, l in zip(cs, ls)]
    # [(sentence length, (word_dim + char_dim))]
    xs_f = [F.dropout(F.concat([w, c]),
                      self.dropout_ratio, train=self.train) for w, c in zip(ws, cs)]
    xs_b = [x[::-1] for x in xs_f]
    cx_f, hx_f, cx_b, hx_b = self._init_state(batchsize)
    _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
    _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
    hs_b = [x[::-1] for x in hs_b]
    # ys: [(sentence length, number of category)]
    hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, hs_b)]
    cat_ys = [self.linear_cat2(F.relu(self.linear_cat1(h))) for h in hs]
    dep_ys = [self.biaffine(
        F.relu(F.dropout(self.linear_dep(h), 0.32, train=self.train)),
        F.relu(F.dropout(self.linear_head(h), 0.32, train=self.train))) for h in hs]
    return cat_ys, dep_ys
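The shape comments above summarise the per-sentence character CNN: after the single-channel convolution the word-length axis is the pooling height, so max pooling with ksize=(max word len, 1) collapses it to one vector per word. A standalone shape check of that pooling step (the sizes are made up):

import numpy as np
import chainer.functions as F

conv_out = np.zeros((7, 30, 10, 1), dtype=np.float32)  # (sent len, char filters, max word len, 1)
pooled = F.max_pooling_2d(conv_out, ksize=(10, 1))     # pool over the word-length axis
print(pooled.shape)             # (7, 30, 1, 1)
print(F.squeeze(pooled).shape)  # (7, 30)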
def forward(self, ws, cs):
    batchsize, length, max_word_len = cs.shape
    ws = self.emb_word(ws)  # (batch, length, word_dim)
    cs = F.reshape(
        F.max_pooling_2d(
            self.conv_char(
                F.reshape(
                    self.emb_char(cs),
                    (batchsize * length, 1, max_word_len, 50))), (max_word_len, 1)),
        (batchsize, length, self.char_dim))
    hs = F.transpose(F.concat([ws, cs], 2), (1, 0, 2))
    hs = F.dropout(hs, self.dropout_ratio, train=self.train)
    hs = F.split_axis(hs, length, 0)
    hs_f = []
    hs_b = []
    self._init_state()
    for h_in_f, h_in_b in zip(hs, reversed(hs)):
        h_f = self.lstm_f2(self.lstm_f1(F.reshape(h_in_f, (batchsize, -1))))
        hs_f.append(h_f)
        h_b = self.lstm_b2(self.lstm_b1(F.reshape(h_in_b, (batchsize, -1))))
        hs_b.append(h_b)
    hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, reversed(hs_b))]
    cat_ys = [self.linear_cat2(F.dropout(
        F.elu(self.linear_cat1(h)), 0.5, train=self.train)) for h in hs]
    hs = [F.reshape(h, (length, -1)) for h in
          F.split_axis(F.transpose(F.stack(hs, 2), (0, 2, 1)), batchsize, 0)]
    dep_ys = [self.biaffine(
        F.relu(F.dropout(self.linear_dep(h), 0.32, train=self.train)),
        F.relu(F.dropout(self.linear_head(h), 0.32, train=self.train))) for h in hs]
    return cat_ys, dep_ys
def __call__(self, x, train=True):
    h = F.max_pooling_2d(self.bn2(F.relu(self.conv1(x))), 3, stride=3)
    h = F.max_pooling_2d(self.bn4(F.relu(self.conv3(h))), 3, stride=3)
    h = F.max_pooling_2d(self.bn6(F.relu(self.conv5(h))), 2, stride=2)
    h = F.dropout(F.relu(self.fc7(h)), train=train)
    h = F.dropout(F.relu(self.fc8(h)), train=train)
    y = self.fc9(h)
    return y
def __call__(self, x):
    x = x / 255.
    h = F.relu(self.conv1(x))
    h = F.relu(self.conv2(h))
    h = F.relu(self.conv3(h))
    # h = F.max_pooling_2d(F.relu(self.bnorm4(self.conv4(h))), 2, stride=2)
    h = F.relu(self.fc1(h))
    y = self.fc2(h)
    return y