def __call__(self, x, t, train=True, finetune=False):
# First conv layer
h = self[0](x)
# Residual blocks
for i in range(1, len(self) - 2):
h = self[i](h, train, finetune)
# BN, relu, pool, final layer
h = self[-2](h)
h = F.relu(h)
    # Merge the extra feature axis into the channels before 2-D pooling:
    # (n, c, s, h, w) -> (n, c*s, h, w).
    n, nc, ns, nx, ny = h.data.shape
    h = F.reshape(h, (n, nc * ns, nx, ny))
h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
h = self[-1](h)
h = F.reshape(h, h.data.shape[:2])
return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
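
# Both __call__ variants above finish with global average pooling by passing the
# feature map's full spatial extent as ksize. A minimal, self-contained sketch of
# that idiom (shapes here are illustrative, not taken from the snippets):
import numpy as np
import chainer.functions as F

# Dummy batch of feature maps: 2 samples, 8 channels, 7x7 spatial.
x = np.random.randn(2, 8, 7, 7).astype(np.float32)

# Global average pooling: the kernel covers the whole feature map,
# so every channel collapses to a single value.
y = F.average_pooling_2d(x, ksize=x.shape[2:])
assert y.shape == (2, 8, 1, 1)

# Drop the trailing 1x1 dimensions, as the snippets do with F.reshape.
y = F.reshape(y, y.shape[:2])
assert y.shape == (2, 8)
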
def __call__(self, x, t, train=True, finetune=False):
h = x
# First conv layer
h = self[0](h)
# Residual blocks
for i in range(1, len(self) - 2):
h = self[i](h, train, finetune)
# BN, relu, pool, final layer
h = self[-2](h)
h = F.relu(h)
h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
h = self[-1](h)
h = F.reshape(h, h.data.shape[:2])
return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
def __call__(self, x, train=True):
h = self.conv1(x, train)
h = self.conv2(h, train)
h = self.conv3(h, train)
h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
h = self.conv4(h, train)
h = self.conv5(h, train)
h = self.conv6(h, train)
h = self.inception_f5_1(h, train)
h = self.inception_f5_2(h, train)
h = self.inception_f5_3(h, train)
h = self.inception_f6_1(h, train)
h = self.inception_f6_2(h, train)
h = self.inception_f6_3(h, train)
h = self.inception_f6_4(h, train)
h = self.inception_f6_5(h, train)
h = self.inception_f7_1(h, train)
h = self.inception_f7_2(h, train)
    num, channels, y, x = h.data.shape
    # global average pooling over the full feature map
    h = F.reshape(F.average_pooling_2d(h, (y, x)), (num, channels))
h = F.dropout(h, ratio=0.2, train=train)
h = self.linear(h)
return h
def _setup_pooling(self, layer):
param = layer.pooling_param
ksize = _get_ksize(param)
stride = _get_stride(param)
pad = _get_pad(param)
if param.pool == param.MAX:
func = functions.max_pooling_2d
elif param.pool == param.AVE:
func = functions.average_pooling_2d
else:
raise RuntimeError('Stochastic pooling is not supported')
fw = _SingleArgumentFunction(func, ksize, stride=stride, pad=pad)
self.forwards[layer.name] = fw
self._add_layer(layer)
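
# _setup_pooling turns a Caffe pooling layer into a one-argument callable. A
# hedged standalone sketch of the same dispatch using functools.partial
# (_SingleArgumentFunction and the protobuf param types are converter internals
# and are not reproduced here):
import functools
import chainer.functions as F

def make_pooling(pool_type, ksize, stride=1, pad=0):
    # Mirror the MAX / AVE dispatch above; stochastic pooling stays unsupported.
    if pool_type == 'max':
        func = F.max_pooling_2d
    elif pool_type == 'ave':
        func = F.average_pooling_2d
    else:
        raise RuntimeError('Stochastic pooling is not supported')
    return functools.partial(func, ksize=ksize, stride=stride, pad=pad)

# Usage: pool = make_pooling('ave', 3, stride=2, pad=1); y = pool(x)
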
def check_forward(self, x_data, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.average_pooling_2d(x, 3, stride=2,
pad=1, use_cudnn=use_cudnn)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
for k in six.moves.range(2):
for c in six.moves.range(3):
x = self.x[k, c]
expect = numpy.array([
[x[0:2, 0:2].sum(), x[0:2, 1:3].sum()],
[x[1:4, 0:2].sum(), x[1:4, 1:3].sum()]]) / 9
gradient_check.assert_allclose(
expect, y_data[k, c], **self.check_forward_options)
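
# The expected values above divide every window sum by ksize**2 (9), i.e.
# Chainer's average pooling counts the zero padding toward the mean. A quick
# numpy cross-check under that assumption (shapes chosen to match the test):
import numpy
import chainer.functions as F

x = numpy.arange(12, dtype=numpy.float32).reshape(1, 1, 4, 3)
y = F.average_pooling_2d(x, 3, stride=2, pad=1)
padded = numpy.pad(x[0, 0], 1, mode='constant')
manual = numpy.array([[padded[i:i + 3, j:j + 3].sum() / 9.0
                       for j in (0, 2)] for i in (0, 2)])
assert numpy.allclose(y.data[0, 0], manual)
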
# convolutional_pose_machine.py, project: convolutional-pose-machines-chainer, author: tomoyukun
def __call__(self, pmap, fmap, cmap):
fmap = self.conv0(fmap)
fmap = F.relu(fmap)
cmap = F.average_pooling_2d(cmap, ksize=8, stride=8)
h = F.concat((fmap, pmap, cmap), 1)
h = self.conv1(h)
h = F.relu(h)
h = self.conv2(h)
h = F.relu(h)
h = self.conv3(h)
h = F.relu(h)
h = self.conv4(h)
h = F.relu(h)
h = self.conv5(h)
return h
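
# In this pose-machine stage the three maps must share one resolution: cmap
# arrives at input resolution and is brought down by a factor of 8 with
# non-overlapping average pooling before the concat. A sketch (the 368x368
# input size is the usual CPM choice, assumed here purely for illustration):
import numpy as np
import chainer.functions as F

cmap = np.ones((1, 1, 368, 368), dtype=np.float32)
small = F.average_pooling_2d(cmap, ksize=8, stride=8)
assert small.shape == (1, 1, 46, 46)  # matches the stride-8 feature maps
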
def __call__(self, x):
h = F.relu(self.conv1_1(x))
h = F.relu(self.conv1_2(h))
h = F.average_pooling_2d(h, 2, 2)
h = F.relu(self.conv2_1(h))
h = F.relu(self.conv2_2(h))
h = F.average_pooling_2d(h, 2, 2)
h = F.relu(self.conv3_1(h))
h = F.relu(self.conv3_2(h))
h = F.relu(self.conv3_3(h))
h = F.relu(self.conv3_4(h))
h = F.average_pooling_2d(h, 2, 2)
h = F.relu(self.fc4(h))
h = F.relu(self.fc5(h))
h = self.fc6(h)
    return h
def __call__(self, x, train=True):
h = self.conv1(x, train)
h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
h = self.conv2_1x1(h, train)
h = self.conv2_3x3(h, train)
h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
h = self.inception3a(h, train)
h = self.inception3b(h, train)
h = self.inception3c(h, train)
h = self.inception4a(h, train)
h = self.inception4b(h, train)
h = self.inception4c(h, train)
h = self.inception4d(h, train)
h = self.inception4e(h, train)
h = self.inception5a(h, train)
h = self.inception5b(h, train)
    num, channels, y, x = h.data.shape
    # global average pooling over the full feature map
    h = F.reshape(F.average_pooling_2d(h, (y, x)), (num, channels))
h = self.linear(h)
return h
def __call__(self, x_0: chainer.Variable, x_1: chainer.Variable) -> typing.List[chainer.Variable]:
hs = []
h = self.c0_0(x_0)
if self.will_concat:
h = F.concat([h, self.c0_1(x_1)])
h = self.c1(h)
hs.append(self.out_1(chainer.functions.average_pooling_2d(h, (h.shape[2], h.shape[3]))))
h = self.c2(h)
hs.append(self.out_2(chainer.functions.average_pooling_2d(h, (h.shape[2], h.shape[3]))))
h = self.c3(h)
h = self.c4(h)
hs.append(h)
return hs
def __call__(self, x, train=False):
h = F.relu(self.conv1(x))
h = F.max_pooling_2d(h, 3, stride=2)
h = self.fire2(h)
h = self.fire3(h)
h = self.fire4(h)
h = F.max_pooling_2d(h, 3, stride=2)
h = self.fire5(h)
h = self.fire6(h)
h = self.fire7(h)
h = self.fire8(h)
h = F.max_pooling_2d(h, 3, stride=2)
h = self.fire9(h)
h = F.dropout(h, ratio=0.5, train=train)
h = F.relu(self.conv10(h))
h = F.average_pooling_2d(h, 13)
return F.reshape(h, (-1, 1000))
def __call__(self, x):
conv1_1 = F.relu(self.vgg.conv1_1(x))
conv1_2 = F.relu(self.vgg.conv1_2(conv1_1))
pool1 = F.average_pooling_2d(conv1_2, 2, stride=2)
conv2_1 = F.relu(self.vgg.conv2_1(pool1))
conv2_2 = F.relu(self.vgg.conv2_2(conv2_1))
pool2 = F.average_pooling_2d(conv2_2, 2, stride=2)
conv3_1 = F.relu(self.vgg.conv3_1(pool2))
conv3_2 = F.relu(self.vgg.conv3_2(conv3_1))
conv3_3 = F.relu(self.vgg.conv3_3(conv3_2))
conv3_4 = F.relu(self.vgg.conv3_4(conv3_3))
pool3 = F.average_pooling_2d(conv3_4, 2, stride=2)
conv4_1 = F.relu(self.vgg.conv4_1(pool3))
conv4_2 = F.relu(self.vgg.conv4_2(conv4_1))
conv4_3 = F.relu(self.vgg.conv4_3(conv4_2))
conv4_4 = F.relu(self.vgg.conv4_4(conv4_3))
pool4 = F.average_pooling_2d(conv4_4, 2, stride=2)
conv5_1 = F.relu(self.vgg.conv5_1(pool4))
return tuple([conv1_1, conv2_1, conv3_1, conv4_1, conv5_1, conv4_2])
def __call__(self, x, t):
self.clear()
h = self.bn1(self.conv1(x), test=not self.train)
h = F.max_pooling_2d(F.relu(h), 3, stride=2)
h = self.res2(h, self.train)
h = self.res3(h, self.train)
h = self.res4(h, self.train)
h = self.res5(h, self.train)
h = F.average_pooling_2d(h, 7, stride=1)
if t=="feature":
return h
h = self.fc(h)
if self.train:
self.loss = F.softmax_cross_entropy(h, t)
self.accuracy = F.accuracy(h, t)
return self.loss
else:
return h
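
# Note the overloaded second argument: passing the string "feature" returns the
# pooled activations before the fully connected layer, while a label array in
# training mode yields the loss. A hedged usage sketch (model, x, labels are
# illustrative names):
#
#     feats = model(x, "feature")   # pooled features, classifier skipped
#     model.train = True
#     loss = model(x, labels)       # softmax cross-entropy loss
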
def __call__(self, x, t, predict=False):
h = self.bn1(self.conv1(x), test=not self.train)
h = F.max_pooling_2d(F.relu(h), 2, stride=2)
h = self.bn2(self.conv2(h), test=not self.train)
h = F.max_pooling_2d(F.relu(h), 2, stride=2)
h = F.dropout(F.relu(self.conv3(h)), ratio=0.6, train=self.train)
h = F.max_pooling_2d(F.relu(self.conv4(h)), 2, stride=2)
h = F.average_pooling_2d(F.relu(self.conv5(h)), 3, stride=1)
h = F.dropout(F.relu(self.fc6(h)), ratio=0.6, train=self.train)
h = self.fc7(h)
self.loss = F.softmax_cross_entropy(h, t)
self.accuracy = F.accuracy(h, t)
if predict:
return h
else:
return self.loss
def reduct(self, x):
h = F.relu(self.conv1_1(x))
h = F.relu(self.bn1(self.conv1_2(h)))
# 100 -> 50
h = F.max_pooling_2d(h, 2, stride=2)
h = F.relu(self.conv2_1(h))
h = F.relu(self.bn2(self.conv2_2(h)))
# 50 -> 25
h = F.max_pooling_2d(h, 2, stride=2)
h = F.relu(self.conv3_1(h))
h = F.relu(self.bn3(self.conv3_2(h)))
# 25 -> (25 + 1 * 2 - 3) / 3 + 1 = 9
h = F.max_pooling_2d(h, 3, stride=3, pad=1)
h = F.relu(self.conv4_1(h))
h = F.relu(self.bn4(self.conv4_2(h)))
# 9 -> 1
h = F.average_pooling_2d(h, 9, stride=1)
return h
def reduct(self, x):
h = F.relu(self.conv1_1(x))
h = F.relu(self.conv1_2(h))
h = self.bn1(h)
# 100 -> 50
h = F.max_pooling_2d(h, 2, stride=2)
h = F.relu(self.conv2(h))
h = self.bn2(h)
# 50 -> 25
h = F.max_pooling_2d(h, 2, stride=2)
h = F.relu(self.conv3(h))
h = self.bn3(h)
# 25 -> (25 + 1 * 2 - 3) / 3 + 1 = 9
h = F.max_pooling_2d(h, 3, stride=3, pad=1)
h = F.relu(self.conv4(h))
h = self.bn4(h)
# 9 -> 1
h = F.average_pooling_2d(h, 9, stride=1)
return h
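
# The inline shape comments in both reduct() variants follow the standard
# pooling arithmetic out = (in + 2*pad - ksize) // stride + 1 (cover_all=False).
# A small checker for those numbers (the helper name is illustrative):
def pool_out_size(size, ksize, stride, pad=0):
    return (size + 2 * pad - ksize) // stride + 1

assert pool_out_size(100, 2, 2) == 50        # 100 -> 50
assert pool_out_size(50, 2, 2) == 25         # 50 -> 25
assert pool_out_size(25, 3, 3, pad=1) == 9   # 25 -> (25 + 2 - 3) / 3 + 1 = 9
assert pool_out_size(9, 9, 1) == 1           # 9 -> 1
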
def __call__(self, x):
h = self.st(x)
h = F.average_pooling_2d(h, 2, 2) # For TC and RTS datasets
h = F.relu(self.conv1(h))
h = F.max_pooling_2d(h, 2, 2)
h = F.relu(self.conv2(h))
h = F.max_pooling_2d(h, 2, 2)
h = self.fc(h)
return h
def __call__(self, x):
h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.dropout(h, 0.25)
h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.dropout(h, 0.25)
h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.dropout(h, 0.25)
h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.dropout(h, 0.25)
h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.dropout(h, 0.25)
h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
h = F.average_pooling_2d(h, h.shape[-2:])
h = self.fc19(h)
return h
def __call__(self, x):
h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
h = F.average_pooling_2d(h, h.shape[-2:])
h = self.fc19(h)
return h
def max_or_ave(word='ave'):
if word == 'ave':
return F.average_pooling_2d
return F.max_pooling_2d
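
# max_or_ave returns the pooling function itself, so the choice can be made once
# and the result called like any Chainer function. A minimal usage sketch:
import numpy as np

pool = max_or_ave('ave')                     # -> F.average_pooling_2d
x = np.ones((1, 3, 8, 8), dtype=np.float32)
y = pool(x, ksize=2, stride=2)
assert y.shape == (1, 3, 4, 4)
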
def __call__(self, x, test):
h = F.relu(self.b1(self.c1(x), test=test))
h = self.b2(self.c2(h), test=test)
if x.data.shape != h.data.shape:
xp = chainer.cuda.get_array_module(x.data)
n, c, hh, ww = x.data.shape
pad_c = h.data.shape[1] - c
p = xp.zeros((n, pad_c, hh, ww), dtype=xp.float32)
p = chainer.Variable(p, volatile=test)
x = F.concat((p, x))
if x.data.shape[2:] != h.data.shape[2:]:
x = F.average_pooling_2d(x, 1, 2)
return h + x
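
# When only the spatial size differs, the shortcut above subsamples with a 1x1
# average pooling at stride 2, which is plain decimation: each output pixel
# averages exactly one input pixel. A quick check of that equivalence:
import numpy as np
import chainer.functions as F

x = np.random.randn(1, 4, 8, 8).astype(np.float32)
y = F.average_pooling_2d(x, 1, 2)
assert np.array_equal(y.data, x[:, :, ::2, ::2])
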
def _global_average_pooling_2d(x):
n_rois, n_channel, H, W = x.array.shape
h = F.average_pooling_2d(x, (H, W), stride=1)
h = F.reshape(h, (n_rois, n_channel))
return h
def __call__(self, x, t, before_fc=False):
self.clear()
h = self.bn1(self.conv1(x), test=not self.train)
h = F.max_pooling_2d(F.relu(h), 3, stride=2)
h = self.res2(h, self.train)
h = self.res3(h, self.train)
h = self.res4(h, self.train)
h = self.res5(h, self.train)
h = F.average_pooling_2d(h, h.data.shape[2], stride=1)
self.feature = h
return h
def maybe_pooling(self, x):
if 2 in self.strides:
return F.average_pooling_2d(x, 1, 2, 0)
return x
def __call__(self, x, train=False):
h = self.conv_bn_relu(x, train)
for i, n in enumerate(self.block_num):
for ii in six.moves.range(n):
h = self['resnext_block_{}_{}'.format(i + 1, ii + 1)](h, train)
batch, channels, height, width = h.data.shape
h = F.reshape(F.average_pooling_2d(h, (height, width)), (batch, channels, 1, 1))
return F.reshape(self.linear(h, train), (batch, self.category_num))
def forward_one_step(self, x, test):
f = activations[self.activation_function]
chain = [x]
    # Hidden convolutional layers
for i in range(self.n_hidden_layers):
u = getattr(self, "layer_%i" % i)(chain[-1])
        # Apply batch normalization, except on the input layer when
        # apply_batchnorm_to_input is False.
        if self.apply_batchnorm and not (i == 0 and self.apply_batchnorm_to_input is False):
            u = getattr(self, "batchnorm_%i" % i)(u, test=test)
chain.append(f(u))
if self.projection_type == "fully_connection":
u = self.projection_layer(chain[-1])
if self.apply_batchnorm:
u = self.projection_batchnorm(u, test=test)
chain.append(f(u))
elif self.projection_type == "global_average_pooling":
batch_size = chain[-1].data.shape[0]
n_maps = chain[-1].data[0].shape[0]
chain.append(F.average_pooling_2d(chain[-1], self.top_filter_size))
chain.append(F.reshape(chain[-1], (batch_size, n_maps)))
u = self.projection_layer(chain[-1])
if self.apply_batchnorm:
u = self.projection_batchnorm(u, test=test)
chain.append(f(u))
else:
raise NotImplementedError()
return chain[-1]
def __call__(self, x):
return functions.average_pooling_2d(x, self.ksize, self.stride, self.pad)
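
# The snippet above is only the __call__ of its wrapper link; a hedged sketch of
# a matching class (the name and constructor are assumptions, not the original
# source):
import chainer
from chainer import functions

class AveragePooling2D(chainer.Link):

    def __init__(self, ksize, stride=None, pad=0):
        super(AveragePooling2D, self).__init__()
        self.ksize = ksize
        self.stride = stride
        self.pad = pad

    def __call__(self, x):
        return functions.average_pooling_2d(x, self.ksize, self.stride, self.pad)
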