Python code examples for max_pooling_2d()

Source: Z2CNN.py (project: gconv_experiments, author: tscohen)
def __call__(self, x, t, train=True, finetune=False):

        h = self.l1(x, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l2(h, train, finetune)

        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0, cover_all=True, use_cudnn=True)

        h = self.l3(h, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l4(h, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l5(h, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l6(h, train, finetune)
        h = F.dropout(h, self.dr, train)

        h = self.top(h)

        h = F.max(h, axis=-1, keepdims=False)
        h = F.max(h, axis=-1, keepdims=False)

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
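
The two F.max calls above take the maximum over width and then height, so the classifier scores come from the spatially strongest response of each top-level channel. Below is a minimal, self-contained shape check for F.max_pooling_2d with ksize=2, stride=2; the input size is hypothetical and the sketch assumes Chainer v2 or later, where the use_cudnn and train keyword arguments seen in this older snippet no longer exist.

import numpy as np
import chainer.functions as F

# Hypothetical 1x1x28x28 input: ksize=2, stride=2, pad=0 halves each spatial side.
x = np.arange(28 * 28, dtype=np.float32).reshape(1, 1, 28, 28)
y = F.max_pooling_2d(x, ksize=2, stride=2, pad=0)
print(y.shape)  # (1, 1, 14, 14)
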
Source: googlenet_v3.py (project: googlenet_v3, author: nutszebra)
def __call__(self, x, train=True):
        h = self.conv1(x, train)
        h = self.conv2(h, train)
        h = self.conv3(h, train)
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.conv4(h, train)
        h = self.conv5(h, train)
        h = self.conv6(h, train)
        h = self.inception_f5_1(h, train)
        h = self.inception_f5_2(h, train)
        h = self.inception_f5_3(h, train)
        h = self.inception_f6_1(h, train)
        h = self.inception_f6_2(h, train)
        h = self.inception_f6_3(h, train)
        h = self.inception_f6_4(h, train)
        h = self.inception_f6_5(h, train)
        h = self.inception_f7_1(h, train)
        h = self.inception_f7_2(h, train)
        num, categories, y, x = h.data.shape
        # global average pooling
        h = F.reshape(F.average_pooling_2d(h, (y, x)), (num, categories))
        h = F.dropout(h, ratio=0.2, train=train)
        h = self.linear(h)
        return h
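
The last pooling step above is the usual global-average-pooling idiom: pooling with ksize equal to the remaining spatial size collapses H and W to 1, and the reshape drops the singleton axes. A small sketch with a hypothetical feature map (the shapes are made up, not taken from googlenet_v3):

import numpy as np
import chainer.functions as F

# Hypothetical (2, 8, 7, 7) feature map: global average pooling down to (2, 8).
h = np.random.rand(2, 8, 7, 7).astype(np.float32)
num, categories, y, x = h.shape
pooled = F.reshape(F.average_pooling_2d(h, (y, x)), (num, categories))
print(pooled.shape)  # (2, 8)
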
Source: ja_lstm_tagger.py (project: depccg, author: masashi-y)
def predict(self, xs):
        """
        xs: list of tokenized (split) sentences
        """
        xs = [self.extractor.process(x) for x in xs]
        batchsize = len(xs)
        ws, cs, ls = zip(*xs)
        ws = map(self.emb_word, ws)
        cs = [F.squeeze(
            F.max_pooling_2d(
                self.conv_char(
                    F.expand_dims(
                        self.emb_char(c), 1)), (l, 1)))
                    for c, l in zip(cs, ls)]
        xs_f = [F.dropout(F.concat([w, c]),
            self.dropout_ratio, train=self.train) for w, c in zip(ws, cs)]
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        ys = [self.linear2(F.relu(self.linear1(F.concat([h_f, h_b]))))
                    for h_f, h_b in zip(hs_f, hs_b)]
        return [y.data[1:-1] for y in ys]
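
In the character-CNN branch above, the pooling window (l, 1) spans the whole character sequence of each word, so only the strongest activation per feature map survives. A self-contained sketch with made-up sizes (l=5 characters, 16 feature maps; these numbers are assumptions, not the project's configuration):

import numpy as np
import chainer.functions as F

# Hypothetical char-CNN output for one word: (batch=1, feats=16, length=5, 1).
l, feats = 5, 16
c = np.random.rand(1, feats, l, 1).astype(np.float32)
v = F.squeeze(F.max_pooling_2d(c, (l, 1)))  # max over the character axis
print(v.shape)  # (16,)
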
Source: main.py (project: cnn-text-classification, author: marevol)
def __call__(self, x, train=True):
        hlist = []
        h_0 = self['embed'](x)
        if not self.non_static:
            h_0 = Variable(h_0.data)
        h_1 = F.reshape(h_0, (h_0.shape[0], 1, h_0.shape[1], h_0.shape[2]))
        for filter_h in self.filter_sizes:
            pool_size = (self.doc_length - filter_h + 1, 1)
            h = F.max_pooling_2d(F.relu(self['conv' + str(filter_h)](h_1)), pool_size)
            hlist.append(h)
        h = F.concat(hlist)
        pos = 0
        while pos < len(self.hidden_units) - 1:
            h = F.dropout(F.relu(self['l' + str(pos)](h)))
            pos += 1
        y = F.relu(self['l' + str(pos)](h))
        return y
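
The pool_size above is chosen so that each convolution filter collapses to a single value per feature map (max-over-time pooling). A hedged shape check with made-up sizes (doc_length=50, filter_h=3, embedding width 32, 100 output channels; none of these come from the project's configuration):

import numpy as np
import chainer.links as L
import chainer.functions as F

# A valid convolution over the length axis leaves 50 - 3 + 1 = 48 positions;
# max pooling with window (48, 1) reduces that axis to one value per filter.
doc_length, filter_h, emb_dim = 50, 3, 32
conv = L.Convolution2D(1, 100, (filter_h, emb_dim))
x = np.random.rand(4, 1, doc_length, emb_dim).astype(np.float32)
h = F.max_pooling_2d(F.relu(conv(x)), (doc_length - filter_h + 1, 1))
print(h.shape)  # (4, 100, 1, 1)
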
Source: mdl_rgb_d.py (project: MultimodalDL, author: masataka46)
def __call__(self, x, t):
        self.clear()
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Source: caffe_function.py (project: chainer-deconv, author: germanRos)
def _setup_pooling(self, layer):
        param = layer.pooling_param
        ksize = _get_ksize(param)
        stride = _get_stride(param)
        pad = _get_pad(param)

        if param.pool == param.MAX:
            func = functions.max_pooling_2d
        elif param.pool == param.AVE:
            func = functions.average_pooling_2d
        else:
            raise RuntimeError('Stochastic pooling is not supported')

        fw = _SingleArgumentFunction(func, ksize, stride=stride, pad=pad)
        self.forwards[layer.name] = fw
        self._add_layer(layer)
Source: alexbn.py (project: chainer-deconv, author: germanRos)
def __call__(self, x, t):
        self.clear()
        h = self.bn1(self.conv1(x), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.bn2(self.conv2(h), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Source: test_max_pooling_2d.py (project: chainer-deconv, author: germanRos)
def check_forward(self, x_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        y = functions.max_pooling_2d(x, 3, stride=2, pad=1,
                                     cover_all=self.cover_all,
                                     use_cudnn=use_cudnn)
        self.assertEqual(y.data.dtype, self.dtype)
        y_data = cuda.to_cpu(y.data)

        self.assertEqual(self.gy.shape, y_data.shape)
        for k in six.moves.range(2):
            for c in six.moves.range(3):
                x = self.x[k, c]
                if self.cover_all:
                    expect = numpy.array([
                        [x[0:2, 0:2].max(), x[0:2, 1:3].max()],
                        [x[1:4, 0:2].max(), x[1:4, 1:3].max()],
                        [x[3:4, 0:2].max(), x[3:4, 1:3].max()]])
                else:
                    expect = numpy.array([
                        [x[0:2, 0:2].max(), x[0:2, 1:3].max()],
                        [x[1:4, 0:2].max(), x[1:4, 1:3].max()]])
                gradient_check.assert_allclose(expect, y_data[k, c])
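
The expected 2x2 and 3x2 outputs above follow from Chainer's output-size rule (floor without cover_all, ceiling with it). A worked calculation for the 4x3 input with ksize=3, stride=2, pad=1 used in this test:

# rows:    (4 + 2*1 - 3) // 2 + 1          = 2   without cover_all (floor)
#          (4 + 2*1 - 3 + 2 - 1) // 2 + 1  = 3   with cover_all (ceiling)
# columns: (3 + 2*1 - 3) // 2 + 1          = 2   in both cases
# This is the same rule implemented by chainer.utils.conv.get_conv_outsize.
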
Source: YOLOtiny.py (project: YOLO_chainer, author: ashitani)
def predict(self, x):
        h = F.leaky_relu(self.c1(x), slope=0.1)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        h = F.leaky_relu(self.c3(h), slope=0.1)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        h = F.leaky_relu(self.c5(h), slope=0.1)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        h = F.leaky_relu(self.c7(h), slope=0.1)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        h = F.leaky_relu(self.c9(h), slope=0.1)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        h = F.leaky_relu(self.c11(h), slope=0.1)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        h = F.leaky_relu(self.c13(h), slope=0.1)
        h = F.leaky_relu(self.c14(h), slope=0.1)
        h = F.leaky_relu(self.c15(h), slope=0.1)
        h = F.leaky_relu(self.l16(h), slope=0.1)
        h = F.leaky_relu(self.l17(h), slope=0.1)
        # skip dropout
        h = self.l19(h)

        return h
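
A hedged size note on the six pooling layers above: each ksize=2, stride=2 pooling halves the resolution, so the spatial size is divided by 2**6 = 64 overall. The 448x448 figure below is the standard YOLO-tiny input, assumed here rather than read from this file.

# 448 -> 224 -> 112 -> 56 -> 28 -> 14 -> 7 after six ksize=2, stride=2 poolings.
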
Source: alex_net.py (project: DeepPoseComparison, author: ynaka81)
def predict(self, x):
        """ Predict 2D pose from image. """
        # layer1
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, 3, stride=2)
        # layer2
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, 3, stride=2)
        # layer3-5
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.relu(self.conv5(h))
        h = F.max_pooling_2d(h, 3, stride=2)
        # layer6-8
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)
        return F.reshape(h, (-1, self.Nj, 2))
Source: convolutional_pose_machine.py (project: convolutional-pose-machines-chainer, author: tomoyukun)
def __call__(self, x):
        h = self.conv1(x)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = self.conv2(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = self.conv3(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = self.conv4(h)
        h = F.relu(h)
        h = self.conv5(h)
        h = F.relu(h)
        h = self.conv6(h)
        h = F.relu(h)
        h = self.conv7(h)

        return h
Source: __init__.py (project: convolutional-pose-machines-chainer, author: tomoyukun)
def __call__(self, x):
        h = self.conv1(x)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = self.conv2(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = self.conv3(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = self.conv4(h)
        h = F.relu(h)
        h = self.conv5(h)
        h = F.relu(h)
        h = self.conv6(h)
        h = F.relu(h)
        h = self.conv7(h)

        return h
Source: CNN.py (project: vsmlib, author: undertherain)
def __call__(self, xs):

        if self.freeze:
            self.embed.disable_update()
        xs = self.embed(xs)
        batchsize, height, width = xs.shape
        xs = F.reshape(xs, (batchsize, 1, height, width))
        conv3_xs = self.conv3(xs)
        conv4_xs = self.conv4(xs)
        conv5_xs = self.conv5(xs)
        h1 = F.max_pooling_2d(F.relu(conv3_xs), conv3_xs.shape[2])
        h2 = F.max_pooling_2d(F.relu(conv4_xs), conv4_xs.shape[2])
        h3 = F.max_pooling_2d(F.relu(conv5_xs), conv5_xs.shape[2])
        concat_layer = F.concat([h1, h2, h3], axis=1)
        with chainer.using_config('train', True):
            y = self.l1(F.dropout(F.tanh(concat_layer)))
        return y
Source: models.py (project: DeepLearning, author: fushuyue)
def __call__(self, x):

        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, 2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, 2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = F.relu(self.conv3_4(h))
        h = F.max_pooling_2d(h, 2, 2)

        h = F.relu(self.fc4(h))
        h = F.relu(self.fc5(h))
        h = self.fc6(h)
        L_out = h
        return L_out


# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
Source: net.py (project: chainer-cifar, author: dsanno)
def __call__(self, x):
        h = self.bconv1_1(x)
        h = self.bconv1_2(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = self.bconv2_1(h)
        h = self.bconv2_2(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = self.bconv3_1(h)
        h = self.bconv3_2(h)
        h = self.bconv3_3(h)
        h = self.bconv3_4(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = F.relu(self.fc4(F.dropout(h)))
        h = F.relu(self.fc5(F.dropout(h)))
        h = self.fc6(h)
        return h
Source: net.py (project: chainer-cifar, author: dsanno)
def __call__(self, x):
        h = self.l0(x)
        h = self.l1_1(h)
        h = self.l1_2(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = self.l2_1(h)
        h = self.l2_2(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = self.l3_1(h)
        h = self.l3_2(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = self.l4_1(h)
        h = self.l4_2(h)
        h = F.dropout(h, 0.25)
        h = F.average_pooling_2d(h, 4, 1, 0)
        h = self.fc(h)
        return h
Source: net.py (project: chainer-cifar, author: dsanno)
def __call__(self, x):
        h = self.bconv1_1(x)
        h = self.bconv1_2(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = self.bconv2_1(h)
        h = self.bconv2_2(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = self.bconv3_1(h)
        h = self.bconv3_2(h)
        h = self.bconv3_3(h)
        h = self.bconv3_4(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = F.relu(self.fc4(F.dropout(h)))
        h = F.relu(self.fc5(F.dropout(h)))
        h = self.fc6(h)
        return h
Source: caffefunction.py (project: deel, author: uei)
def _setup_pooling(self, layer):
        param = layer.pooling_param
        ksize = _get_ksize(param)
        stride = _get_stride(param)
        pad = _get_pad(param)

        if param.pool == param.MAX:
            func = functions.max_pooling_2d
        elif param.pool == param.AVE:
            func = functions.average_pooling_2d
        else:
            raise RuntimeError('Stochastic pooling is not supported')

        fw = _SingleArgumentFunction(func, ksize, stride=stride, pad=pad)
        self.forwards[layer.name] = fw
        self._add_layer(layer)
Source: googlenet_v2.py (project: googlenet_v2, author: nutszebra)
def __call__(self, x, train=True):
        h = self.conv1(x, train)
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.conv2_1x1(h, train)
        h = self.conv2_3x3(h, train)
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.inception3a(h, train)
        h = self.inception3b(h, train)
        h = self.inception3c(h, train)
        h = self.inception4a(h, train)
        h = self.inception4b(h, train)
        h = self.inception4c(h, train)
        h = self.inception4d(h, train)
        h = self.inception4e(h, train)
        h = self.inception5a(h, train)
        h = self.inception5b(h, train)
        num, categories, y, x = h.data.shape
        # global average pooling
        h = F.reshape(F.average_pooling_2d(h, (y, x)), (num, categories))
        h = self.linear(h)
        return h
Source: spp_discriminator.py (project: Semantic-Segmentation-using-Adversarial-Networks, author: oyam)
def __call__(self, x):
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.spatial_pyramid_pooling_2d(h, 3, F.MaxPooling2D)
        h = F.tanh(self.fc4(h))
        h = F.dropout(h, ratio=.5, train=self.train)
        h = F.tanh(self.fc5(h))
        h = F.dropout(h, ratio=.5, train=self.train)
        h = self.fc6(h)
        return h
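
A hedged note on the spatial pyramid pooling call above (pyramid height 3): the feature map is max-pooled over 1x1, 2x2, and 4x4 grids, so each channel contributes a fixed number of bins and the following fully connected layer can accept variable-sized inputs.

# bins per channel: 1 + 4 + 16 = 21, i.e. (4**3 - 1) / 3
# output shape:     (N, 21 * C, 1, 1), independent of the input H and W
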
Source: googlenet.py (project: googlenet, author: nutszebra)
def __call__(self, x, train=True):
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = F.relu(self.conv2_1x1(h))
        h = F.relu(self.conv2_3x3(h))
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.inception3a(h)
        h = self.inception3b(h)
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.inception4a(h)
        h = self.inception4b(h)
        h = self.inception4c(h)
        h = self.inception4d(h)
        h = self.inception4e(h)
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.inception5a(h)
        h = F.relu(self.inception5b(h))
        num, categories, y, x = h.data.shape
        # global average pooling
        h = F.reshape(F.average_pooling_2d(h, (y, x)), (num, categories))
        h = F.dropout(h, ratio=0.4, train=train)
        h = self.linear(h)
        return h
Source: model.py (project: squeezenet-chainer, author: jayhack)
def __call__(self, x, train=False):
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, 3, stride=2)

        h = self.fire2(h)
        h = self.fire3(h)
        h = self.fire4(h)

        h = F.max_pooling_2d(h, 3, stride=2)

        h = self.fire5(h)
        h = self.fire6(h)
        h = self.fire7(h)
        h = self.fire8(h)

        h = F.max_pooling_2d(h, 3, stride=2)

        h = self.fire9(h)
        h = F.dropout(h, ratio=0.5, train=train)

        h = F.relu(self.conv10(h))
        h = F.average_pooling_2d(h, 13)

        return F.reshape(h, (-1, 1000))
Source: vgg_a.py (project: vgg, author: nutszebra)
def __call__(self, x, train=True):
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, ksize=(2, 2), stride=(2, 2), pad=(0, 0))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, ksize=(2, 2), stride=(2, 2), pad=(0, 0))
        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.max_pooling_2d(h, ksize=(2, 2), stride=(2, 2), pad=(0, 0))
        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.max_pooling_2d(h, ksize=(2, 2), stride=(2, 2), pad=(0, 0))
        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.max_pooling_2d(h, ksize=(2, 2), stride=(2, 2), pad=(0, 0))
        h = F.dropout(h, ratio=0.5, train=train)
        h = F.relu(self.fc1(h))
        h = F.dropout(h, ratio=0.5, train=train)
        h = F.relu(self.fc2(h))
        h = self.fc3(h)
        num, categories, y, x = h.data.shape
        # global average pooling
        h = F.reshape(F.average_pooling_2d(h, (y, x)), (num, categories))
        return h
Source: small_dnn.py (project: TripletEmbedding, author: hrantzsch)
def __call__(self, x):
        self.clear()

        h = self.conv1(x)
        h = F.max_pooling_2d(h, (3, 5), stride=2)

        h = self.conv2(h)

        h = self.conv3(h)
        h = F.max_pooling_2d(h, 3, stride=2, pad=1)

        h = self.conv4(h)

        h = self.fc1(h)
        h = self.fc2(h)

        return h
Source: ResNet_32.py (project: kaggle-dsg-qualification, author: Ignotus)
def h(self, x, train, finetune):
        # First convolution layer.
        h = self[0](x)

        h = F.dropout(h, ratio=self.dropout, train=train)

        # Residual blocks.
        for i in range(1, len(self) - 2):
            h = self[i](h, train, finetune)

        # Batch normalization.
        h = self[-2](h, test=not train, finetune=finetune)
        h = F.relu(h)

        # Max pooling.
        h = F.max_pooling_2d(h, ksize=2, pad=0)

        # Prediction layer 5.
        h = self[-1](h)
        h = F.reshape(h, (h.data.shape[0], 5))

        return h
Source: ResNet.py (project: kaggle-dsg-qualification, author: Ignotus)
def h(self, x, train, finetune):
        # First convolution layer.
        h = self[0](x)

        h = F.dropout(h, ratio=self.dropout, train=train)

        # Residual blocks.
        for i in range(1, len(self) - 2):
            h = self[i](h, train, finetune)

        # Batch normalization.
        h = self[-2](h, test=not train, finetune=finetune)
        h = F.relu(h)

        # Max pooling.
        h = F.max_pooling_2d(h, ksize=4, pad=0)

        # Prediction layer 5.
        h = self[-1](h)
        h = F.reshape(h, (h.data.shape[0], 5))

        return h
Source: ResNet50.py (project: chainer-caption, author: apple2373)
def __call__(self, x, t):
        self.clear()
        h = self.bn1(self.conv1(x), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.res2(h, self.train)
        h = self.res3(h, self.train)
        h = self.res4(h, self.train)
        h = self.res5(h, self.train)
        h = F.average_pooling_2d(h, 7, stride=1)
        if t=="feature":
            return h
        h = self.fc(h)

        if self.train:
            self.loss = F.softmax_cross_entropy(h, t)
            self.accuracy = F.accuracy(h, t)
            return self.loss
        else:
            return h
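
A hedged size note: with the standard 224x224 ResNet-50 input (an assumption, not read from this file), the res5 output is a 7x7 map, so average_pooling_2d(h, 7, stride=1) collapses it to 1x1 before the fully connected layer.

# 224 -> 112 (conv1, stride 2) -> 56 (max pool) -> 28 -> 14 -> 7 across res2-res5,
# so the pooled feature returned when t == "feature" has spatial size 1x1.
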
Source: model_cnn.py (project: cifar-10, author: shiba24)
def __call__(self, x, t, predict=False):
        h = self.bn1(self.conv1(x), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 2, stride=2)
        h = self.bn2(self.conv2(h), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 2, stride=2)
        h = F.dropout(F.relu(self.conv3(h)), ratio=0.6, train=self.train)
        h = F.max_pooling_2d(F.relu(self.conv4(h)), 2, stride=2)
        h = F.average_pooling_2d(F.relu(self.conv5(h)), 3, stride=1)
        h = F.dropout(F.relu(self.fc6(h)), ratio=0.6, train=self.train)
        h = self.fc7(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        if predict:
            return h
        else:
            return self.loss
Source: cnn_feature_extractors.py (project: face-classifier-cnn, author: nknytk)
def reduct(self, x):
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.bn1(self.conv1_2(h)))
        # 100 -> 50
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.bn2(self.conv2_2(h)))
        # 50 -> 25
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.bn3(self.conv3_2(h)))
        # 25 -> (25 + 1 * 2 - 3) / 3 + 1 = 9
        h = F.max_pooling_2d(h, 3, stride=3, pad=1)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.bn4(self.conv4_2(h)))
        # 9 -> 1
        h = F.average_pooling_2d(h, 9, stride=1)

        return h
Source: cnn_feature_extractors.py (project: face-classifier-cnn, author: nknytk)
def reduct(self, x):
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = self.bn1(h)
        # 100 -> 50
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv2(h))
        h = self.bn2(h)
        # 50 -> 25
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv3(h))
        h = self.bn3(h)
        # 25 -> (25 + 1 * 2 - 3) / 3 + 1 = 9
        h = F.max_pooling_2d(h, 3, stride=3, pad=1)

        h = F.relu(self.conv4(h))
        h = self.bn4(h)
        # 9 -> 1
        h = F.average_pooling_2d(h, 9, stride=1)

        return h
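
The inline size comments above can be checked directly. A tiny runnable sketch using a dummy single-channel 25x25 map (the channel count and batch size are arbitrary assumptions):

import numpy as np
import chainer.functions as F

# 25 -> (25 + 2*1 - 3) // 3 + 1 = 9 for ksize=3, stride=3, pad=1.
x = np.zeros((1, 1, 25, 25), dtype=np.float32)
print(F.max_pooling_2d(x, 3, stride=3, pad=1).shape)  # (1, 1, 9, 9)
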

