Example source code for Python relu()

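Before the project excerpts, here is a minimal standalone sketch of calling chainer.functions.relu directly (the array values are illustrative):

import numpy as np
import chainer.functions as F

x = np.array([[-1.5, 0.0, 2.0]], dtype=np.float32)
y = F.relu(x)      # accepts an ndarray or a Variable, returns a chainer.Variable
print(y.data)      # [[0. 0. 2.]]
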
ssd_vgg16.py (project: chainercv, author: chainer)
def __call__(self, x):
        """Compute feature maps from a batch of images.

        This method extracts feature maps from
        :obj:`conv4_3`, :obj:`conv7`, :obj:`conv8_2`,
        :obj:`conv9_2`, :obj:`conv10_2`, and :obj:`conv11_2`.

        Args:
            x (ndarray): An array holding a batch of images.
                The images should be resized to :math:`300\\times 300`.

        Returns:
            list of Variable:
            Each variable contains a feature map.
        """

        ys = super(VGG16Extractor300, self).__call__(x)
        for i in range(8, 11 + 1):
            h = ys[-1]
            h = F.relu(self['conv{:d}_1'.format(i)](h))
            h = F.relu(self['conv{:d}_2'.format(i)](h))
            ys.append(h)
        return ys
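
A hypothetical usage sketch for the extractor above; the import path and constructor follow chainercv's documented layout and may differ between versions:

import numpy as np
from chainercv.links.model.ssd import VGG16Extractor300

extractor = VGG16Extractor300()
x = np.zeros((1, 3, 300, 300), dtype=np.float32)  # one 300x300 RGB image
ys = extractor(x)  # list of Variables: feature maps from conv4_3, conv7, conv8_2, ..., conv11_2
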
test_conv_2d_bn_activ.py (project: chainercv, author: chainer)
def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        # Make the batch normalization the identity function.
        self.l.bn.avg_var[:] = 1
        self.l.bn.avg_mean[:] = 0
        with chainer.using_config('train', False):
            y = self.l(x)

        self.assertIsInstance(y, chainer.Variable)
        self.assertIsInstance(y.array, self.l.xp.ndarray)

        if self.activ == 'relu':
            np.testing.assert_almost_equal(
                cuda.to_cpu(y.array), np.maximum(cuda.to_cpu(x_data), 0),
                decimal=4
            )
        elif self.activ == 'add_one':
            np.testing.assert_almost_equal(
                cuda.to_cpu(y.array), cuda.to_cpu(x_data) + 1,
                decimal=4
            )
voxelchain.py (project: voxcelchain, author: hiroaki-kaneda)
def fwd(self, x):
        # conv -> ReLU -> local response normalization -> 3x3 max pooling (stride 2), twice
        h = F.max_pooling_nd(F.local_response_normalization(F.relu(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_nd(F.local_response_normalization(F.relu(self.conv2(h))), 3, stride=2)
        # pre-Chainer-v2 dropout API: the train flag is passed explicitly
        h = F.dropout(F.relu(self.fc3(h)), train=self.train)
        h = self.fc4(h)
        return h
dqn_agent.py (project: chainer_pong, author: icoxfog417)
def __call__(self, state: np.ndarray):
        _state = self.arr_to_gpu(state)
        s = Variable(_state)
        h1 = F.relu(self.l1(s))
        h2 = F.relu(self.l2(h1))
        h3 = F.relu(self.l3(h2))
        h4 = F.relu(self.l4(h3))
        q_value = self.out(h4)
        return q_value
model.py (project: brain_segmentation, author: Ryo-Ito)
def __call__(self, x, train):
        h = F.relu(self.bnorm1(x, test=not train))
        h = self.conv1(h)
        h = F.relu(self.bnorm2(h, test=not train))
        h = self.conv2(h)
        return h + x
chainer_usage.py (project: nelder_mead, author: owruby)
def __call__(self, x, t):
        h = F.relu(self.l1(x))
        h = self.l2(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)

        return self.loss
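
A hedged sketch of one training step around the loss returned above; the optimizer, the surrounding Chain, and the batch shapes are illustrative, not taken from the project:

import numpy as np
import chainer
from chainer import optimizers

model = TwoLayerNet()          # hypothetical Chain whose __call__(x, t) is the method shown above
optimizer = optimizers.Adam()  # optimizer choice is illustrative
optimizer.setup(model)

x = np.random.rand(32, 784).astype(np.float32)             # minibatch of inputs (shape illustrative)
t = np.random.randint(0, 10, size=(32,)).astype(np.int32)  # integer class labels

loss = model(x, t)   # forward pass; stores loss and accuracy, returns the loss Variable
model.cleargrads()
loss.backward()
optimizer.update()
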
ops.py (project: chainer-gan-experiments, author: Aixile)
def __init__(self, ch, bn=True, activation=F.relu, k_size=3):
        self.bn = bn
        self.activation = activation
        layers = {}
        pad = k_size//2
        layers['c0'] = L.Convolution2D(ch, ch, 3, 1, pad)
        layers['c1'] = L.Convolution2D(ch, ch, 3, 1, pad)
        if bn:
            layers['bn0'] = L.BatchNormalization(ch)
            layers['bn1'] = L.BatchNormalization(ch)
        super(ResBlock, self).__init__(**layers)
ops.py (project: chainer-gan-experiments, author: Aixile)
def differentiable_backward(self, g):
        if self.normalize_input:
            raise NotImplementedError

        if self.activation is F.leaky_relu:
            g = backward_leaky_relu(self.x, g)
        elif self.activation is F.relu:
            g = backward_relu(self.x, g)
        elif self.activation is F.tanh:
            g = backward_tanh(self.x, g)
        elif self.activation is F.sigmoid:
            g = backward_sigmoid(self.x, g)
        elif self.activation is not None:
            raise NotImplementedError

        if self.norm == 'ln':
            g = backward_layernormalization(self.nx, g, self.n)
        elif self.norm is not None:
            raise NotImplementedError

        if self.nn == 'down_conv' or self.nn == 'conv':
            g = backward_convolution(None, g, self.c)
        elif self.nn == 'linear':
            g = backward_linear(None, g, self.c)
        elif self.nn == 'up_deconv':
            g = backward_deconvolution(None, g, self.c)
        else:
            raise NotImplementedError

        return g
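
The backward_* helpers referenced above live in the same ops.py module; below is a minimal sketch of what the ReLU case computes (the project's actual helper may be written differently):

import chainer

def backward_relu(x, g):
    # ReLU gradient: pass the upstream gradient g only where the forward input x was positive.
    x_data = x.data if isinstance(x, chainer.Variable) else x
    mask = (x_data > 0).astype(x_data.dtype)
    return g * mask
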
vgg.py (project: chainer-visualization, author: hvy)
def __call__(self, x):

        """Return a softmax probability distribution over predicted classes."""

        # Convolutional layers
        hs, _ = self.feature_map_activations(x)
        h = hs[-1]

        # Fully connected layers
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        h = self.fc8(h)

        return F.softmax(h)
vgg.py (project: chainer-visualization, author: hvy)
def activations(self, x, layer_idx):

        """Return filter activations projected back to the input space, i.e.
        images with shape (n_feature_maps, 3, 224, 224) for a particular layer.

        The layer index is expected to be 0-based.
        """

        if x.shape[0] != 1:
            raise TypeError('Visualization is only supported for a single image at a time')

        self.check_add_deconv_layers()
        hs, unpooling_sizes = self.feature_map_activations(x)
        hs = [h.data for h in hs]

        activation_maps = []
        n_activation_maps = hs[layer_idx].shape[1]

        xp = self.xp

        for i in range(n_activation_maps):  # For each channel
            h = hs[layer_idx].copy()

            condition = xp.zeros_like(h)
            condition[0][i] = 1  # Keep one feature map and zero all others

            h = Variable(xp.where(condition, h, xp.zeros_like(h)))

            for i in reversed(range(layer_idx+1)):
                p = self.mps[i]
                h = F.upsampling_2d(h, p.indexes, p.kh, p.sy, p.ph, unpooling_sizes[i])
                for deconv in reversed(self.deconv_blocks[i]):
                    h = deconv(F.relu(h))

            activation_maps.append(h.data)

        return xp.concatenate(activation_maps)
models.py (project: chainer-spatial-transformer-networks, author: hvy)
def __call__(self, x):
        h = self.st(x)
        h = F.average_pooling_2d(h, 2, 2)  # For TC and RTS datasets
        h = F.relu(self.conv1(h))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, 2, 2)
        h = self.fc(h)
        return h
models.py (project: chainer-spatial-transformer-networks, author: hvy)
def affine_matrix(self, x):
        h = F.max_pooling_2d(x, 2, 2)
        h = F.relu(self.conv1(h))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, 2, 2)
        theta = F.reshape(self.fc(h), (x.shape[0], 2, 3))
        return theta
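
The 2x3 theta returned above is the affine matrix of a spatial transformer; a hedged sketch of how it is typically consumed with Chainer's built-in grid and sampler functions follows (the output size is illustrative):

import chainer.functions as F

def spatial_transform(x, theta, out_size=(28, 28)):
    grid = F.spatial_transformer_grid(theta, out_size)  # sampling grid from the affine parameters
    return F.spatial_transformer_sampler(x, grid)       # bilinear resampling of x on that grid
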
lstm.py (project: ROCStory_skipthought_baseline, author: soskek)
def solve(self, x_seq, pos, neg, train=True, variablize=False, onebyone=True):
        if variablize:  # If arguments are just arrays (not variables), make them variables
            x_seq = [chainer.Variable(x, volatile=not train) for x in x_seq]
            x_seq = [F.dropout(x, ratio=self.dropout_ratio, train=train) for x in x_seq]
            pos = self.act1(self.W_candidate(
                F.dropout(chainer.Variable(pos, volatile=not train),
                          ratio=self.dropout_ratio, train=train)))
            neg = self.act1(self.W_candidate(
                F.dropout(chainer.Variable(neg, volatile=not train),
                          ratio=self.dropout_ratio, train=train)))
        if onebyone and train:
            target_x_seq = [self.act1(self.W_candidate(x)) for x in x_seq[:4]]# 1,2,3,4,5-th targets
            onebyone_loss = 0.

        self.LSTM.reset_state()
        for i, x in enumerate(x_seq):
            h = self.LSTM( F.dropout(x, ratio=self.dropout_ratio, train=train) )
            if onebyone and train and target_x_seq[i+1:]:
                pos_score, neg_score = self.calculate_score(h, target_x_seq[i+1:], neg,
                                                            multipos=True)
                onebyone_loss += F.relu( self.margin - pos_score + neg_score )

        pos_score, neg_score = self.calculate_score(h, pos, neg)
        accum_loss = F.relu( self.margin - pos_score + neg_score )
        TorFs = sum(accum_loss.data < self.margin)

        if onebyone and train:
            return F.sum(accum_loss) + F.sum(onebyone_loss), TorFs
        else:
            return F.sum(accum_loss), TorFs
mnist.2.py (project: dockerfiles, author: floydhub)
def __call__(self, x):
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        return self.l3(h2)
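
A hedged sketch of how the l1-l3 links used above are typically declared; the layer sizes are illustrative, not taken from the project:

import chainer
import chainer.functions as F
import chainer.links as L

class MLP(chainer.Chain):
    def __init__(self, n_units=1000, n_out=10):
        super(MLP, self).__init__()
        with self.init_scope():
            self.l1 = L.Linear(None, n_units)  # input size inferred on first call
            self.l2 = L.Linear(None, n_units)
            self.l3 = L.Linear(None, n_out)

    def __call__(self, x):
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        return self.l3(h2)
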
vgg16.py (project: fcn, author: wkentaro)
def __call__(self, x, t=None):
        h = x
        h = F.relu(self.conv1_1(h))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.dropout(F.relu(self.fc6(h)), ratio=.5)
        h = F.dropout(F.relu(self.fc7(h)), ratio=.5)
        h = self.fc8(h)
        fc8 = h

        self.score = fc8

        if t is None:
            assert not chainer.config.train
            return

        self.loss = F.softmax_cross_entropy(fc8, t)
        self.accuracy = F.accuracy(self.score, t)
        return self.loss
train_a3c_gym.py (project: ai-bs-summer17, author: uchibe)
def pi_and_v(self, state):

        def forward(head, lstm, tail):
            h = F.relu(head(state))
            h = lstm(h)
            return tail(h)

        pout = forward(self.pi_head, self.pi_lstm, self.pi)
        vout = forward(self.v_head, self.v_lstm, self.v)

        return pout, vout
mlp_bn.py (project: chainerrl, author: chainer)
def __init__(self, in_size, out_size, hidden_sizes, normalize_input=True,
                 normalize_output=False, nonlinearity=F.relu, last_wscale=1):
        self.in_size = in_size
        self.out_size = out_size
        self.hidden_sizes = hidden_sizes
        self.normalize_input = normalize_input
        self.normalize_output = normalize_output
        self.nonlinearity = nonlinearity

        super().__init__()
        with self.init_scope():
            if normalize_input:
                self.input_bn = L.BatchNormalization(in_size)
                self.input_bn.avg_var[:] = 1

            if hidden_sizes:
                hidden_layers = []
                hidden_layers.append(LinearBN(in_size, hidden_sizes[0]))
                for hin, hout in zip(hidden_sizes, hidden_sizes[1:]):
                    hidden_layers.append(LinearBN(hin, hout))
                self.hidden_layers = chainer.ChainList(*hidden_layers)
                self.output = L.Linear(hidden_sizes[-1], out_size,
                                       initialW=LeCunNormal(last_wscale))
            else:
                self.output = L.Linear(in_size, out_size,
                                       initialW=LeCunNormal(last_wscale))

            if normalize_output:
                self.output_bn = L.BatchNormalization(out_size)
                self.output_bn.avg_var[:] = 1
dqn_head.py (project: chainerrl, author: chainer)
def __init__(self, n_input_channels=4, n_output_channels=512,
                 activation=F.relu, bias=0.1):
        self.n_input_channels = n_input_channels
        self.activation = activation
        self.n_output_channels = n_output_channels

        layers = [
            L.Convolution2D(n_input_channels, 32, 8, stride=4,
                            initial_bias=bias),
            L.Convolution2D(32, 64, 4, stride=2, initial_bias=bias),
            L.Convolution2D(64, 64, 3, stride=1, initial_bias=bias),
            L.Linear(3136, n_output_channels, initial_bias=bias),
        ]

        super(NatureDQNHead, self).__init__(*layers)
dqn_head.py (project: chainerrl, author: chainer)
def __init__(self, n_input_channels=4, n_output_channels=256,
                 activation=F.relu, bias=0.1):
        self.n_input_channels = n_input_channels
        self.activation = activation
        self.n_output_channels = n_output_channels

        layers = [
            L.Convolution2D(n_input_channels, 16, 8, stride=4,
                            initial_bias=bias),
            L.Convolution2D(16, 32, 4, stride=2, initial_bias=bias),
            L.Linear(2592, n_output_channels, initial_bias=bias),
        ]

        super(NIPSDQNHead, self).__init__(*layers)
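
Both DQN heads above register their layers through a ChainList and store the activation; a hedged sketch of the forward pass such a head typically uses:

def __call__(self, state):
    h = state
    for layer in self:  # iterate the ChainList's registered links in order
        h = self.activation(layer(h))
    return h
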
state_q_functions.py (project: chainerrl, author: chainer)
def __init__(self, ndim_obs, n_actions, n_hidden_channels,
                 n_hidden_layers, nonlinearity=F.relu,
                 last_wscale=1.0):
        super().__init__(model=MLP(
            in_size=ndim_obs, out_size=n_actions,
            hidden_sizes=[n_hidden_channels] * n_hidden_layers,
            nonlinearity=nonlinearity,
            last_wscale=last_wscale))
vgg16.py (project: chainer-faster-rcnn, author: mitmul)
def __init__(self, train=False):
        super(VGG16, self).__init__()
        self.trunk = [
            ('conv1_1', L.Convolution2D(3, 64, 3, 1, 1)),
            ('_relu1_1', F.ReLU()),
            ('conv1_2', L.Convolution2D(64, 64, 3, 1, 1)),
            ('_relu1_2', F.ReLU()),
            ('_pool1', F.MaxPooling2D(2, 2)),
            ('conv2_1', L.Convolution2D(64, 128, 3, 1, 1)),
            ('_relu2_1', F.ReLU()),
            ('conv2_2', L.Convolution2D(128, 128, 3, 1, 1)),
            ('_relu2_2', F.ReLU()),
            ('_pool2', F.MaxPooling2D(2, 2)),
            ('conv3_1', L.Convolution2D(128, 256, 3, 1, 1)),
            ('_relu3_1', F.ReLU()),
            ('conv3_2', L.Convolution2D(256, 256, 3, 1, 1)),
            ('_relu3_2', F.ReLU()),
            ('conv3_3', L.Convolution2D(256, 256, 3, 1, 1)),
            ('_relu3_3', F.ReLU()),
            ('_pool3', F.MaxPooling2D(2, 2)),
            ('conv4_1', L.Convolution2D(256, 512, 3, 1, 1)),
            ('_relu4_1', F.ReLU()),
            ('conv4_2', L.Convolution2D(512, 512, 3, 1, 1)),
            ('_relu4_2', F.ReLU()),
            ('conv4_3', L.Convolution2D(512, 512, 3, 1, 1)),
            ('_relu4_3', F.ReLU()),
            ('_pool4', F.MaxPooling2D(2, 2)),
            ('conv5_1', L.Convolution2D(512, 512, 3, 1, 1)),
            ('_relu5_1', F.ReLU()),
            ('conv5_2', L.Convolution2D(512, 512, 3, 1, 1)),
            ('_relu5_2', F.ReLU()),
            ('conv5_3', L.Convolution2D(512, 512, 3, 1, 1)),
            ('_relu5_3', F.ReLU()),
        ]
        for name, link in self.trunk:
            if not name.startswith('_'):
                self.add_link(name, link)
test_relu.py (project: chainer-deconv, author: germanRos)
def check_backward(self, x_data, y_grad, use_cudnn=True):
        gradient_check.check_backward(
            functions.ReLU(use_cudnn), x_data, y_grad,
            **self.check_backward_options)
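
A hedged sketch of the arrays such a check is typically driven with (shapes and value ranges are illustrative):

import numpy as np

x_data = np.random.uniform(-1, 1, (3, 2)).astype(np.float32)  # forward input
y_grad = np.random.uniform(-1, 1, (3, 2)).astype(np.float32)  # upstream gradient, same shape as the output
# self.check_backward(x_data, y_grad) then compares ReLU's analytic gradient against a numerical one
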
VGG16.py (project: deel, author: uei)
def __init__(self, train=False):
        super(VGG16, self).__init__()
        self.trunk = [
            ('conv1_1', L.Convolution2D(3, 64, 3, 1, 1)),
            ('relu1_1', F.ReLU()),
            ('conv1_2', L.Convolution2D(64, 64, 3, 1, 1)),
            ('relu1_2', F.ReLU()),
            ('pool1', F.MaxPooling2D(2, 2)),
            ('conv2_1', L.Convolution2D(64, 128, 3, 1, 1)),
            ('relu2_1', F.ReLU()),
            ('conv2_2', L.Convolution2D(128, 128, 3, 1, 1)),
            ('relu2_2', F.ReLU()),
            ('pool2', F.MaxPooling2D(2, 2)),
            ('conv3_1', L.Convolution2D(128, 256, 3, 1, 1)),
            ('relu3_1', F.ReLU()),
            ('conv3_2', L.Convolution2D(256, 256, 3, 1, 1)),
            ('relu3_2', F.ReLU()),
            ('conv3_3', L.Convolution2D(256, 256, 3, 1, 1)),
            ('relu3_3', F.ReLU()),
            ('pool3', F.MaxPooling2D(2, 2)),
            ('conv4_1', L.Convolution2D(256, 512, 3, 1, 1)),
            ('relu4_1', F.ReLU()),
            ('conv4_2', L.Convolution2D(512, 512, 3, 1, 1)),
            ('relu4_2', F.ReLU()),
            ('conv4_3', L.Convolution2D(512, 512, 3, 1, 1)),
            ('relu4_3', F.ReLU()),
            ('pool4', F.MaxPooling2D(2, 2)),
            ('conv5_1', L.Convolution2D(512, 512, 3, 1, 1)),
            ('relu5_1', F.ReLU()),
            ('conv5_2', L.Convolution2D(512, 512, 3, 1, 1)),
            ('relu5_2', F.ReLU()),
            ('conv5_3', L.Convolution2D(512, 512, 3, 1, 1)),
            ('relu5_3', F.ReLU()),
            ('rpn_conv_3x3', L.Convolution2D(512, 512, 3, 1, 1)),
            ('rpn_relu_3x3', F.ReLU()),
        ]
        for name, link in self.trunk:
            if 'conv' in name:
                self.add_link(name, link)
cnn_model.py (project: cgp-cnn, author: sg-nm)
def __call__(self, x, train):
        param_num = 0
        for name, f in self.forward:
            if 'conv1' in name:
                x = getattr(self, name)(x)
                param_num += (f.W.shape[0]*f.W.shape[2]*f.W.shape[3]*f.W.shape[1]+f.W.shape[0])
            elif 'bn1' in name:
                x = getattr(self, name)(x, not train)
                param_num += x.data.shape[1]*2
        return (F.relu(x), param_num)


# [(CONV -> Batch -> ReLU -> CONV -> Batch) + (x)]
cnn_model.py (project: cgp-cnn, author: sg-nm)
def __init__(self, ksize, n_out, initializer):
        super(ResBlock, self).__init__()
        pad_size = ksize // 2
        links  = [('conv1', L.Convolution2D(None, n_out, ksize, pad=pad_size, initialW=initializer))]
        links += [('bn1', L.BatchNormalization(n_out))]
        links += [('_act1', F.ReLU())]
        links += [('conv2', L.Convolution2D(n_out, n_out, ksize, pad=pad_size, initialW=initializer))]
        links += [('bn2', L.BatchNormalization(n_out))]
        for link in links:
            if not link[0].startswith('_'):
                self.add_link(*link)
        self.forward = links
net.py (project: chainer-cifar, author: dsanno)
def __init__(self, depth=18, alpha=16, start_channel=16, skip=False):
        super(PyramidNet, self).__init__()
        channel_diff = float(alpha) / depth
        channel = start_channel
        links = [('bconv1', BatchConv2D(3, channel, 3, 1, 1))]
        skip_size = depth * 3 - 3
        for i in six.moves.range(depth):
            if skip:
                skip_ratio = float(i) / skip_size * 0.5
            else:
                skip_ratio = 0
            in_channel = channel
            channel += channel_diff
            links.append(('py{}'.format(len(links)), PyramidBlock(int(round(in_channel)), int(round(channel)),  skip_ratio=skip_ratio)))
        in_channel = channel
        channel += channel_diff
        links.append(('py{}'.format(len(links)), PyramidBlock(int(round(in_channel)), int(round(channel)), stride=2)))
        for i in six.moves.range(depth - 1):
            if skip:
                skip_ratio = float(i + depth) / skip_size * 0.5
            else:
                skip_ratio = 0
            in_channel = channel
            channel += channel_diff
            links.append(('py{}'.format(len(links)), PyramidBlock(int(round(in_channel)), int(round(channel)),  skip_ratio=skip_ratio)))
        in_channel = channel
        channel += channel_diff
        links.append(('py{}'.format(len(links)), PyramidBlock(int(round(in_channel)), int(round(channel)), stride=2)))
        for i in six.moves.range(depth - 1):
            if skip:
                skip_ratio = float(i + depth * 2 - 1) / skip_size * 0.5
            else:
                skip_ratio = 0
            in_channel = channel
            channel += channel_diff
            links.append(('py{}'.format(len(links)), PyramidBlock(int(round(in_channel)), int(round(channel)),  skip_ratio=skip_ratio)))
        links.append(('bn{}'.format(len(links)), L.BatchNormalization(int(round(channel)))))
        links.append(('_relu{}'.format(len(links)), F.ReLU()))
        links.append(('_apool{}'.format(len(links)), F.AveragePooling2D(8, 1, 0, False)))
        links.append(('fc{}'.format(len(links)), L.Linear(int(round(channel)), 10)))

        for name, f in links:
            if not name.startswith('_'):
                self.add_link(*(name, f))
        self.layers = links
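
Names prefixed with an underscore in self.layers are plain functions rather than registered links; a hedged sketch of the forward pass this convention implies (the project's actual __call__ may also thread a train flag through the blocks):

def __call__(self, x):
    h = x
    for name, f in self.layers:
        if name.startswith('_'):
            h = f(h)                    # parameter-free function: ReLU, average pooling
        else:
            h = getattr(self, name)(h)  # registered link: BatchConv2D, PyramidBlock, BatchNormalization, Linear
    return h
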

