Python dropout() example source code
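The snippets below are collected from real Chainer projects and span two generations of the framework's API: in Chainer v1, F.dropout took a train flag (e.g. F.dropout(x, ratio=0.5, train=train)), while from Chainer v2 onward the flag was removed and train/test mode is switched globally with chainer.using_config('train', ...). A minimal sketch of both modes under the v2+ API (variable names are illustrative):

import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.random.randn(8, 100).astype(np.float32))

# Training mode: each unit is zeroed with probability `ratio`
# and the survivors are scaled by 1 / (1 - ratio).
with chainer.using_config('train', True):
    y_train = F.dropout(x, ratio=0.5)

# Test mode: dropout is the identity function.
with chainer.using_config('train', False):
    y_test = F.dropout(x, ratio=0.5)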

vaelm.py source code (project: vaelm, author: TatsuyaShirakawa)
def __call__(self, w, train=True, dpratio=0.5):

        x = self.embed(w)
        self.maybe_init_state(len(x.data), x.data.dtype)

        for i in range(self.num_layers):

            if self.ignore_label is not None:
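                # mask of positions whose embedding is non-zero (i.e. not an ignored/padded token)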
                enable = (x.data != 0)

            c = F.dropout(self.get_c(i), train=train, ratio=dpratio)
            h = F.dropout(self.get_h(i), train=train, ratio=dpratio)
            x = F.dropout(x, train=train, ratio=dpratio)
            c, h = self.get_l(i)(c, h, x)

            if self.ignore_label is not None:
                self.set_c(i, F.where(enable, c, self.get_c(i)))
                self.set_h(i, F.where(enable, h, self.get_h(i)))
            else:
                self.set_c(i, c)
                self.set_h(i, h)

            x = self.get_h(i)
net.py source code (project: chainer-cyclegan, author: Aixile)
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False, noise=False):
        self.bn = bn
        self.activation = activation
        self.dropout = dropout
        self.sample = sample
        self.noise = noise
        layers = {}
        w = chainer.initializers.Normal(0.02)
        if sample == 'down':
            layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
        elif sample == 'none-9':
            layers['c'] = L.Convolution2D(ch0, ch1, 9, 1, 4, initialW=w)
        elif sample == 'none-7':
            layers['c'] = L.Convolution2D(ch0, ch1, 7, 1, 3, initialW=w)
        elif sample == 'none-5':
            layers['c'] = L.Convolution2D(ch0, ch1, 5, 1, 2, initialW=w)
        else:
            layers['c'] = L.Convolution2D(ch0, ch1, 3, 1, 1, initialW=w)
        if bn:
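            # when noise is added after BN, the learnable scale (gamma) is disabled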
            if self.noise:
                layers['batchnorm'] = L.BatchNormalization(ch1, use_gamma=False)
            else:
                layers['batchnorm'] = L.BatchNormalization(ch1)
        super(CBR, self).__init__(**layers)
net.py source code (project: chainer-cyclegan, author: Aixile)
def __call__(self, x, test):
        if self.sample=="down" or self.sample=="none" or self.sample=='none-9' or self.sample=='none-7' or self.sample=='none-5':
            h = self.c(x)
        elif self.sample=="up":
            h = F.unpooling_2d(x, 2, 2, 0, cover_all=False)
            h = self.c(h)
        else:
            print("unknown sample method %s"%self.sample)
        if self.bn:
            h = self.batchnorm(h, test=test)
        if self.noise:
            h = add_noise(h, test=test)
        if self.dropout:
            h = F.dropout(h, train=not test)
        if not self.activation is None:
            h = self.activation(h)
        return h
P4MAllCNNC.py source code (project: gconv_experiments, author: tscohen)
def __call__(self, x, t, train=True, finetune=False):

        h = x
        h = F.dropout(h, ratio=0.2, train=train)
        h = self.l1(h, train, finetune)
        h = self.l2(h, train, finetune)
        h = self.l3(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l4(h, train, finetune)
        h = self.l5(h, train, finetune)
        h = self.l6(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l7(h, train, finetune)
        h = self.l8(h, train, finetune)
        h = self.l9(h, train, finetune)

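        # global average over the p4m group axis (8 elements) and the 8x8 spatial grid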
        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h /= 8 * 8 * 8

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
P4AllCNNC.py source code (project: gconv_experiments, author: tscohen)
def __call__(self, x, t, train=True, finetune=False):

        h = x
        h = F.dropout(h, ratio=0.2, train=train)
        h = self.l1(h, train, finetune)
        h = self.l2(h, train, finetune)
        h = self.l3(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l4(h, train, finetune)
        h = self.l5(h, train, finetune)
        h = self.l6(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l7(h, train, finetune)
        h = self.l8(h, train, finetune)
        h = self.l9(h, train, finetune)

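        # global average over the p4 group axis (4 rotations) and the 8x8 spatial grid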
        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h /= 8 * 8 * 4

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
AllCNNC.py source code (project: gconv_experiments, author: tscohen)
def __call__(self, x, t, train=True, finetune=False):

        h = x
        h = F.dropout(h, ratio=0.2, train=train)
        h = self.l1(h, train, finetune)
        h = self.l2(h, train, finetune)
        h = self.l3(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l4(h, train, finetune)
        h = self.l5(h, train, finetune)
        h = self.l6(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l7(h, train, finetune)
        h = self.l8(h, train, finetune)
        h = self.l9(h, train, finetune)

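        # global average over the 8x8 spatial grid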
        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h /= 8 * 8

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
P4CNN.py source code (project: gconv_experiments, author: tscohen)
def __call__(self, x, t, train=True, finetune=False):

        h = self.l1(x, train, finetune)
        # h = F.dropout(h, self.dr, train)
        h = self.l2(h, train, finetune)

        h = plane_group_spatial_max_pooling(h, ksize=2, stride=2, pad=0, cover_all=True, use_cudnn=True)

        h = self.l3(h, train, finetune)
        # h = F.dropout(h, self.dr, train)
        h = self.l4(h, train, finetune)
        # h = F.dropout(h, self.dr, train)
        h = self.l5(h, train, finetune)
        # h = F.dropout(h, self.dr, train)
        h = self.l6(h, train, finetune)

        h = self.top(h)

        h = F.max(h, axis=-3, keepdims=False)
        h = F.max(h, axis=-1, keepdims=False)
        h = F.max(h, axis=-1, keepdims=False)

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Z2CNN.py source code (project: gconv_experiments, author: tscohen)
def __call__(self, x, t, train=True, finetune=False):

        h = self.l1(x, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l2(h, train, finetune)

        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0, cover_all=True, use_cudnn=True)

        h = self.l3(h, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l4(h, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l5(h, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l6(h, train, finetune)
        h = F.dropout(h, self.dr, train)

        h = self.top(h)

        h = F.max(h, axis=-1, keepdims=False)
        h = F.max(h, axis=-1, keepdims=False)

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
googlenet_v3.py source code (project: googlenet_v3, author: nutszebra)
def __call__(self, x, train=True):
        h = self.conv1(x, train)
        h = self.conv2(h, train)
        h = self.conv3(h, train)
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.conv4(h, train)
        h = self.conv5(h, train)
        h = self.conv6(h, train)
        h = self.inception_f5_1(h, train)
        h = self.inception_f5_2(h, train)
        h = self.inception_f5_3(h, train)
        h = self.inception_f6_1(h, train)
        h = self.inception_f6_2(h, train)
        h = self.inception_f6_3(h, train)
        h = self.inception_f6_4(h, train)
        h = self.inception_f6_5(h, train)
        h = self.inception_f7_1(h, train)
        h = self.inception_f7_2(h, train)
        num, categories, y, x = h.data.shape
        # global average pooling
        h = F.reshape(F.average_pooling_2d(h, (y, x)), (num, categories))
        h = F.dropout(h, ratio=0.2, train=train)
        h = self.linear(h)
        return h
ddqn.py source code (project: double-dqn, author: musyoku)
def forward_one_step(self, x, test):
        f = activations[self.activation_function]
        chain = [x]

        # Hidden layers
        for i in range(self.n_hidden_layers):
            u = getattr(self, "layer_%i" % i)(chain[-1])
            if self.apply_batchnorm:
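                # optionally skip batch normalization on the first (input) layer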
                if i == 0 and not self.apply_batchnorm_to_input:
                    pass
                else:
                    u = getattr(self, "batchnorm_%i" % i)(u, test=test)
            output = f(u)
            if self.apply_dropout:
                output = F.dropout(output, train=not test)
            chain.append(output)

        # Output
        u = getattr(self, "layer_%i" % self.n_hidden_layers)(chain[-1])
        if self.apply_batchnorm:
            u = getattr(self, "batchnorm_%i" % self.n_hidden_layers)(u, test=test)
        chain.append(f(u))

        return chain[-1]
lstm_parser_old.py source code (project: depccg, author: masashi-y)
def forward(self, ws, ss, ps):
        batchsize = len(ws)
        xp = chainer.cuda.get_array_module(ws[0])
        ws = map(self.emb_word, ws)
        ss = [F.reshape(self.emb_suf(s), (s.shape[0], 4 * self.afix_dim)) for s in ss]
        ps = [F.reshape(self.emb_prf(s), (s.shape[0], 4 * self.afix_dim)) for s in ps]
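        # [(sentence length, (word_dim + suf_dim + prf_dim))]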
        xs_f = [F.dropout(F.concat([w, s, p]),
            self.dropout_ratio, train=self.train) for w, s, p in zip(ws, ss, ps)]
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        # ys: [(sentence length, number of category)]
        hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, hs_b)]

        cat_ys = [self.linear_cat2(
            F.dropout(F.elu(self.linear_cat1(h)), 0.5, train=self.train)) for h in hs]

        dep_ys = [self.biaffine(
            F.elu(F.dropout(self.linear_dep(h), 0.32, train=self.train)),
            F.elu(F.dropout(self.linear_head(h), 0.32, train=self.train))) for h in hs]

        return cat_ys, dep_ys
lstm_tagger.py source code (project: depccg, author: masashi-y)
def forward(self, ws, ss, ps):
        batchsize = len(ws)
        xp = chainer.cuda.get_array_module(ws[0])
        ws = map(self.emb_word, ws)
        ss = [F.reshape(self.emb_suf(s), (s.shape[0], 4 * self.afix_dim)) for s in ss]
        ps = [F.reshape(self.emb_prf(s), (s.shape[0], 4 * self.afix_dim)) for s in ps]
        # [(sentence length, (word_dim + suf_dim + prf_dim))]
        xs_f = [F.dropout(F.concat([w, s, p]),
            self.dropout_ratio, train=self.train) for w, s, p in zip(ws, ss, ps)]
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        # ys: [(sentence length, number of category)]
        ys = [self.linear2(F.relu(
                self.linear1(F.concat([h_f, h_b]))))
                    for h_f, h_b in zip(hs_f, hs_b)]
        return ys
ja_lstm_tagger.py source code (project: depccg, author: masashi-y)
def predict(self, xs):
        """
        batch: list of split sentences
        """
        xs = [self.extractor.process(x) for x in xs]
        batchsize = len(xs)
        ws, cs, ls = zip(*xs)
        ws = map(self.emb_word, ws)
        cs = [F.squeeze(
            F.max_pooling_2d(
                self.conv_char(
                    F.expand_dims(
                        self.emb_char(c), 1)), (l, 1)))
                    for c, l in zip(cs, ls)]
        xs_f = [F.dropout(F.concat([w, c]),
            self.dropout_ratio, train=self.train) for w, c in zip(ws, cs)]
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        ys = [self.linear2(F.relu(self.linear1(F.concat([h_f, h_b]))))
                    for h_f, h_b in zip(hs_f, hs_b)]
        return [y.data[1:-1] for y in ys]
lstm_tagger_old.py source code (project: depccg, author: masashi-y)
def forward(self, ws, ss, ps):
        batchsize = len(ws)
        xp = chainer.cuda.get_array_module(ws[0])
        ws = map(self.emb_word, ws)
        ss = [F.reshape(self.emb_suf(s), (s.shape[0], 4 * self.afix_dim)) for s in ss]
        ps = [F.reshape(self.emb_prf(s), (s.shape[0], 4 * self.afix_dim)) for s in ps]
        # [(sentence length, (word_dim + suf_dim + prf_dim))]
        xs_f = [F.dropout(F.concat([w, s, p]),
            self.dropout_ratio, train=self.train) for w, s, p in zip(ws, ss, ps)]
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        # ys: [(sentence length, number of category)]
        ys = [self.linear2(F.relu(
                self.linear1(F.concat([h_f, h_b]))))
                    for h_f, h_b in zip(hs_f, hs_b)]
        return ys
lstm_parser.py source code (project: depccg, author: masashi-y)
def forward(self, ws, ss, ps):
        batchsize = len(ws)
        xp = chainer.cuda.get_array_module(ws[0])
        ws = map(self.emb_word, ws)
        ss = [F.reshape(self.emb_suf(s), (s.shape[0], 4 * self.afix_dim)) for s in ss]
        ps = [F.reshape(self.emb_prf(s), (s.shape[0], 4 * self.afix_dim)) for s in ps]
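        # [(sentence length, (word_dim + suf_dim + prf_dim))]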
        xs_f = [F.dropout(F.concat([w, s, p]),
            self.dropout_ratio, train=self.train) for w, s, p in zip(ws, ss, ps)]
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        # ys: [(sentence length, number of category)]
        hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, hs_b)]

        cat_ys = [self.linear_cat2(
            F.dropout(F.elu(self.linear_cat1(h)), 0.5, train=self.train)) for h in hs]

        dep_ys = [self.biaffine(
            F.elu(F.dropout(self.linear_dep(h), 0.32, train=self.train)),
            F.elu(F.dropout(self.linear_head(h), 0.32, train=self.train))) for h in hs]

        return cat_ys, dep_ys
lstm_tagger_ph.py source code (project: depccg, author: masashi-y)
def forward(self, ws, ss, ps):
        batchsize, length = ws.shape
        xp = chainer.cuda.get_array_module(ws[0])
        ws = self.emb_word(ws) # (batch, length, word_dim)
        ss = F.reshape(self.emb_suf(ss), (batchsize, length, -1))
        ps = F.reshape(self.emb_prf(ps), (batchsize, length, -1))
        hs = F.transpose(F.concat([ws, ss, ps], 2), (1, 0, 2))
        hs = F.dropout(hs, self.dropout_ratio, train=self.train)
        hs = F.split_axis(hs, length, 0)
        hs_f = []
        hs_b = []
        self._init_state()
        for h_in_f, h_in_b in zip(hs, reversed(hs)):
            h_f = self.lstm_f2(self.lstm_f1(F.squeeze(h_in_f, 0)))
            hs_f.append(h_f)
            h_b = self.lstm_b2(self.lstm_b1(F.squeeze(h_in_b, 0)))
            hs_b.append(h_b)

        ys = [self.linear2(F.relu(self.linear1(F.concat([h_f, h_b]))))
                for h_f, h_b in zip(hs_f, reversed(hs_b))]
        return ys
main.py source code (project: cnn-text-classification, author: marevol)
def __call__(self, x, train=True):
        hlist = []
        h_0 = self['embed'](x)
        if not self.non_static:
            h_0 = Variable(h_0.data)
        h_1 = F.reshape(h_0, (h_0.shape[0], 1, h_0.shape[1], h_0.shape[2]))
        for filter_h in self.filter_sizes:
            pool_size = (self.doc_length - filter_h + 1, 1)
            h = F.max_pooling_2d(F.relu(self['conv' + str(filter_h)](h_1)), pool_size)
            hlist.append(h)
        h = F.concat(hlist)
        pos = 0
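        # hidden fully-connected layers with ReLU and dropout (F.dropout defaults to ratio=0.5)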
        while pos < len(self.hidden_units) - 1:
            h = F.dropout(F.relu(self['l' + str(pos)](h)))
            pos += 1
        y = F.relu(self['l' + str(pos)](h))
        return y
model.py source code (project: blstm-cws, author: chantera)
def __init__(self, embeddings, n_labels, dropout=0.5, train=True):
        vocab_size, embed_size = embeddings.shape
        feature_size = embed_size
        super(BLSTMBase, self).__init__(
            embed=L.EmbedID(
                in_size=vocab_size,
                out_size=embed_size,
                initialW=embeddings,
            ),
            f_lstm=LSTM(feature_size, feature_size, dropout),
            b_lstm=LSTM(feature_size, feature_size, dropout),
            linear=L.Linear(feature_size * 2, n_labels),
        )
        self._dropout = dropout
        self._n_labels = n_labels
        self.train = train
mdl_rgb_d.py source code (project: MultimodalDL, author: masataka46)
def __call__(self, x, t):
        self.clear()
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
model.py source code (project: chainer-qrnn, author: musyoku)
def encode(self, X, skip_mask=None):
        batchsize = X.shape[0]
        seq_length = X.shape[1]
        embedding = self.encoder_embed(X)
        embedding = F.swapaxes(embedding, 1, 2)

        out_data = self._forward_encoder_layer(0, embedding, skip_mask=skip_mask)
        in_data = [out_data]

        for layer_index in range(1, self.num_layers):
            out_data = self._forward_encoder_layer(layer_index, F.concat(in_data) if self.densely_connected else in_data[-1], skip_mask=skip_mask)
            in_data.append(out_data)

        out_data = F.concat(in_data) if self.densely_connected else in_data[-1] # dense conv

        if self.using_dropout:
            out_data = F.dropout(out_data, ratio=self.dropout)

        last_hidden_states = []
        for layer_index in range(0, self.num_layers):
            encoder = self.get_encoder(layer_index)
            last_hidden_states.append(encoder.get_last_hidden_state())

        return last_hidden_states
model.py source code (project: chainer-qrnn, author: musyoku)
def __init__(self, vocab_size, ndim_embedding, num_layers, ndim_h, kernel_size=4, pooling="fo", zoneout=0, dropout=0, weightnorm=False, wgain=1, densely_connected=False, ignore_label=None):
        super(RNNModel, self).__init__(
            embed=L.EmbedID(vocab_size, ndim_embedding, ignore_label=ignore_label),
            fc=L.Convolution1D(ndim_h * num_layers if densely_connected else ndim_h, vocab_size, ksize=1, stride=1, pad=0, weightnorm=weightnorm, initialW=initializers.Normal(math.sqrt(wgain / ndim_h)))
        )
        assert num_layers > 0
        self.vocab_size = vocab_size
        self.ndim_embedding = ndim_embedding
        self.num_layers = num_layers
        self.ndim_h = ndim_h
        self.kernel_size = kernel_size
        self.pooling = pooling
        self.zoneout = zoneout
        self.weightnorm = weightnorm
        self.using_dropout = dropout > 0
        self.dropout = dropout
        self.wgain = wgain
        self.ignore_label = ignore_label
        self.densely_connected = densely_connected

        with self.init_scope():
            setattr(self, "qrnn0", L.QRNN(ndim_embedding, ndim_h, kernel_size=kernel_size, pooling=pooling, zoneout=zoneout, weightnorm=weightnorm, wgain=wgain))
            for i in range(1, num_layers):
                setattr(self, "qrnn{}".format(i), L.QRNN(ndim_h * i if densely_connected else ndim_h, ndim_h, kernel_size=kernel_size, pooling=pooling, zoneout=zoneout, weightnorm=weightnorm, wgain=wgain))
model.py source code (project: chainer-qrnn, author: musyoku)
def __call__(self, X, return_last=False):
        batchsize = X.shape[0]
        seq_length = X.shape[1]
        embedding = self.embed(X)
        embedding = F.swapaxes(embedding, 1, 2)

        out_data = self._forward_layer(0, embedding)
        in_data = [out_data]

        for layer_index in range(1, self.num_layers):
            out_data = self._forward_layer(layer_index, F.concat(in_data) if self.densely_connected else in_data[-1])   # dense conv
            in_data.append(out_data)

        out_data = F.concat(in_data) if self.densely_connected else out_data    # dense conv

        if return_last:
            out_data = out_data[:, :, -1, None]

        if self.using_dropout:
            out_data = F.dropout(out_data, ratio=self.dropout)

        out_data = self.fc(out_data)
        out_data = F.reshape(F.swapaxes(out_data, 1, 2), (-1, self.vocab_size))

        return out_data
model.py source code (project: self-driving-cars, author: musyoku): forward_one_step is identical to the double-dqn snippet above.
alexbn.py source code (project: chainer-deconv, author: germanRos)
def __call__(self, x, t):
        self.clear()
        h = self.bn1(self.conv1(x), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.bn2(self.conv2(h), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
net.py source code (project: convolutional_seq2seq, author: soskek)
def __init__(self, n_layers, n_units, width=3, dropout=0.2):
        super(ConvGLUDecoder, self).__init__()
        links = [('l{}'.format(i + 1),
                  ConvGLU(n_units, width=width,
                          dropout=dropout, nopad=True))
                 for i in range(n_layers)]
        for link in links:
            self.add_link(*link)
        self.conv_names = [name for name, _ in links]
        self.width = width

        init_preatt = VarInNormal(1.)
        links = [('preatt{}'.format(i + 1),
                  L.Linear(n_units, n_units, initialW=init_preatt))
                 for i in range(n_layers)]
        for link in links:
            self.add_link(*link)
        self.preatt_names = [name for name, _ in links]
net.py source code (project: convolutional_seq2seq, author: soskek)
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units,
                 max_length=50, dropout=0.2, width=3):
        init_emb = chainer.initializers.Normal(0.1)
        init_out = VarInNormal(1.)
        super(Seq2seq, self).__init__(
            embed_x=L.EmbedID(n_source_vocab, n_units, ignore_label=-1,
                              initialW=init_emb),
            embed_y=L.EmbedID(n_target_vocab, n_units, ignore_label=-1,
                              initialW=init_emb),
            embed_position_x=L.EmbedID(max_length, n_units,
                                       initialW=init_emb),
            embed_position_y=L.EmbedID(max_length, n_units,
                                       initialW=init_emb),
            encoder=ConvGLUEncoder(n_layers, n_units, width, dropout),
            decoder=ConvGLUDecoder(n_layers, n_units, width, dropout),
            W=L.Linear(n_units, n_target_vocab, initialW=init_out),
        )
        self.n_layers = n_layers
        self.n_units = n_units
        self.n_target_vocab = n_target_vocab
        self.max_length = max_length
        self.width = width
        self.dropout = dropout
alex_net.py source code (project: DeepPoseComparison, author: ynaka81)
def predict(self, x):
        """ Predict 2D pose from image. """
        # layer1
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, 3, stride=2)
        # layer2
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, 3, stride=2)
        # layer3-5
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.relu(self.conv5(h))
        h = F.max_pooling_2d(h, 3, stride=2)
        # layer6-8
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)
        return F.reshape(h, (-1, self.Nj, 2))
CNN.py source code (project: vsmlib, author: undertherain)
def __call__(self, xs):

        if self.freeze:
            self.embed.disable_update()
        xs = self.embed(xs)
        batchsize, height, width = xs.shape
        xs = F.reshape(xs, (batchsize, 1, height, width))
        conv3_xs = self.conv3(xs)
        conv4_xs = self.conv4(xs)
        conv5_xs = self.conv5(xs)
        h1 = F.max_pooling_2d(F.relu(conv3_xs), conv3_xs.shape[2])
        h2 = F.max_pooling_2d(F.relu(conv4_xs), conv4_xs.shape[2])
        h3 = F.max_pooling_2d(F.relu(conv5_xs), conv5_xs.shape[2])
        concat_layer = F.concat([h1, h2, h3], axis=1)
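        # note: 'train' is forced to True here, so dropout stays active even at inference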
        with chainer.using_config('train', True):
            y = self.l1(F.dropout(F.tanh(concat_layer)))
        return y
net.py source code (project: chainer-cifar, author: dsanno)
def __call__(self, x):
        h = self.l0(x)
        h = self.l1_1(h)
        h = self.l1_2(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = self.l2_1(h)
        h = self.l2_2(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = self.l3_1(h)
        h = self.l3_2(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = self.l4_1(h)
        h = self.l4_2(h)
        h = F.dropout(h, 0.25)
        h = F.average_pooling_2d(h, 4, 1, 0)
        h = self.fc(h)
        return h
net.py source code (project: chainer-cifar, author: dsanno)
def __call__(self, x):
        h = self.bconv1_1(x)
        h = self.bconv1_2(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = self.bconv2_1(h)
        h = self.bconv2_2(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = self.bconv3_1(h)
        h = self.bconv3_2(h)
        h = self.bconv3_3(h)
        h = self.bconv3_4(h)
        h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
        h = F.relu(self.fc4(F.dropout(h)))
        h = F.relu(self.fc5(F.dropout(h)))
        h = self.fc6(h)
        return h

