Python reshape() example source code
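The snippets below are real-world uses of Chainer's F.reshape (chainer.functions.reshape) and NumPy's ndarray.reshape. F.reshape returns a Variable that views the same data with a new shape while keeping the computation graph intact, and one axis may be given as -1 to be inferred from the rest. A minimal sketch of the call itself (toy shapes, plain NumPy input):

import numpy as np
import chainer.functions as F

x = np.arange(12, dtype=np.float32).reshape(2, 6)
y = F.reshape(x, (2, 2, 3))  # Variable with shape (2, 2, 3)
z = F.reshape(x, (-1, 3))    # -1 infers the axis: shape (4, 3)
print(y.shape, z.shape)      # (2, 2, 3) (4, 3)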

S2S_att.py (project: seq2seq_temporal_attention, author: aistairc)
def __call__(self, a_list, state, batch_size, xp):
        e_list = []
        sum_e = xp.zeros((batch_size, 1), dtype=xp.float32)
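        # score each encoder annotation a against the decoder state h2; clip before exp for numerical stability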
        for a in a_list:
            w = reshape(batch_matmul(state['h2'], a, transa=True), (batch_size, 1))
            w.data = xp.clip(w.data, -40, 40)
            e = exp(w)
            e_list.append(e)
            sum_e = sum_e + e

        context = xp.zeros((batch_size, self.hidden_size), dtype=xp.float32)

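        # normalize each score by the partition sum and accumulate the weighted annotations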
        for a, e in zip(a_list, e_list):
            e /= sum_e
            context = context + reshape(batch_matmul(a, e), (batch_size, self.hidden_size))
        return context, e_list, sum_e
model.py (project: teras, author: chantera)
def __call__(self, x, hs):
        batch, dim = x.shape
        alphas = 0
        _sum = 0
        for h in F.transpose_sequence(hs[:batch]):
            size = h.shape[0]
            if size < batch:
                h = F.vstack([h, variable.Variable(
                    self.xp.zeros((batch - size, h.shape[1]), dtype='f'))])
            score = self._score_func(x, h)
            e = F.exp(score)
            _sum += e
            alphas += batch_matmul(h, e)
        c = F.reshape(batch_matmul(F.reshape(alphas, (batch, dim)),
                                   (1 / _sum)), (batch, dim))
        return c
model.py (project: teras, author: chantera)
def __call__(self, chars):
        if not isinstance(chars, (tuple, list)):
            chars = [chars]
        char_ids, boundaries = self._create_sequence(chars)
        x = self.embed(self.xp.array(char_ids))
        x = F.dropout(x, self._dropout)
        length, dim = x.shape
        C = self.conv(F.reshape(x, (1, 1, length, dim)))
        # C.shape -> (1, out_size, length, 1)
        C = F.split_axis(F.transpose(F.reshape(C, (self.out_size, length))),
                         boundaries, axis=0)
        ys = F.max(F.pad_sequence(
            [matrix for i, matrix in enumerate(C) if i % 2 == 1],
            padding=-np.inf), axis=1)  # max over time pooling
        # assert len(chars) == ys.shape[0]
        return ys
model.py (project: teras, author: chantera)
def __call__(self, x1, x2):
        xp = self.xp
        out_size = self.out_size
        batch_size, len1, dim1 = x1.shape
        if not self.nobias[0]:
            x1 = F.concat((x1, xp.ones((batch_size, len1, 1),
                                       dtype=xp.float32)), axis=2)
            dim1 += 1
        len2, dim2 = x2.shape[1:]
        if not self.nobias[1]:
            x2 = F.concat((x2, xp.ones((batch_size, len2, 1),
                                       dtype=xp.float32)), axis=2)
            dim2 += 1
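        # collapse batch and length axes so one matmul applies W to every position at once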
        x1_reshaped = F.reshape(x1, (batch_size * len1, dim1))
        W_reshaped = F.reshape(F.transpose(self.W, (0, 2, 1)),
                               (dim1, out_size * dim2))
        affine = F.reshape(F.matmul(x1_reshaped, W_reshaped),
                           (batch_size, len1 * out_size, dim2))
        biaffine = F.transpose(
            F.reshape(batch_matmul(affine, x2, transb=True),
                      (batch_size, len1, out_size, len2)),
            (0, 1, 3, 2))
        if not self.nobias[2]:
            biaffine += F.broadcast_to(self.b, biaffine.shape)
        return biaffine
LSTMEncDecAttn.py (project: mlpnlp-nmt, author: mlpnlp)
def prepareDecoder(self, encInfo):
        self.model.decLSTM.reset_state()
        if self.attn_mode == 0:
            aList = None
        elif self.attn_mode == 1:
            aList = encInfo.attnList
        elif self.attn_mode == 2:
            aList = self.model.attnM(
                chaFunc.reshape(encInfo.attnList,
                                (encInfo.cMBSize * encInfo.encLen, self.hDim)))
            # apply the attention weight matrix to the flattened encoder states
        else:
            assert 0, "ERROR"
        xp = cuda.get_array_module(encInfo.lstmVars[0].data)
        finalHS = chainer.Variable(
            xp.zeros(
                encInfo.lstmVars[0].data.shape,
                dtype=xp.float32))  # initialize input_feed with zeros
        return aList, finalHS

multistate_dqn.py (project: chainer_frmqn, author: okdshin)
def calc_loss(self, state, state_dash, actions, rewards, done_list):
        assert(state.shape == state_dash.shape)
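        # flatten each state tensor to (batch, features) for the Q-network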
        s = state.reshape((state.shape[0], -1)).astype(np.float32)
        s_dash = state_dash.reshape((state_dash.shape[0], -1)).astype(np.float32)
        q = self.model.q_function(s)

        q_dash = self.model_target.q_function(s_dash)  # Q(s',*)
        max_q_dash = np.asarray(list(map(np.max, q_dash.data)), dtype=np.float32) # max_a Q(s',a)

        target = q.data.copy()
        for i in range(self.replay_batch_size):
            assert(self.replay_batch_size == len(done_list))
            r = np.sign(rewards[i]) if self.clipping else rewards[i]
            if done_list[i]:
                discounted_sum = r
            else:
                discounted_sum = r + self.gamma * max_q_dash[i]
            assert(self.replay_batch_size == len(actions))
            target[i, actions[i]] = discounted_sum

        loss = F.sum(F.huber_loss(Variable(target), q, delta=1.0)) #/ self.replay_batch_size
        return loss, q
models.py (project: chainer-gan-improvements, author: hvy)
def __call__(self, x):
        minibatch_size = x.shape[0]
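        # minibatch discrimination: project to (batch, n_kernels, kernel_dim) and compare all sample pairs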
        activation = F.reshape(self.t(x), (-1, self.n_kernels, self.kernel_dim))
        activation_ex = F.expand_dims(activation, 3)
        activation_ex_t = F.expand_dims(F.transpose(activation, (1, 2, 0)), 0)
        activation_ex, activation_ex_t = F.broadcast(activation_ex, activation_ex_t)
        diff = activation_ex - activation_ex_t

        xp = chainer.cuda.get_array_module(x.data)
        eps = F.expand_dims(xp.eye(minibatch_size, dtype=xp.float32), 1)
        eps = F.broadcast_to(eps, (minibatch_size, self.n_kernels, minibatch_size))
        sum_diff = F.sum(abs(diff), axis=2)
        sum_diff = F.broadcast_to(sum_diff, eps.shape)
        abs_diff = sum_diff + eps

        minibatch_features = F.sum(F.exp(-abs_diff), 2)
        return F.concat((x, minibatch_features), axis=1)
model.py (project: chainer-glu, author: musyoku)
def __call__(self, X, return_last=False):
        batchsize = X.shape[0]
        seq_length = X.shape[1]
        embedding = self.embed(X)
        embedding = F.swapaxes(embedding, 1, 2)
        residual_input = embedding if self.ndim_h == self.ndim_embedding else 0

        out_data = self._forward_layer(0, embedding)
        for layer_index in range(1, self.num_blocks * self.num_layers_per_block):
            out_data = self._forward_layer(layer_index, out_data)
            if (layer_index + 1) % self.num_layers_per_block == 0:
                if self.using_dropout:
                    out_data = F.dropout(out_data, ratio=self.dropout)
                out_data += residual_input
                residual_input = out_data

        if return_last:
            out_data = out_data[:, :, -1, None]

        out_data = self.dense(out_data)
        out_data = F.reshape(F.swapaxes(out_data, 1, 2), (-1, self.vocab_size))

        return out_data
links.py (project: unrolled-gan, author: musyoku)
def __call__(self, x):
        xp = chainer.cuda.get_array_module(x.data)
        batchsize = x.shape[0]
        if not self.train_weights and self.initial_T is not None:
            self.T.W.data = self.initial_T

        M = F.reshape(self.T(x), (-1, self.num_kernels, self.ndim_kernel))
        M = F.expand_dims(M, 3)
        M_T = F.transpose(M, (3, 1, 2, 0))
        M, M_T = F.broadcast(M, M_T)

        norm = F.sum(abs(M - M_T), axis=2)
        eraser = F.broadcast_to(xp.eye(batchsize, dtype=x.dtype).reshape((batchsize, 1, batchsize)), norm.shape)
        c_b = F.exp(-(norm + 1e6 * eraser))
        o_b = F.sum(c_b, axis=2)

        if not self.train_weights:
            self.initial_T = self.T.W.data

        return F.concat((x, o_b), axis=1)
wavenet.py (project: wavenet, author: musyoku)
def cross_entropy(self, raw_network_output, target_signal_data):
        if isinstance(target_signal_data, Variable):
            raise Exception("target_signal_data cannot be Variable")

        raw_network_output = self.to_variable(raw_network_output)
        target_width = target_signal_data.shape[1]
        batchsize = raw_network_output.data.shape[0]

        if raw_network_output.data.shape[3] != target_width:
            raise Exception("raw_network_output.width != target.width")

        # (batchsize * time_step,) <- (batchsize, time_step)
        target_signal_data = target_signal_data.reshape((-1,))
        target_signal = self.to_variable(target_signal_data)

        # (batchsize * time_step, channels) <- (batchsize, channels, 1, time_step)
        raw_network_output = F.transpose(raw_network_output, (0, 3, 2, 1))
        raw_network_output = F.reshape(raw_network_output, (batchsize * target_width, -1))

        loss = F.softmax_cross_entropy(raw_network_output, target_signal)
        return loss
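The transpose-then-reshape above is the standard way to turn 4-D network output into the (N, C) logits that F.softmax_cross_entropy expects. A self-contained sketch of the same pattern with toy sizes (the names here are illustrative, not from the wavenet project):

import numpy as np
import chainer.functions as F

batchsize, channels, width = 2, 256, 5
logits = np.random.randn(batchsize, channels, 1, width).astype(np.float32)
targets = np.random.randint(0, channels, (batchsize, width)).astype(np.int32)

# (B, C, 1, T) -> (B, T, 1, C) -> (B*T, C), with targets flattened to (B*T,)
flat = F.reshape(F.transpose(logits, (0, 3, 2, 1)), (batchsize * width, -1))
loss = F.softmax_cross_entropy(flat, targets.reshape(-1))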
googlenet.py (project: googlenet, author: nutszebra)
def __call__(self, x, train=True):
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = F.relu(self.conv2_1x1(h))
        h = F.relu(self.conv2_3x3(h))
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.inception3a(h)
        h = self.inception3b(h)
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.inception4a(h)
        h = self.inception4b(h)
        h = self.inception4c(h)
        h = self.inception4d(h)
        h = self.inception4e(h)
        h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
        h = self.inception5a(h)
        h = F.relu(self.inception5b(h))
        num, categories, y, x = h.data.shape
        # global average pooling
        h = F.reshape(F.average_pooling_2d(h, (y, x)), (num, categories))
        h = F.dropout(h, ratio=0.4, train=train)
        h = self.linear(h)
        return h
net.py (project: chainer-cf-nade, author: dsanno)
def __call__(self, h, train=True):
        """
        in_type:
            h: float32
        in_shape:
            h: (batch_size, hidden_num)
        out_type: float32
        out_shape: (batch_size, rating_num, predicted_item_num)
        """

        xp = cuda.get_array_module(h.data)
        h = self.p(h)
        if hasattr(self, 'q'):
            h = self.q(h)
        h = F.reshape(h, (-1, self.rating_num, self.item_num, 1))
        w = chainer.Variable(xp.asarray(np.tri(self.rating_num, dtype=np.float32).reshape(self.rating_num, self.rating_num, 1, 1)), volatile=h.volatile)
        h = F.convolution_2d(h, w)
        return F.reshape(h, (-1, self.rating_num, self.item_num))
discriminators.py (project: chainer-gan-experiments, author: Aixile)
def __call__(self, x, test=False, retain_forward=False):
        h = self.c_first(x, test=test, retain_forward=retain_forward)
        for i in range(self.down_layers-1):
            h = getattr(self, 'c'+str(i))(h, test=test, retain_forward=retain_forward)
        if not self.conv_as_last:
            _b, _ch, _w, _h = h.data.shape
            self.last_shape=(_b, _ch, _w, _h)
            h = F.reshape(h, (_b, _ch*_w*_h))
        h = self.c_last(h, test=test, retain_forward=retain_forward)
        return h
discriminators.py (project: chainer-gan-experiments, author: Aixile)
def differentiable_backward(self, g):
        g = self.c_last.differentiable_backward(g)
        if not self.conv_as_last:
            _b, _ch, _w, _h = self.last_shape
            g = F.reshape(g, (_b, _ch, _w, _h))
        for i in reversed(range(self.down_layers-1)):
            g = getattr(self, 'c'+str(i)).differentiable_backward(g)
        g = self.c_first.differentiable_backward(g)
        return g
discriminators.py (project: chainer-gan-experiments, author: Aixile)
def __call__(self, x, test=False, retain_forward=False):
        h = self.c_first(x, test=test, retain_forward=retain_forward)
        for i in range(self.down_layers-1):
            h = getattr(self, 'c'+str(i))(h, test=test, retain_forward=retain_forward)
        _b, _ch, _w, _h = h.data.shape
        self.last_shape=(_b, _ch, _w, _h)
        h = F.reshape(h, (_b, _ch*_w*_h))
        h0 = self.c_last_0(h, test=test, retain_forward=retain_forward)
        h1 = self.c_last_1_0(h, test=test, retain_forward=retain_forward)
        #h1 = self.c_last_1_1(h1, test=test, retain_forward=retain_forward)
        #h1 = self.c_last_1_2(h1, test=test, retain_forward=retain_forward)
        return h0, h1
discriminators.py (project: chainer-gan-experiments, author: Aixile)
def differentiable_backward(self, g):
        g = self.c_last_0.differentiable_backward(g)
        _b, _ch, _w, _h = self.last_shape
        g = F.reshape(g, (_b, _ch, _w, _h))
        for i in reversed(range(self.down_layers-1)):
            g = getattr(self, 'c'+str(i)).differentiable_backward(g)
        g = self.c_first.differentiable_backward(g)
        return g
generators.py (project: chainer-gan-experiments, author: Aixile)
def __call__(self, z, test=False):
        h = self.c_first(z, test=test)
        h = F.reshape(h, (h.data.shape[0], self.base_size, 4, 4))
        for i in range(self.up_layers):
            h = getattr(self, 'c'+str(i))(h, test=test)
        return h
models.py (project: chainer-gan-experiments, author: Aixile)
def differentiable_backward(self, g):
        g = self.c_last.differentiable_backward(g)
        _b, _ch, _w, _h = self.last_shape
        g = F.reshape(g, (_b, _ch, _w, _h))
        for i in reversed(range(self.down_layers-1)):
            g = getattr(self, 'c'+str(i)).differentiable_backward(g)
        g = self.c_first.differentiable_backward(g)
        return g
models.py (project: chainer-spatial-transformer-networks, author: hvy)
def affine_matrix(self, x):
        h = F.max_pooling_2d(x, 2, 2)
        h = F.relu(self.conv1(h))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, 2, 2)
        theta = F.reshape(self.fc(h), (x.shape[0], 2, 3))
        return theta
reorg.py (project: chainer-object-detection, author: dsanno)
def reorg(input, stride=2):
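    # space-to-depth: fold each stride x stride spatial block into the channel axis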
    batch_size, input_channel, input_height, input_width = input.data.shape
    output_height, output_width, output_channel = int(input_height/stride), int(input_width/stride), input_channel*stride*stride
    output = F.transpose(F.reshape(input, (batch_size, input_channel, output_height, stride, output_width, stride)), (0, 1, 2, 4, 3, 5))
    output = F.transpose(F.reshape(output, (batch_size, input_channel, output_height, output_width, -1)), (0, 4, 1, 2, 3))
    output = F.reshape(output, (batch_size, output_channel, output_height, output_width))
    return output
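reorg is the space-to-depth shuffle used as the passthrough layer in YOLOv2: every stride-by-stride spatial block is folded into stride**2 extra channels. A quick shape check, assuming the function above is in scope (sizes are illustrative):

import numpy as np
from chainer import Variable

x = Variable(np.zeros((1, 64, 26, 26), dtype=np.float32))
y = reorg(x, stride=2)
print(y.shape)  # (1, 256, 13, 13): 4x the channels, half the height and width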

