Python ones_like(): example source code

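T.ones_like(x) returns a symbolic tensor of ones with the same shape (and, by default, the same dtype) as x. The examples below use it to build masks, broadcast parameters to a batch shape, and seed known_grads when recovering ReLU/pooling switches. A minimal sketch of the basic call (the variable names are illustrative):

import theano
import theano.tensor as T

x = T.matrix('x')
ones = T.ones_like(x)                  # same shape and dtype as x
f = theano.function([x], ones)
print(f([[1.0, 2.0], [3.0, 4.0]]))     # [[1. 1.] [1. 1.]]
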
Source file: metrics.py (project: iterative_inference_segm, author: adri-romsor)
def crossentropy(y_pred, y_true, void_labels, one_hot=False):
    # Clip predictions
    y_pred = T.clip(y_pred, _EPSILON, 1.0 - _EPSILON)

    if one_hot:
        y_true = T.argmax(y_true, axis=1)

    # Create mask
    mask = T.ones_like(y_true, dtype=_FLOATX)
    for el in void_labels:
        mask = T.set_subtensor(mask[T.eq(y_true, el).nonzero()], 0.)

    # Modify y_true temporarily
    y_true_tmp = y_true * mask
    y_true_tmp = y_true_tmp.astype('int32')

    # Compute cross-entropy
    loss = T.nnet.categorical_crossentropy(y_pred, y_true_tmp)

    # Compute masked mean loss
    loss *= mask
    loss = T.sum(loss) / T.sum(mask)

    return loss
Source file: MoGNADE.py (project: NADE, author: MarcCote)
def sym_logdensity(self, x):
        """ x is a matrix of column datapoints (VxB) V = n_visible, B = batch size """
        def density_given_previous_a_and_x(x, w, V_alpha, b_alpha, V_mu, b_mu, V_sigma, b_sigma, activations_factor, p_prev, a_prev, x_prev):
            a = a_prev + T.dot(T.shape_padright(x_prev, 1), T.shape_padleft(w, 1))
            h = self.nonlinearity(a * activations_factor)  # BxH

            Alpha = T.nnet.softmax(T.dot(h, V_alpha) + T.shape_padleft(b_alpha))  # BxC
            Mu = T.dot(h, V_mu) + T.shape_padleft(b_mu)  # BxC
            Sigma = T.exp(T.dot(h, V_sigma) + T.shape_padleft(b_sigma))  # BxC
            p = p_prev + log_sum_exp(-constantX(0.5) * T.sqr((Mu - T.shape_padright(x, 1)) / Sigma) - T.log(Sigma) - constantX(0.5 * np.log(2 * np.pi)) + T.log(Alpha))
            return (p, a, x)
        # First element is different (it is predicted from the bias only)
        a0 = T.zeros_like(T.dot(x.T, self.W))  # BxH
        p0 = T.zeros_like(x[0])
        x0 = T.ones_like(x[0])
        ([ps, _as, _xs], updates) = theano.scan(density_given_previous_a_and_x,
                                                sequences=[x, self.W, self.V_alpha, self.b_alpha, self.V_mu, self.b_mu, self.V_sigma, self.b_sigma, self.activation_rescaling],
                                                outputs_info=[p0, a0, x0])
        return (ps[-1], updates)
Source file: BernoulliNADE.py (project: NADE, author: MarcCote)
def sym_logdensity(self, x):
        """ x is a matrix of column datapoints (VxB) V = n_visible, B = batch size """
        def density_given_previous_a_and_x(x, w, v, b, activations_factor, p_prev, a_prev, x_prev):
            a = a_prev + T.dot(T.shape_padright(x_prev, 1), T.shape_padleft(w, 1))
            h = self.nonlinearity(a * activations_factor)  # BxH
            t = T.dot(h, v) + b
            p_xi_is_one = T.nnet.sigmoid(t) * constantX(0.9999) + constantX(0.0001 * 0.5)  # Make logistic regression more robust by having the sigmoid saturate at 0.00005 and 0.99995
            p = p_prev + x * T.log(p_xi_is_one) + (1 - x) * T.log(1 - p_xi_is_one)
            return (p, a, x)
        # First element is different (it is predicted from the bias only)
        a0 = T.zeros_like(T.dot(x.T, self.W))  # BxH
        p0 = T.zeros_like(x[0])
        x0 = T.ones_like(x[0])
        ([ps, _, _], updates) = theano.scan(density_given_previous_a_and_x,
                                            sequences=[x, self.W, self.V, self.b, self.activation_rescaling],
                                            outputs_info=[p0, a0, x0])
        return (ps[-1], updates)
Source file: MoLaplaceNADE.py (project: NADE, author: MarcCote)
def sym_logdensity(self, x):
        """ x is a matrix of column datapoints (VxB) V = n_visible, B = batch size """
        def density_given_previous_a_and_x(x, w, V_alpha, b_alpha, V_mu, b_mu, V_sigma, b_sigma, activations_factor, p_prev, a_prev, x_prev):
            a = a_prev + T.dot(T.shape_padright(x_prev, 1), T.shape_padleft(w, 1))
            h = self.nonlinearity(a * activations_factor)  # BxH

            Alpha = T.nnet.softmax(T.dot(h, V_alpha) + T.shape_padleft(b_alpha))  # BxC
            Mu = T.dot(h, V_mu) + T.shape_padleft(b_mu)  # BxC
            Sigma = T.exp(T.dot(h, V_sigma) + T.shape_padleft(b_sigma))  # BxC
            p = p_prev + log_sum_exp(T.log(Alpha) - T.log(2 * Sigma) - T.abs_(Mu - T.shape_padright(x, 1)) / Sigma)
            return (p, a, x)
        # First element is different (it is predicted from the bias only)
        a0 = T.zeros_like(T.dot(x.T, self.W))  # BxH
        p0 = T.zeros_like(x[0])
        x0 = T.ones_like(x[0])
        ([ps, _as, _xs], updates) = theano.scan(density_given_previous_a_and_x,
                                                sequences=[x, self.W, self.V_alpha, self.b_alpha, self.V_mu, self.b_mu, self.V_sigma, self.b_sigma, self.activation_rescaling],
                                                outputs_info=[p0, a0, x0])
        return (ps[-1], updates)
Source file: bricks.py (project: Associative_LSTM, author: mohammadpz)
def apply(self, inputs, states, cells, mask=None):
        def slice_last(x, no):
            return x[:, no * self.dim: (no + 1) * self.dim]

        activation = tensor.dot(states, self.W_state) + inputs
        in_gate = self.gate_activation.apply(
            slice_last(activation, 0))
        pre = slice_last(activation, 1)
        forget_gate = self.gate_activation.apply(
            pre + self.bias * tensor.ones_like(pre))
        next_cells = (
            forget_gate * cells +
            in_gate * self.activation.apply(slice_last(activation, 2)))
        out_gate = self.gate_activation.apply(
            slice_last(activation, 3))
        next_states = out_gate * self.activation.apply(next_cells)

        # the truth value of a symbolic tensor is not defined, so test for None
        if mask is not None:
            next_states = (mask[:, None] * next_states +
                           (1 - mask[:, None]) * states)
            next_cells = (mask[:, None] * next_cells +
                          (1 - mask[:, None]) * cells)

        return next_states, next_cells
Source file: rl.py (project: anirban-imitation, author: Santara)
def _make_actiondist_ops(self, obsfeat_B_Df):
        # Computes action distribution mean (of a Gaussian) using MLP
        with nn.variable_scope('hidden'):
            net = nn.FeedforwardNet(obsfeat_B_Df, (self.obsfeat_space.dim,), self.cfg.hidden_spec)
        with nn.variable_scope('out'):
            mean_layer = nn.AffineLayer(net.output, net.output_shape, (self.action_space.dim,), initializer=np.zeros((net.output_shape[0], self.action_space.dim)))
            assert mean_layer.output_shape == (self.action_space.dim,)
        means_B_Da = mean_layer.output

        # Action distribution log standard deviations are parameters themselves
        logstdevs_1_Da = nn.get_variable('logstdevs_1_Da', np.full((1, self.action_space.dim), self.cfg.init_logstdev), broadcastable=(True,False))
        stdevs_1_Da = self.cfg.min_stdev + tensor.exp(logstdevs_1_Da) # minimum stdev seems to make density / kl computations more stable
        stdevs_B_Da = tensor.ones_like(means_B_Da)*stdevs_1_Da # "broadcast" to (B,Da)

        actiondist_B_Pa = tensor.concatenate([means_B_Da, stdevs_B_Da], axis=1)
        return actiondist_B_Pa
Source file: LSTM1DHiddenInitLayer.py (project: deep-motion-analysis, author: Brimborough)
def __call__(self, input):

        b_size = input.shape[0]

        input_data = input[:(b_size // 2)]
        initial = input[(b_size // 2):]

        input_data = input_data.dimshuffle(2, 0, 1)
        initial = initial.dimshuffle(2, 0, 1)

        me = self.dropout(T.ones_like(input_data[0]))
        mh = self.dropout(T.ones_like(self.encoder(input_data[0])))   

        def step(e, h, me, mh):
            ig = sigmoid(self.encode_igate(me * e) + self.recode_igate(mh * h))
            fg = sigmoid(self.encode_fgate(me * e) + self.recode_fgate(mh * h))
            return self.activation(fg * self.recoder(mh * h) + ig * self.encoder(me * e))

        h = theano.scan(step, sequences=[input_data, initial], non_sequences=[me, mh], outputs_info=None)[0]

        return h.dimshuffle(1, 2, 0)
Source file: LSTM1DLayer.py (project: deep-motion-analysis, author: Brimborough)
def __call__(self, input):

        b_size = input.shape[0]

        input_data = input[:(b_size // 2)]
        initial = input[(b_size // 2):]

        input_data = input_data.dimshuffle(2, 0, 1)
        initial = initial.dimshuffle(2, 0, 1)

        me = self.dropout1(T.ones_like(input_data[0]))
        mh = self.dropout2(T.ones_like(self.encoder(input_data[0])))   

        def step(e, h, me, mh):
            ig = sigmoid(self.encode_igate(me * e) + self.recode_igate(mh * h))
            fg = sigmoid(self.encode_fgate(me * e) + self.recode_fgate(mh * h))
            return self.activation(fg * self.recoder(mh * h) + ig * self.encoder(me * e))

        h = theano.scan(step, sequences=[input_data, initial], non_sequences=[me, mh], outputs_info=None)[0]

        return h.dimshuffle(1, 2, 0)
Source file: LSTM1DLayerWithInit.py (project: deep-motion-analysis, author: Brimborough)
def __call__(self, input):

        b_size = input.shape[0]

        input_data = input[:(b_size // 2)]
        initial = input[(b_size // 2):]

        input_data = input_data.dimshuffle(2, 0, 1)
        initial = initial.dimshuffle(2, 0, 1)

        me = self.dropout(T.ones_like(input_data[0]))
        mh = self.dropout(T.ones_like(self.encoder(input_data[0])))   

        def step(e, h, me, mh):
            ig = sigmoid(self.encode_igate(me * e) + self.recode_igate(mh * h))
            fg = sigmoid(self.encode_fgate(me * e) + self.recode_fgate(mh * h))
            return self.activation(fg * self.recoder(mh * h) + ig * self.encoder(me * e))

        h = theano.scan(step, sequences=[input_data, initial], non_sequences=[me, mh], outputs_info=None)[0]

        return h.dimshuffle(1, 2, 0)
Source file: cnn_lstm.py (project: Sequences-With-Sentences, author: airalcorn2)
def forward_prop_step(x_t, sentence_t, s_t1_prev, s_t2_prev):

    filtered_words = T.tanh(F.dot(sentence_t) + d)
    pooled_words = filtered_words.max(axis=1)

    x_e = T.concatenate([x_t, pooled_words])

    # GRU Layer 1.
    z_t1 = T.nnet.hard_sigmoid(U_1[0].dot(x_e) + W[0].dot(s_t1_prev) + b[0])
    r_t1 = T.nnet.hard_sigmoid(U_1[1].dot(x_e) + W[1].dot(s_t1_prev) + b[1])
    c_t1 = T.tanh(U_1[2].dot(x_e) + W[2].dot(s_t1_prev * r_t1) + b[2])
    s_t1 = (T.ones_like(z_t1) - z_t1) * c_t1 + z_t1 * s_t1_prev

    # GRU Layer 2.
    z_t2 = T.nnet.hard_sigmoid(U_2[0].dot(s_t1) + W[3].dot(s_t2_prev) + b[3])
    r_t2 = T.nnet.hard_sigmoid(U_2[1].dot(s_t1) + W[4].dot(s_t2_prev) + b[4])
    c_t2 = T.tanh(U_2[2].dot(s_t1) + W[5].dot(s_t2_prev * r_t2) + b[5])
    s_t2 = (T.ones_like(z_t2) - z_t2) * c_t2 + z_t2 * s_t2_prev

    # Final output calculation.
    o_t = T.nnet.sigmoid(V.dot(s_t2) + c)

    return [o_t, s_t1, s_t2]
Source file: theano_backend.py (project: keras_superpixel_pooling, author: parag2489)
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    if gamma is None:
        if beta is None:
            gamma = ones_like(x)
        else:
            gamma = ones_like(beta)
    if beta is None:
        beta = zeros_like(gamma)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

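    # stdinv is an inverse standard deviation; convert it back to a variance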
    return normed, mean, T.inv(stdinv ** 2)
Source file: theano_backend.py (project: keras_superpixel_pooling, author: parag2489)
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if gamma is None:
        gamma = ones_like(var)
    if beta is None:
        beta = zeros_like(mean)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


Source file: test_basic_ops.py (project: Theano-Deep-learning, author: GeekLiB)
def test_gpujoin_gpualloc():
    a = T.fmatrix('a')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    b = T.fmatrix('b')
    b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')

    f = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                        mode=mode_without_gpu)
    f_gpu = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)),
                            mode=mode_with_gpu)
    f_gpu2 = theano.function([a, b], T.join(0, T.zeros_like(a),
                                            T.ones_like(b)) + 4,
                             mode=mode_with_gpu)
    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == T.join for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, GpuAlloc)
                for node in f_gpu.maker.fgraph.toposort()]) == 2
    assert sum([node.op == gpu_join
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, GpuAlloc)
                for node in f_gpu2.maker.fgraph.toposort()]) == 2
    assert sum([node.op == gpu_join
                for node in f_gpu2.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
Source file: test_basic.py (project: Theano-Deep-learning, author: GeekLiB)
def test_int32_dtype(self):
        # Reported on the theano-user mailing-list:
        # https://groups.google.com/d/msg/theano-users/MT9ui8LtTsY/rwatwEF9zWAJ
        size = 9
        intX = 'int32'

        C = tensor.matrix('C', dtype=intX)
        I = tensor.matrix('I', dtype=intX)

        fI = I.flatten()
        data = tensor.ones_like(fI)
        indptr = tensor.arange(data.shape[0] + 1, dtype='int32')

        m1 = sparse.CSR(data, fI, indptr, (8, size))
        m2 = sparse.dot(m1, C)
        y = m2.reshape(shape=(2, 4, 9), ndim=3)

        f = theano.function(inputs=[I, C], outputs=y)
        i = numpy.asarray([[4, 3, 7, 7], [2, 8, 4, 5]], dtype=intX)
        a = numpy.asarray(numpy.random.randint(0, 100, (size, size)),
                          dtype=intX)
        f(i, a)
Source file: test_basic.py (project: Theano-Deep-learning, author: GeekLiB)
def test_structured_add_s_v(self):
        sp_types = {'csc': sp.csc_matrix,
                    'csr': sp.csr_matrix}

        for format in ['csr', 'csc']:
            for dtype in ['float32', 'float64']:
                x = theano.sparse.SparseType(format, dtype=dtype)()
                y = tensor.vector(dtype=dtype)
                f = theano.function([x, y], structured_add_s_v(x, y))

                spmat = sp_types[format](random_lil((4, 3), dtype, 3))
                spones = spmat.copy()
                spones.data = numpy.ones_like(spones.data)
                mat = numpy.asarray(numpy.random.rand(3), dtype=dtype)

                out = f(spmat, mat)

                utt.assert_allclose(as_ndarray(spones.multiply(spmat + mat)),
                                    out.toarray())
Source file: basic.py (project: Theano-Deep-learning, author: GeekLiB)
def sp_ones_like(x):
    """
    Construct a sparse matrix of ones with the same sparsity pattern.

    Parameters
    ----------
    x
        Sparse matrix to take the sparsity pattern.

    Returns
    -------
    A sparse matrix
        The same as `x` with data changed for ones.

    """
    # TODO: don't restrict to CSM formats
    data, indices, indptr, shape = csm_properties(x)
    return CSM(format=x.format)(tensor.ones_like(data), indices, indptr, shape)
Source file: test_scan_checkpoints.py (project: Theano-Deep-learning, author: GeekLiB)
def setUp(self):
        self.k = T.iscalar("k")
        self.A = T.vector("A")
        result, _ = theano.scan(
            fn=lambda prior_result, A: prior_result * A,
            outputs_info=T.ones_like(self.A),
            non_sequences=self.A,
            n_steps=self.k)
        result_check, _ = theano.scan_checkpoints(
            fn=lambda prior_result, A: prior_result * A,
            outputs_info=T.ones_like(self.A),
            non_sequences=self.A,
            n_steps=self.k,
            save_every_N=100)
        self.result = result[-1]
        self.result_check = result_check[-1]
        self.grad_A = T.grad(self.result.sum(), self.A)
        self.grad_A_check = T.grad(self.result_check.sum(), self.A)
Source file: test_basic_ops.py (project: Theano-Deep-learning, author: GeekLiB)
def test_gpualloc_output_to_gpu():
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    a = tcn.shared_constructor(a_val)

    b = T.fscalar()
    f = theano.function([b], T.ones_like(a) + b, mode=mode_without_gpu)
    f_gpu = theano.function([b], B.gpu_from_host(T.ones_like(a)) + b,
                            mode=mode_with_gpu)

    f(2)
    f_gpu(2)

    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 1
    assert sum([node.op == B.gpu_alloc
                for node in f_gpu.maker.fgraph.toposort()]) == 1

    assert numpy.allclose(numpy.ones(a.get_value(borrow=True).shape) + 9,
                          f_gpu(9))
    assert numpy.allclose(f(5), f_gpu(5))
Source file: ctc_base.py (project: theano_ctc, author: mcf06)
def make_node(self, acts, labels, input_lengths):
    # Unless specified, assume all sequences have full sequence length, i.e. acts_.shape[0]
    if input_lengths is None:
      input_lengths = T.cast(acts.shape[0], dtype="int32") * T.ones_like(acts[0,:,0], dtype=np.int32)

    # acts.shape = [seqLen, batchN, outputUnit]
    if acts.dtype != "float32":
      raise Exception("acts must be float32 instead of %s" % acts.dtype)
    # labels.shape = [batchN, labelLen]
    if labels.dtype != "int32":
      raise Exception("labels must be int32 instead of %s" % labels.dtype)
    # input_lengths.shape = [batchN]
    if input_lengths.dtype != "int32":
      raise Exception("input_lengths must be int32 instead of %s" % input_lengths.dtype)

    applyNode = theano.Apply(self, inputs=[acts, input_lengths, labels], outputs=[self.costs, self.gradients])

    # Return only the cost. Gradient will be returned by grad()
    self.default_output = 0 

    return applyNode
Source file: spatial_transformer.py (project: statestream, author: VolkerFischer)
def _meshgrid(height, width):
    # This should be equivalent to:
    #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
    #                         np.linspace(-1, 1, height))
    #  ones = np.ones(np.prod(x_t.shape))
    #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
    x_t = T.dot(T.ones((height, 1)),
                _linspace(-1.0, 1.0, width).dimshuffle('x', 0))
    y_t = T.dot(_linspace(-1.0, 1.0, height).dimshuffle(0, 'x'),
                T.ones((1, width)))

    x_t_flat = x_t.reshape((1, -1))
    y_t_flat = y_t.reshape((1, -1))
    ones = T.ones_like(x_t_flat)
    grid = T.concatenate([x_t_flat, y_t_flat, ones], axis=0)
    return grid
Source file: recurrent.py (project: deep-coref, author: clarkkev)
def get_padded_shuffled_mask(self, train, X, pad=0):
        mask = self.get_input_mask(train)
        if mask is None:
            mask = T.ones_like(X.sum(axis=-1))  # is there a better way to do this without a sum?

        # mask is (nb_samples, time)
        mask = T.shape_padright(mask)  # (nb_samples, time, 1)
        mask = T.addbroadcast(mask, -1)  # the new dimension (the '1') is made broadcastable
        # see http://deeplearning.net/software/theano/library/tensor/basic.html#broadcasting-in-theano-vs-numpy
        mask = mask.dimshuffle(1, 0, 2)  # (time, nb_samples, 1)

        if pad > 0:
            # left-pad in time with 0
            padding = alloc_zeros_matrix(pad, mask.shape[1], 1)
            mask = T.concatenate([padding, mask], axis=0)
        return mask.astype('int8')
Source file: mylayers.py (project: iterative_inference_segm, author: adri-romsor)
def get_output_for(self, upscaled, **kwargs):
        a, b = self.scale_factor
        # get output for pooling and pre-pooling layer
        inp, out =\
                lasagne.layers.get_output([self.pool2d_layer_in,
                                           self.pool2d_layer])
        # upscale the input feature map by scale_factor
        if b > 1:
            upscaled = T.extra_ops.repeat(upscaled, b, 3)
        if a > 1:
            upscaled = T.extra_ops.repeat(upscaled, a, 2)
        # get the shapes for pre-pooling layer and upscaled layer
        sh_pool2d_in = T.shape(inp)
        sh_upscaled = T.shape(upscaled)
        # in case the shape is different left-bottom-pad with zero
        tmp = T.zeros(sh_pool2d_in)

        indx = (slice(None),
                slice(None),
                slice(0, sh_upscaled[2]),
                slice(0, sh_upscaled[3]))
        upscaled = T.set_subtensor(tmp[indx], upscaled)
        # get max pool indices
        indices_pool = T.grad(None, wrt=inp,
                known_grads={out: T.ones_like(out)})
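        # the gradient of the pooled output w.r.t. the pre-pooling input is 1
        # exactly at the argmax positions, so indices_pool is a binary mask of
        # the pooling switches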
        # mask values using indices_pool
        f = indices_pool * upscaled
        return f
Source file: metrics.py (project: iterative_inference_segm, author: adri-romsor)
def accuracy(y_pred, y_true, void_labels, one_hot=False):

    assert (y_pred.ndim == 2) or (y_pred.ndim == 1)

    # y_pred to indices
    if y_pred.ndim == 2:
        y_pred = T.argmax(y_pred, axis=1)

    if one_hot:
        y_true = T.argmax(y_true, axis=1)

    # Compute accuracy
    acc = T.eq(y_pred, y_true).astype(_FLOATX)

    # Create mask
    mask = T.ones_like(y_true, dtype=_FLOATX)
    for el in void_labels:
        # the truth value of a symbolic index tensor is undefined, so apply
        # the update unconditionally (as in crossentropy above)
        mask = T.set_subtensor(mask[T.eq(y_true, el).nonzero()], 0.)

    # Apply mask
    acc *= mask
    acc = T.sum(acc) / T.sum(mask)

    return acc
Source file: nn_lung.py (project: dsb3, author: EliasVansteenkiste)
def get_output_for(self, input, **kwargs):
        return T.ones_like(input) * self.constant
Source file: theano_backend.py (project: keras, author: GeekLiB)
def ones_like(x):
    return T.ones_like(x)
Source file: inception.py (project: kaggle-right-whale, author: felixlaumon)
def avg_pool(input_layer, **kwargs):
    # hack to work around https://github.com/Theano/Theano/issues/3776
    norm = nn.layers.ExpressionLayer(input_layer, lambda X: T.ones_like(X))
    norm = nn.layers.Pool2DLayer(norm, mode='average_inc_pad', **kwargs)
    l = nn.layers.Pool2DLayer(input_layer, mode='average_inc_pad', **kwargs)
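    # dividing by the pooled ones corrects the average at the borders, where
    # average_inc_pad counted zero padding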
    l = nn.layers.ElemwiseMergeLayer([l, norm], T.true_div)
    return l
Source file: model.py (project: epfl-semester-project-biaxialnn, author: onanypoint)
def loss_func(self, y_true, y_predict):
        active_notes = T.shape_padright(y_true[:,:,:,0])
        mask = T.concatenate([T.ones_like(active_notes), active_notes, T.repeat(T.ones_like(active_notes), self.output_size-2, -1)], axis=-1)
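        # 2*p*y - p - y + 1 == p*y + (1 - p)*(1 - y), i.e. the probability
        # assigned to the true binary outcome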
        loglikelihoods = mask * T.log( 2*y_predict*y_true - y_predict - y_true + 1 + self.epsilon )
        return T.neg(T.sum(loglikelihoods))
Source file: relu.py (project: pl-cnn, author: oval-group)
def mirror_activations(input, input_fixed):

    out_fixed = T.nnet.relu(input_fixed)
    mask = T.grad(cost=None,
                  wrt=input_fixed,
                  known_grads={out_fixed: T.ones_like(out_fixed)})
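    # mask is the ReLU derivative: 1 where input_fixed > 0, 0 elsewhere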

    out = input * mask

    return out, out_fixed
Source file: maxpool.py (project: pl-cnn, author: oval-group)
def mirror_activations(input, input_fixed, pool_size):

        out_fixed = my_pool_2d(input_fixed, ds=pool_size, ignore_border=True)
        mask = T.grad(cost=None,
                      wrt=input_fixed,
                      known_grads={out_fixed: T.ones_like(out_fixed)})
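        # mask is 1 at the positions that won the max in the fixed pass, 0 elsewhere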

        masked_input = input * mask
        out = Cfg.floatX(pool_size[0] * pool_size[1]) * \
            pool_2d(masked_input, mode='average_exc_pad', ds=pool_size,
                    ignore_border=True)

        return out, out_fixed
Source file: gru.py (project: theano-recurrence, author: uyaseen)
def generative_sampling(self, seed, emb_data, sample_length):
        fruit = theano.shared(value=seed)

        def step(h_tm, y_tm):

            x_z = T.dot(emb_data[y_tm], self.W_z) + self.b_z
            x_r = T.dot(emb_data[y_tm], self.W_r) + self.b_r
            x_h = T.dot(emb_data[y_tm], self.W) + self.b_h

            z_t = self.inner_activation(x_z + T.dot(h_tm, self.U_z))
            r_t = self.inner_activation(x_r + T.dot(h_tm, self.U_r))
            hh_t = self.activation(x_h + T.dot(r_t * h_tm, self.U))
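            # GRU update: interpolate between the candidate hh_t and the
            # previous state h_tm via the update gate z_t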
            h_t = (T.ones_like(z_t) - z_t) * hh_t + z_t * h_tm

            y_t = T.nnet.softmax(T.dot(h_t, self.V) + self.b_y)
            y = T.argmax(y_t, axis=1)

            return h_t, y[0]

        [_, samples], _ = theano.scan(fn=step,
                                      outputs_info=[self.h0, fruit],
                                      n_steps=sample_length)

        get_samples = theano.function(inputs=[],
                                      outputs=samples)

        return get_samples()

