Python tensor() usage examples from open-source projects

skipthoughts.py — project: StackGAN, author: hanzhanggit
def build_encoder_bi(tparams, options):
    """
    build bidirectional encoder, given pre-computed word embeddings
    """
    # word embedding (source)
    embedding = tensor.tensor3('embedding', dtype='float32')
    embeddingr = embedding[::-1]
    x_mask = tensor.matrix('x_mask', dtype='float32')
    xr_mask = x_mask[::-1]

    # encoder
    proj = get_layer(options['encoder'])[1](tparams, embedding, options,
                                            prefix='encoder',
                                            mask=x_mask)
    projr = get_layer(options['encoder'])[1](tparams, embeddingr, options,
                                             prefix='encoder_r',
                                             mask=xr_mask)

    ctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)

    return embedding, x_mask, ctx
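A minimal usage sketch (hypothetical wiring; `tparams` and `options` are assumed to come from the surrounding skip-thoughts model-loading code):

embedding, x_mask, ctx = build_encoder_bi(tparams, options)
f_enc_bi = theano.function([embedding, x_mask], ctx, name='f_enc_bi')
# f_enc_bi(emb_batch, mask_batch) -> concatenated final forward/backward states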


# some utilities
DBN_wm.py — project: DeepDTIs_DBN, author: Bjoux2
def errors(self, y):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        """

        # check if y has the same dimension as y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()
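The same `errors` method recurs verbatim in several projects below. For reference, a self-contained sketch of the underlying zero-one error expression (variable names illustrative):

import numpy as np
import theano
import theano.tensor as T

y = T.ivector('y')            # ground-truth labels
y_pred = T.ivector('y_pred')  # predicted labels
zero_one = T.mean(T.neq(y_pred, y))  # fraction of mispredicted examples

f = theano.function([y_pred, y], zero_one)
print(f(np.array([0, 1, 2], dtype='int32'),
        np.array([0, 2, 2], dtype='int32')))  # ~0.333: one of three labels differs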
logistic_sgd.py — project: DeepDTIs_DBN, author: Bjoux2
def errors(self, y):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        """

        # check if y has the same dimension as y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()
logistic_sgd.py — project: tbp-next-basket, author: GiulioRossetti
def errors(self, y):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        """

        # check if y has the same dimension as y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()
neuromodules.py — project: NCRF-AE, author: cosmozhang
def errors(self, y):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        """

        # check if y has the same dimension as y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()
network.py — project: rllabplusplus, author: shaneshixiang
def wrapped_conv(*args, **kwargs):
    # Strip kwargs that dnn_conv does not accept; Lasagne's conv layers
    # pass these through their `convolution` callable.
    copy = dict(kwargs)
    copy.pop("image_shape", None)
    copy.pop("filter_shape", None)
    assert copy.pop("filter_flip", False)

    input, W, input_shape, get_W_shape = args
    if theano.config.device == 'cpu':
        return theano.tensor.nnet.conv2d(*args, **kwargs)
    try:
        # Prefer the cuDNN implementation when running on GPU.
        return theano.sandbox.cuda.dnn.dnn_conv(
            input.astype('float32'),
            W.astype('float32'),
            **copy
        )
    except Exception:
        # cuDNN unavailable or the call failed; use the generic op.
        print("falling back to default conv2d")
        return theano.tensor.nnet.conv2d(*args, **kwargs)
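A hedged sketch of how such a wrapper is typically plugged into Lasagne, whose convolution layers accept a custom `convolution` callable (layer shapes illustrative):

import lasagne

l_in = lasagne.layers.InputLayer((None, 3, 32, 32))
l_conv = lasagne.layers.Conv2DLayer(
    l_in, num_filters=16, filter_size=(3, 3),
    convolution=wrapped_conv)  # route the conv op through the wrapper above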
tensor.py — project: pl-cnn, author: oval-group
def set_to_zero(list_of_tensors_and_shapes, on_gpu=True):
    """
    :param: list_of_tensors_and_shapes of the form [(tensor1, shape1), ...]
    """

    if on_gpu:
        updates = []
        for tensor, shape in list_of_tensors_and_shapes:
            if np.sum(shape) == 1:
                updates.append((tensor, 0))
            else:
                updates.append((tensor,
                                T.patternbroadcast(T.zeros(shape),
                                                   [False] * tensor.ndim)))

        return updates

    else:
        updates = []
        for tensor, shape in list_of_tensors_and_shapes:
            updates.append((tensor, np.zeros(shape, dtype=config_.floatX)))
        return updates
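An illustrative usage sketch, assuming the `set_to_zero` above (with `numpy as np` and `theano.tensor as T` in scope, as in its source file): reset two shared tensors in one compiled step.

import numpy as np
import theano

W = theano.shared(np.ones((3, 4), dtype=theano.config.floatX))
b = theano.shared(np.ones((4,), dtype=theano.config.floatX))
zero_step = theano.function([], [],
                            updates=set_to_zero([(W, (3, 4)), (b, (4,))]))
zero_step()  # W and b now hold zeros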
net.py — project: 3D-R2N2, author: chrischoy
def __init__(self, random_seed=dt.datetime.now().microsecond, compute_grad=True):
        self.rng = np.random.RandomState(random_seed)

        self.batch_size = cfg.CONST.BATCH_SIZE
        self.img_w = cfg.CONST.IMG_W
        self.img_h = cfg.CONST.IMG_H
        self.n_vox = cfg.CONST.N_VOX
        self.compute_grad = compute_grad

        # (self.batch_size, 3, self.img_h, self.img_w),
        # override x and is_x_tensor4 when using multi-view network
        self.x = tensor.tensor4()
        self.is_x_tensor4 = True

        # (self.batch_size, self.n_vox, 2, self.n_vox, self.n_vox),
        self.y = tensor5()

        self.activations = []  # list of all intermediate activations
        self.loss = []  # final loss
        self.output = []  # final output
        self.error = []  # final output error
        self.params = []  # all learnable params
        self.grads = []  # will be filled out automatically
        self.setup()
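`tensor5` is not a stock Theano constructor; the 3D-R2N2 code defines a 5-D TensorType for it, along these lines (a sketch, not verbatim from the project):

import theano
import theano.tensor as tensor

# 5-D float tensor type with no broadcastable dimensions
tensor5 = tensor.TensorType(theano.config.floatX, (False,) * 5)
y = tensor5()  # symbolic (batch, n_vox, 2, n_vox, n_vox) variable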
layers.py — project: 3D-R2N2, author: chrischoy
def set_output(self):
        output_shape = self._output_shape
        padding = self._padding
        unpool_size = self._unpool_size
        unpooled_output = tensor.alloc(0.0,  # Value to fill the tensor
                                       output_shape[0],
                                       output_shape[1] + 2 * padding[0],
                                       output_shape[2],
                                       output_shape[3] + 2 * padding[1],
                                       output_shape[4] + 2 * padding[2])

        unpooled_output = tensor.set_subtensor(
            unpooled_output[:,
                            padding[0]:output_shape[1] + padding[0]:unpool_size[0],
                            :,
                            padding[1]:output_shape[3] + padding[1]:unpool_size[1],
                            padding[2]:output_shape[4] + padding[2]:unpool_size[2]],
            self._prev_layer.output)
        self._output = unpooled_output
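The strided `set_subtensor` write above is easier to see in one dimension; a minimal sketch:

import numpy as np
import theano
import theano.tensor as T

x = T.fvector('x')
# write x into every 2nd cell of a zero buffer (cf. the unpool_size strides above)
out = T.set_subtensor(T.zeros((6,), dtype='float32')[0::2], x)
f = theano.function([x], out)
print(f(np.array([1, 2, 3], dtype='float32')))  # [ 1.  0.  2.  0.  3.  0.]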
layers.py — project: 3D-R2N2, author: chrischoy
def set_output(self):
        padding = self._padding
        input_shape = self._input_shape
        if np.sum(self._padding) > 0:
            padded_input = tensor.alloc(0.0,  # Value to fill the tensor
                                        input_shape[0],
                                        input_shape[1] + 2 * padding[1],
                                        input_shape[2],
                                        input_shape[3] + 2 * padding[3],
                                        input_shape[4] + 2 * padding[4])

            padded_input = tensor.set_subtensor(
                padded_input[:,
                             padding[1]:padding[1] + input_shape[1],
                             :,
                             padding[3]:padding[3] + input_shape[3],
                             padding[4]:padding[4] + input_shape[4]],
                self._prev_layer.output)
        else:
            padded_input = self._prev_layer.output

        self._output = conv3d2d.conv3d(padded_input, self.W.val) + \
            self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
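Note on axis layout: `conv3d2d.conv3d` expects signals of shape (batch, depth, channels, height, width), which is why the padding here is applied to axes 1, 3 and 4 while axis 2 (channels) is untouched, and why the bias is dimshuffled to broadcast over every axis except channels.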
layers.py — project: 3D-R2N2, author: chrischoy
def set_output(self):
        padding = self._padding
        input_shape = self._input_shape
        padded_input = tensor.alloc(0.0,  # Value to fill the tensor
                                    input_shape[0],
                                    input_shape[1] + 2 * padding[1],
                                    input_shape[2],
                                    input_shape[3] + 2 * padding[3],
                                    input_shape[4] + 2 * padding[4])

        padded_input = tensor.set_subtensor(
            padded_input[:,
                         padding[1]:padding[1] + input_shape[1],
                         :,
                         padding[3]:padding[3] + input_shape[3],
                         padding[4]:padding[4] + input_shape[4]],
            self._prev_layer.output)

        fc_output = tensor.reshape(
            tensor.dot(self._fc_layer.output, self.Wx.val), self._output_shape)
        self._output = conv3d2d.conv3d(padded_input, self.Wh.val) + \
            fc_output + self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
layers.py — project: 3D-R2N2, author: chrischoy
def set_output(self):
        padding = self._padding
        input_shape = self._input_shape
        padded_input = tensor.alloc(0.0,  # Value to fill the tensor
                                    input_shape[0],
                                    input_shape[1] + 2 * padding[1],
                                    input_shape[2],
                                    input_shape[3] + 2 * padding[3],
                                    input_shape[4] + 2 * padding[4])

        padded_input = tensor.set_subtensor(
            padded_input[:,
                         padding[1]:padding[1] + input_shape[1],
                         :,
                         padding[3]:padding[3] + input_shape[3],
                         padding[4]:padding[4] + input_shape[4]],
            self._prev_layer.output)

        self._output = conv3d2d.conv3d(padded_input, self.W.val) + \
            self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
main.py — project: enc_nli, author: lukecq1231
def dropout_layer(state_before, use_noise, trng):
    """
    tensor switch is like an if statement that checks the
    value of the theano shared variable (use_noise), before
    either dropping out the state_before tensor or
    computing the appropriate activation. During training/testing
    use_noise is toggled on and off.
    """
    proj = tensor.switch(
        use_noise,
        state_before * trng.binomial(state_before.shape, p=0.5, n=1,
                                     dtype=state_before.dtype),
        state_before * 0.5)
    return proj
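A hedged wiring sketch (names illustrative): `use_noise` is a shared scalar flipped to 1 for training and 0 for evaluation.

import numpy as np
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams

trng = RandomStreams(1234)
use_noise = theano.shared(np.float32(0.))
h = tensor.matrix('h', dtype='float32')    # some layer activation
h_drop = dropout_layer(h, use_noise, trng)

use_noise.set_value(np.float32(1.))  # enable dropout before a training step
use_noise.set_value(np.float32(0.))  # disable it before evaluation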


# make prefix-appended name
dssm.py — project: recom-system, author: tizot
def build_multi_dssm(input_var=None, num_samples=None, num_entries=6, num_ngrams=42**3, num_hid1=300, num_hid2=300, num_out=128):
    """Builds a DSSM structure in a Lasagne/Theano way.

    The built DSSM is the neural network that computes the projection of only one paper.
    The input ``input_var`` should have two dimensions: (``num_samples * num_entries``, ``num_ngrams``).
    The output is then computed in a batch way: one paper at a time, but all papers from the same sample in the dataset are grouped
    (cited papers, citing papers and ``num_entries - 2`` irrelevant papers).

    Args:
        input_var (:class:`theano.tensor.TensorType` or None): symbolic input variable of the DSSM
        num_samples (int): the number of samples in the batch input dataset (number of rows)
        num_entries (int): the number of compared papers in the DSSM structure
        num_ngrams (int): the size of the vocabulary
        num_hid1 (int): the number of units in the first hidden layer
        num_hid2 (int): the number of units in the second hidden layer
        num_out (int): the number of units in the output layer

    Returns:
        :class:`lasagne.layers.Layer`: the output layer of the DSSM
    """

    assert (num_entries > 2)

    # Initialise input layer
    if num_samples is None:
        num_rows = None
    else:
        num_rows = num_samples * num_entries

    l_in = layers.InputLayer(shape=(num_rows, num_ngrams), input_var=input_var)

    # Initialise the hidden and output layers of the DSSM
    l_hid1 = layers.DenseLayer(l_in, num_units=num_hid1, nonlinearity=nonlinearities.tanh, W=init.GlorotUniform())
    l_hid2 = layers.DenseLayer(l_hid1, num_units=num_hid2, nonlinearity=nonlinearities.tanh, W=init.GlorotUniform())
    l_out = layers.DenseLayer(l_hid2, num_units=num_out, nonlinearity=nonlinearities.tanh, W=init.GlorotUniform())

    l_out = layers.ExpressionLayer(l_out, lambda X: X / X.norm(2), output_shape='auto')

    return l_out
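A hedged usage sketch (shapes illustrative): build the network for 10-sample bundles and take its symbolic output.

import theano.tensor as T
import lasagne

input_var = T.matrix('input')
network = build_multi_dssm(input_var, num_samples=10)
output = lasagne.layers.get_output(network)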
dssm.py — project: recom-system, author: tizot
def compute_loss(output, num_samples, num_entries=6, gamma=500.0):
    """Compute the loss of a dataset, given the output of the DSSM.

    Args:
        output (:class:`lasagne.layers.Layer`): the output of the DSSM
        num_samples (int): the number of samples in the dataset
        num_entries (int): the number of compared papers in the DSSM structure
        gamma (float): the coefficient applied in the softmax of the similarities

    Returns:
        theano.tensor.TensorType: the loss of the dataset
    """
    assert (num_entries > 2)
    assert (num_samples > 0)

    # Post-NN operations to compute the loss
    # First, we extract the first output of each bundle
    mask = np.zeros(num_entries * num_samples)
    mask[::num_entries] = 1
    unmask = np.ones(num_entries * num_samples) - mask
    cited = T.extra_ops.compress(mask, output, axis=0)
    odocs = T.extra_ops.compress(unmask, output, axis=0)

    # We duplicate each row 'x' num_entries-1 times
    cited = T.extra_ops.repeat(cited, num_entries-1, axis=0)
    # Then we compute element-wise product of x with each y, for each bundle
    sims = T.sum(cited * odocs, axis=1)

    # We reshape the similarities
    sims = T.reshape(sims, (num_samples, num_entries-1))
    sims = gamma * sims

    # We take the softmax of each row
    probs = T.nnet.softmax(sims)

    # We compute the loss as the sum of element on the first column
    loss_mask = np.zeros(num_entries-1)
    loss_mask[0] = 1
    loss = T.extra_ops.compress(loss_mask, probs, axis=1)

    return -T.log(T.prod(loss))
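One numerical caveat: the product of many probabilities underflows quickly, so an algebraically equivalent and safer form of the returned expression is the negative sum of logs:

loss_value = -T.sum(T.log(loss))  # == -T.log(T.prod(loss)), without the underflow-prone product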
ops.py — project: sampleRNN_ICLR2017, author: soroushmehr
def extend_middle_dim(_2D, num):
    """
    Takes a 2D tensor of shape (A, B) and returns a 3D tensor of shape (A, num, B).
    :usage:
        >>> TODO
    """
    rval = _2D.dimshuffle((0, 'x', 1))
    rval = T.alloc(rval, rval.shape[0], num, rval.shape[2])
    return rval
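A usage sketch for `extend_middle_dim`:

import numpy as np
import theano
import theano.tensor as T

m = T.fmatrix('m')            # shape (A, B)
m3 = extend_middle_dim(m, 5)  # shape (A, 5, B)
f = theano.function([m], m3.shape)
print(f(np.zeros((2, 3), dtype='float32')))  # [2 5 3]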
ops.py — project: sampleRNN_ICLR2017, author: soroushmehr
def T_one_hot(inp_tensor, n_classes):
    """
    :todo:
        - Implement other methods from here: 
        - Compare them speed-wise for different sizes
        - Implement N_one_hot for Numpy version, with speed tests.

    Theano one-hot (1-of-k) from an input tensor of indices.
    If the indices are of the shape (a0, a1, ..., an) the output
    shape would be (a0, a1, ..., an, n_classes).

    :params:
        - inp_tensor: any theano tensor with dtype int* as indices, all of
                      them in the range [0, n_classes-1].
        - n_classes: number of classes which determines the output size.

    :usage:
        >>> idx = T.itensor3()
        >>> idx_val = numpy.array([[[0,1,2,3],[4,5,6,7]]], dtype='int32')
        >>> out = T_one_hot(idx, 8).eval({idx: idx_val})
        >>> print out
        array([[[[ 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.]],
        [[ 0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.]]]])
        >>> print idx_val.shape, out.shape
        (1, 2, 4) (1, 2, 4, 8)
    """
    flattened = inp_tensor.flatten()
    z = T.zeros((flattened.shape[0], n_classes), dtype=theano.config.floatX)
    one_hot = T.set_subtensor(z[T.arange(flattened.shape[0]), flattened], 1)
    out_shape = [inp_tensor.shape[i] for i in xrange(inp_tensor.ndim)] + [n_classes]
    one_hot = one_hot.reshape(out_shape)
    return one_hot
skipthoughts.py — project: TAC-GAN, author: dashayushman
def build_encoder(tparams, options):
    """
    build an encoder, given pre-computed word embeddings
    """
    # word embedding (source)
    embedding = tensor.tensor3('embedding', dtype='float32')
    x_mask = tensor.matrix('x_mask', dtype='float32')

    # encoder
    proj = get_layer(options['encoder'])[1](tparams, embedding, options,
                                            prefix='encoder',
                                            mask=x_mask)
    ctx = proj[0][-1]

    return embedding, x_mask, ctx
layer.py — project: deep_srl, author: luheng
def connect(self, inputs):
    features = [None] * self.num_feature_types
    for i in range(self.num_feature_types):
      indices = inputs[:,:,i].flatten()
      proj_shape = [inputs.shape[0], inputs.shape[1], self.embedding_shapes[i][1]]
      features[i] = self.embeddings[i][indices].reshape(proj_shape)

    if self.num_feature_types == 1:
      return features[0]
    return tensor.concatenate(features, axis=2)
layer.py — project: deep_srl, author: luheng
def connect(self, inputs):
    energy = tensor.dot(inputs, self.W) + self.b
    energy = energy.reshape([energy.shape[0] * energy.shape[1], energy.shape[2]])
    log_scores = tensor.log(tensor.nnet.softmax(energy))
    predictions = tensor.argmax(log_scores, axis=-1)
    return (log_scores, predictions)
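A small numerical note: `log(softmax(x))` loses precision for large-magnitude energies. If the installed Theano version provides it, `tensor.nnet.logsoftmax` is a more stable drop-in for the `log_scores` line above:

log_scores = tensor.nnet.logsoftmax(energy)  # assumes logsoftmax exists in this Theano version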

