Python reduce_prod() example source code

layers.py (project: rllabplusplus, author: shaneshixiang)
def get_output_for(self, input, **kwargs):
        # total_entries = tf.reduce_prod(tf.shape(input))
        pre_shape = tf.shape(input)[:self.outdim - 1]
        to_flatten = tf.reduce_prod(tf.shape(input)[self.outdim - 1:])
        return tf.reshape(input, tf.concat(axis=0, values=[pre_shape, tf.stack([to_flatten])]))
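A quick standalone check of the same reshape trick (a minimal sketch, assuming TensorFlow 1.x; the input tensor and outdim value are made up for illustration):

import tensorflow as tf

x = tf.zeros([2, 3, 4, 5])
outdim = 2  # keep the first outdim - 1 dims, flatten the rest into one
pre_shape = tf.shape(x)[:outdim - 1]
to_flatten = tf.reduce_prod(tf.shape(x)[outdim - 1:])
y = tf.reshape(x, tf.concat(axis=0, values=[pre_shape, tf.stack([to_flatten])]))
with tf.Session() as sess:
    print(sess.run(tf.shape(y)))  # [ 2 60]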
bernoulli.py (project: rllabplusplus, author: shaneshixiang)
def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        old_p = old_dist_info_vars["p"]
        new_p = new_dist_info_vars["p"]
        ndims = old_p.get_shape().ndims
        return tf.reduce_prod(x_var * new_p / (old_p + TINY) + (1 - x_var) * (1 - new_p) / (1 - old_p + TINY),
                              axis=ndims - 1)
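A quick numeric check of the formula (a sketch, assuming TensorFlow 1.x; TINY stands in for a small constant such as 1e-8): with x = [1, 0], old p = [0.5, 0.5] and new p = [0.8, 0.4], the likelihood ratio is (0.8/0.5) * (0.6/0.5) = 1.92.

import tensorflow as tf

TINY = 1e-8
x_var = tf.constant([[1., 0.]])
old_p = tf.constant([[0.5, 0.5]])
new_p = tf.constant([[0.8, 0.4]])
ratio = tf.reduce_prod(x_var * new_p / (old_p + TINY) +
                       (1 - x_var) * (1 - new_p) / (1 - old_p + TINY), axis=1)
with tf.Session() as sess:
    print(sess.run(ratio))  # ~[1.92]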
fwgrad.py (project: tensorflow-forward-ad, author: renmengye)
def MaxPool_FwGrad(op,
                   dx,
                   ksize=[1, 2, 2, 1],
                   strides=[1, 2, 2, 1],
                   padding="SAME",
                   _op_table=None,
                   _grad_table=None):
  """Forward gradient operator for max pooling.

  Args:
    x: Input tensor, 4D tensor, [N, H, W, C].
    dx: Gradient of the input tensor, 4D tensor, [N, H, W, C].
    ksize: Kernel size of the max pooling operator, list of integers.
    strides: Strides of the max pooling operator, list of integers.
    padding: Padding, string, "SAME" or "VALID".
    data_format: "NHWC" or "NCHW".
  """
  if dx is None:
    return None
  x = op.inputs[0]
  y = op.outputs[0]
  # argmax holds, for every pooled output element, the flat index of the
  # input element it came from.
  _, argmax = tf.nn.max_pool_with_argmax(x, ksize, strides, padding)
  dx_flat = tf.reshape(dx, [-1])
  argmax_flat = tf.reshape(argmax, [-1])
  y_zero = tf.zeros_like(y, dtype=argmax.dtype)
  x_shape = tf.cast(tf.shape(x), argmax.dtype)
  batch_dim = tf.reshape(
      tf.range(
          x_shape[0], dtype=argmax.dtype), [-1, 1, 1, 1])
  # The argmax indices are flat within each example, so offset them by
  # example_index * elements_per_example before gathering from the fully
  # flattened dx.
  nelem = tf.reduce_prod(x_shape[1:])
  batch_dim *= nelem
  batch_dim += y_zero
  batch_dim = tf.reshape(batch_dim, [-1])
  argmax_flat += batch_dim
  # The forward gradient of max pooling selects dx at the argmax locations.
  dx_sel = tf.gather(dx_flat, argmax_flat)
  dy = tf.reshape(dx_sel, tf.shape(argmax))
  return dy
test_ternary_encoder_decoder.py (project: terngrad, author: wenwei202)
def ternary_decoder(encoded_data, scaler, shape):
  """Decoding the signs to float format """
  a = tf.cast(encoded_data, tf.int32)
  a_split1 = tf.mod(a,4)
  a_split2 = tf.to_int32(tf.mod(a/4,4))
  a_split3 = tf.to_int32(tf.mod(a/16,4))
  a_split4 = tf.to_int32(tf.mod(a/64,4))
  a = tf.concat([a_split1, a_split2, a_split3, a_split4], 0)
  real_size = tf.reduce_prod(shape)
  a = tf.to_float(a)
  a = tf.gather(a, tf.range(0,real_size))
  a = tf.reshape(a, shape)
  a = tf.subtract(a, 1)
  decoded = a*scaler
  return decoded
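The decoder above implies an encoder that packs four ternary digits into each byte. A minimal sketch of such an encoder follows (an assumption reconstructed from the decoder, not necessarily the project's own encoder; values holds integers in {0, 1, 2}, encoded_len is a Python int with 4 * encoded_len >= the number of values):

import tensorflow as tf

def ternary_encoder_sketch(values, encoded_len):
  """Packs a flat tensor of values in {0, 1, 2} into base-4 digits, four per byte."""
  pad = 4 * encoded_len - tf.size(values)
  v = tf.concat([tf.to_int32(values), tf.zeros([pad], tf.int32)], 0)
  chunks = tf.reshape(v, [4, encoded_len])
  packed = chunks[0] + 4 * chunks[1] + 16 * chunks[2] + 64 * chunks[3]
  return tf.cast(packed, tf.uint8)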
modellib.py (project: rec-attend-public, author: renmengye)
def f_match_loss(y_out, y_gt, match, timespan, loss_fn, model=None):
  """Binary cross entropy with matching.
  Args:
    y_out: [B, N, H, W] or [B, N, D]
    y_gt: [B, N, H, W] or [B, N, D]
    match: [B, N, N]
    match_count: [B]
    timespan: N
    loss_fn: 
  """
  # N * [B, 1, H, W]
  y_out_list = tf.split(1, timespan, y_out)
  # N * [B, 1, N]
  match_list = tf.split(1, timespan, match)
  err_list = [None] * timespan
  shape = tf.shape(y_out)
  num_ex = tf.to_float(shape[0])
  num_dim = tf.to_float(tf.reduce_prod(tf.to_float(shape[2:])))
  sshape = tf.size(shape)

  # [B, N, M] => [B, N]
  match_sum = tf.reduce_sum(match, reduction_indices=[2])
  # [B, N] => [B]
  match_count = tf.reduce_sum(match_sum, reduction_indices=[1])
  match_count = tf.maximum(match_count, 1)

  for ii in range(timespan):
    # [B, 1, H, W] * [B, N, H, W] => [B, N, H, W] => [B, N]
    # [B, N] * [B, N] => [B]
    # [B] => [B, 1]
    red_idx = tf.range(2, sshape)
    err_list[ii] = tf.expand_dims(
        tf.reduce_sum(
            tf.reduce_sum(loss_fn(y_out_list[ii], y_gt), red_idx) *
            tf.reshape(match_list[ii], [-1, timespan]), [1]), 1)

  # N * [B, 1] => [B, N] => [B]
  err_total = tf.reduce_sum(tf.concat(1, err_list), reduction_indices=[1])

  return tf.reduce_sum(err_total / match_count) / num_ex / num_dim
modellib.py (project: rec-attend-public, author: renmengye)
def get_normalized_gamma(size, filter_height, filter_width):
  """Get normalized gamma.
  Args:
    size: [B, T, 2] or [B, 2] or [2]
    filter_height: int
    filter_width: int
  Returns:
    lg_gamma: [B, T] or [B] or float
  """
  rank = tf.shape(tf.shape(size))
  filter_area = filter_height * filter_width
  area = tf.reduce_prod(size, rank - 1)
  lg_gamma = tf.log(float(filter_area)) - tf.log(area)
  return lg_gamma
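A quick numeric check (a sketch, assuming TensorFlow 1.x and the function above): for a 3x3 filter and size [6., 3.], lg_gamma = log(9) - log(18) = log(0.5).

import tensorflow as tf

size = tf.constant([6., 3.])
lg_gamma = get_normalized_gamma(size, 3, 3)
with tf.Session() as sess:
    print(sess.run(lg_gamma))  # ~-0.6931 == log(9. / 18.)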
tensorflow_backend.py (project: keraflow, author: ipod825)
def prod(self, x, axis=None, keepdims=False):
        '''Multiplies the values in a tensor along the specified axis.
        '''
        return tf.reduce_prod(x, reduction_indices=axis, keep_dims=keepdims)
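Outside the backend wrapper, the call maps directly onto tf.reduce_prod (a minimal sketch, assuming TensorFlow 1.x, where the keyword is keep_dims rather than the later keepdims):

import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
with tf.Session() as sess:
    print(sess.run(tf.reduce_prod(x, reduction_indices=1, keep_dims=True)))  # [[2.], [12.]]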
MIL_server.py (project: image-text-matching, author: llltttppp)
def imagenet(self, image_feat, reuse=False):
        with tf.variable_scope('image_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            image_fc1 = tf.contrib.layers.fully_connected(image_feat, 4096, weights_regularizer=wd, scope='i_fc1')
            image_fc2 = tf.contrib.layers.fully_connected(image_fc1, self.num_class, activation_fn=None, weights_regularizer=wd, scope='i_fc2')
            prob = tf.reduce_mean(image_fc2, axis=1)  # 1 - tf.reduce_prod(1 - image_fc2, axis=1)
        self.endpoint['image_fc1'] = image_fc1
        self.endpoint['image_fc2'] = image_fc2
        self.endpoint['prob'] = prob
        return prob
MIL.py (project: image-text-matching, author: llltttppp)
def imagenet(self, image_feat, reuse=False):
        with tf.variable_scope('image_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            image_fc1 = tf.contrib.layers.fully_connected(image_feat, 4096, weights_regularizer=wd, scope='i_fc1')
            image_fc2 = tf.contrib.layers.fully_connected(image_fc1, 5000, activation_fn=tf.nn.sigmoid, weights_regularizer=wd, scope='i_fc2')
            # Noisy-OR pooling: the probability that at least one instance fires.
            prob = 1 - tf.reduce_prod(1 - image_fc2, axis=1)
        self.endpoint['image_fc1'] = image_fc1
        self.endpoint['image_fc2'] = image_fc2
        self.endpoint['prob'] = prob
        return prob
core.py (project: dataset, author: analysiscenter)
def flatten2d(inputs, name=None):
    """ Flatten tensor to two dimensions (batch_size, item_vector_size) """
    x = tf.convert_to_tensor(inputs)
    dims = tf.reduce_prod(tf.shape(x)[1:])
    x = tf.reshape(x, [-1, dims], name=name)
    return x
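Example usage (a sketch, assuming TensorFlow 1.x and the flatten2d function above): all trailing dimensions collapse into one item vector per batch element.

import tensorflow as tf

x = tf.zeros([8, 3, 4, 5])
flat = flatten2d(x)
with tf.Session() as sess:
    print(sess.run(tf.shape(flat)))  # [ 8 60]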
scoring.py (project: factorix, author: gbouchar)
def multilinear(emb, tuples, l2=0):
    """
    Compute the dot product of real vectors at selected embeddings
    Note that this model is called Canonical Parafac (CP), and corresponds to the "distmult" model in some scientific
    publications on relational database factorization.
    :param emb: embedding matrix of size [n_emb, rank] containing float numbers
    :param tuples: tuple matrix of size [n_t, arity] containing integers
    :param l2: optional l2 regularization strength that is added to the score. If it is different from 0, the function
    returns a pair (pred, l2norm) where pred is the sample prediction, but l2norm is the l2 norm of the selected
    embeddings
    :return: the multilinear dot product between selected embeddings S[i] = sum_j prod_k E[I[i,k],j]

    >>> embeddings = [[1., 1, 0, 3], [0, 1, 0, 1], [-1, 1, 1, 5]]
    >>> idx = tf.Variable([[0, 1], [1, 0], [0, 2], [2, 0], [1, 2], [2, 1]])
    >>> g = multilinear(embeddings, idx)
    >>> print(tf_eval(g))
    [  4.   4.  15.  15.   6.   6.]
    """
    emb_sel = tf.gather(emb, tuples)

    pred = tf.reduce_sum(tf.reduce_prod(emb_sel, 1), 1)
    if l2 == 0:  # unregularized prediction ==> returns only the predictions
        return pred
    else:  # l2 regularization of the selected embeddings
        reg = l2 * tf.reduce_sum(tf.square(emb_sel))
        return pred, reg
scoring.py (project: factorix, author: gbouchar)
def multilinear_grad(emb: tf.Tensor, tuples: tf.Tensor, score=False) -> tf.Tensor:
    tuple_shape = [d.value for d in tuples.get_shape()]
    # if len(tuple_shape) > 2:
    #     n = np.prod(tuple_shape[:-1])
    #     tuples = tf.reshape(tuples, (n, -1))
    # n = tuples.get_shape()[0].value
    order = tuples.get_shape()[2].value
    rank = emb.get_shape()[-1].value
    if order == 2:
        if score:
            emb_sel = tf.gather(emb, tuples)
            grad_score = tf.reshape(tf.reverse(emb_sel, [False, False, True, False]), tuple_shape[:-1] + [2, rank])
            prod = tf.reduce_prod(emb_sel, 2)
            preds = tf.reshape(tf.reduce_sum(prod, 2), tuple_shape[:-1])
            return grad_score, preds
    raise NotImplementedError('Todo')
    # grad_score0 = tf.reverse(emb_sel, [False, True, False])  # reverse the row and column embeddings
    # prod = tf.reduce_prod(emb_sel, 1)
    # preds = tf.reshape(tf.reduce_sum(prod, 1), tuple_shape[:-1])
    # else:  # derivative of a product
    #     prod = tf.reduce_prod(emb_sel, 1)
    #     grad_score0 = tf.tile(tf.reshape(prod, (n, 1, rank)), (1, order, 1)) / emb_sel
    # grad_score = tf.reshape(grad_score0, tuple_shape + [rank])
    # if score:
    #     prod = tf.reduce_prod(emb_sel, 1)
    #     preds = tf.reshape(tf.reduce_sum(prod, 1), tuple_shape[:-1])
    #     return grad_score, preds
    # else:
    #     return grad_score
sdautoencoder.py (project: deep-learning, author: lbkchen)
def corrupt(tensor, corruption_level=0.05):
    """Uses the masking noise algorithm to mask corruption_level proportion
    of the input.

    :param tensor: A tensor whose values are to be corrupted.
    :param corruption_level: A float in [0, 1] specifying the probability of corrupting each value.
    :return: The corrupted tensor.
    """
    total_samples = tf.reduce_prod(tf.shape(tensor))
    corruption_matrix = tf.multinomial(tf.log([[corruption_level, 1 - corruption_level]]), total_samples)
    corruption_matrix = tf.cast(tf.reshape(corruption_matrix, shape=tf.shape(tensor)), dtype=tf.float32)
    return tf.multiply(tensor, corruption_matrix)
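Example usage (a sketch, assuming TensorFlow 1.x and the corrupt function above): roughly corruption_level of the entries are zeroed, so the mean of an all-ones tensor drops accordingly.

import tensorflow as tf

x = tf.ones([100, 100])
y = corrupt(x, corruption_level=0.05)
with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(y)))  # ~0.95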
tracker.py (project: hart, author: akosiorek)
def _area_loss(pred_bbox, img_size, presence):
    area = pred_bbox[..., 2] * pred_bbox[..., 3]
    ratio = area / tf.reduce_prod(tf.to_float(img_size))
    weights = tf.clip_by_value(ratio, 1., 10.)
    ratio = tf.clip_by_value(ratio, 0., 1.)
    return _time_weighted_nll(1 - ratio, presence, weights)
eval_tools.py (project: hart, author: akosiorek)
def log_norm(expr_list, name):
    """

    :param expr_list:
    :param name:
    :return:
    """
    n_elems = 0
    norm = 0.
    for e in expr_list:
        n_elems += tf.reduce_prod(tf.shape(e))
        norm += tf.reduce_sum(e**2)
    norm /= tf.to_float(n_elems)
    tf.summary.scalar(name, norm)
    return norm
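Example usage (a sketch, assuming TensorFlow 1.x and the log_norm function above): for a 2x3 tensor of ones and a length-4 tensor of twos, the result is (6 * 1 + 4 * 4) / 10 = 2.2.

import tensorflow as tf

a = tf.ones([2, 3])
b = 2. * tf.ones([4])
norm = log_norm([a, b], 'param_norm')
with tf.Session() as sess:
    print(sess.run(norm))  # 2.2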
train_tools.py (project: hart, author: akosiorek)
def minimize_clipped(optimizer, loss, clip_value, return_gvs=False, soft=False, **kwargs):
    """Computes a train_op with clipped gradients in the range [-clip_value, clip_value]

    :param optimizer: Tensorflow optimizer object
    :param loss: tensor
    :param clip_value: scalar value
    :param return_gvs: returns list of tuples of (gradient, parameter) for trainable variables
    :param soft: if True, rescales all gradients by a common factor whenever their mean squared norm exceeds clip_value**2, instead of clipping each entry
    :param kwargs: kwargs for optimizer.compute_gradients function
    :return: train_step
    """

    gvs = optimizer.compute_gradients(loss, **kwargs)
    clipped_gvs = [(g, v) for (g, v) in gvs if g is not None]

    if not soft:
        clipped_gvs = [(tf.clip_by_value(g, -clip_value, clip_value), v) for (g, v) in clipped_gvs]

    else:
        n_elems = 0
        norm_squared = 0.
        for g, v in clipped_gvs:  # only variables that actually received a gradient
            n_elems += tf.reduce_prod(tf.shape(g))
            norm_squared += tf.reduce_sum(g ** 2)

        norm_squared /= tf.to_float(n_elems)
        inv_norm = tf.rsqrt(norm_squared)
        cond = tf.greater(norm_squared, clip_value ** 2)

        def clip(x):
            return tf.cond(cond, lambda: clip_value * x * inv_norm, lambda: x)

        clipped_gvs = [(clip(g), v) for (g, v) in clipped_gvs]

    train_step = optimizer.apply_gradients(clipped_gvs)

    if return_gvs:
        train_step = (train_step, gvs)
    return train_step
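Example usage (a sketch, assuming TensorFlow 1.x and the minimize_clipped function above): the gradient of x**2 at x = 5 is 10; hard clipping to 1 turns the 0.1-learning-rate step into 0.1.

import tensorflow as tf

x = tf.get_variable('x', initializer=5.)
loss = tf.square(x)
opt = tf.train.GradientDescentOptimizer(0.1)
train_step = minimize_clipped(opt, loss, clip_value=1.)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_step)
    print(sess.run(x))  # 4.9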
test_optimizers.py (project: GPflow, author: GPflow)
def _build_likelihood(self):
        return tf.reduce_sum(self.a) + sum(map(tf.reduce_prod, self.trainable_vars))
features.py (project: GPflow, author: GPflow)
def Kuf(self, kern, Xnew):
        if isinstance(kern, kernels.RBF):
            with decors.params_as_tensors_for(kern):
                Xnew, _ = kern._slice(Xnew, None)
                Zmu, Zlen = kern._slice(self.Z, self.scales)
                idlengthscales = kern.lengthscales + Zlen
                d = self._cust_square_dist(Xnew, Zmu, idlengthscales)
                Kuf = tf.transpose(kern.variance * tf.exp(-d / 2) *
                                   tf.reshape(tf.reduce_prod(kern.lengthscales / idlengthscales, 1),
                                              (1, -1)))
            return Kuf
        else:
            raise NotImplementedError(
                "Multiscale features not implemented for `%s`." % str(type(kern)))
features.py (project: GPflow, author: GPflow)
def Kuu(self, kern, jitter=0.0):
        if isinstance(kern, kernels.RBF):
            with decors.params_as_tensors_for(kern):
                Zmu, Zlen = kern._slice(self.Z, self.scales)
                idlengthscales2 = tf.square(kern.lengthscales + Zlen)
                sc = tf.sqrt(
                    tf.expand_dims(idlengthscales2, 0) + tf.expand_dims(idlengthscales2, 1) - tf.square(
                        kern.lengthscales))
                d = self._cust_square_dist(Zmu, Zmu, sc)
                Kzz = kern.variance * tf.exp(-d / 2) * tf.reduce_prod(kern.lengthscales / sc, 2)
                Kzz += jitter * tf.eye(len(self), dtype=settings.float_type)
            return Kzz
        else:
            raise NotImplementedError(
                "Multiscale features not implemented for `%s`." % str(type(kern)))
ekernels.py (project: GPflow, author: GPflow)
def Linear_RBF_eKxzKzx(self, Ka, Kb, Z, Xmu, Xcov):
        Xcov = self._slice_cov(Xcov)
        Z, Xmu = self._slice(Z, Xmu)
        lin, rbf = (Ka, Kb) if isinstance(Ka, Linear) else (Kb, Ka)
        if not isinstance(lin, Linear):
            raise TypeError("{in_lin} is not {linear}".format(in_lin=str(type(lin)), linear=str(Linear)))
        if not isinstance(rbf, RBF):
            raise TypeError("{in_rbf} is not {rbf}".format(in_rbf=str(type(rbf)), rbf=str(RBF)))
        if lin.ARD or type(lin.active_dims) is not slice or type(rbf.active_dims) is not slice:
            raise NotImplementedError("Active dims and/or Linear ARD not implemented. "
                                      "Switching to quadrature.")
        D = tf.shape(Xmu)[1]
        M = tf.shape(Z)[0]
        N = tf.shape(Xmu)[0]

        if rbf.ARD:
            lengthscales = rbf.lengthscales
        else:
            lengthscales = tf.zeros((D, ), dtype=settings.float_type) + rbf.lengthscales

        lengthscales2 = lengthscales ** 2.0
        const = rbf.variance * lin.variance * tf.reduce_prod(lengthscales)
        gaussmat = Xcov + tf.matrix_diag(lengthscales2)[None, :, :]  # NxDxD
        det = tf.matrix_determinant(gaussmat) ** -0.5  # N

        cgm = tf.cholesky(gaussmat)  # NxDxD
        tcgm = tf.tile(cgm[:, None, :, :], [1, M, 1, 1])
        vecmin = Z[None, :, :] - Xmu[:, None, :]  # NxMxD
        d = tf.matrix_triangular_solve(tcgm, vecmin[:, :, :, None])  # NxMxDx1
        exp = tf.exp(-0.5 * tf.reduce_sum(d ** 2.0, [2, 3]))  # NxM
        # exp = tf.Print(exp, [tf.shape(exp)])

        vecplus = (Z[None, :, :, None] / lengthscales2[None, None, :, None] +
                   tf.matrix_solve(Xcov, Xmu[:, :, None])[:, None, :, :])  # NxMxDx1
        mean = tf.cholesky_solve(
            tcgm, tf.matmul(tf.tile(Xcov[:, None, :, :], [1, M, 1, 1]), vecplus))
        mean = mean[:, :, :, 0] * lengthscales2[None, None, :]  # NxMxD
        a = tf.matmul(tf.tile(Z[None, :, :], [N, 1, 1]),
                      mean * exp[:, :, None] * det[:, None, None] * const, transpose_b=True)
        return a + tf.transpose(a, [0, 2, 1])

