Python rank() usage examples (source code)

utils.py (project: zhusuan, author: thu-ml)
def is_same_dynamic_shape(x, y):
    """
    Whether `x` and `y` has the same dynamic shape.

    :param x: A Tensor.
    :param y: A Tensor.
    :return: A scalar Tensor of `bool`.
    """
    # There is a BUG of Tensorflow for not doing static shape inference
    # right in nested tf.cond()'s, so we are not comparing x and y's
    # shape directly but working with their concatenations.
    return tf.cond(
        tf.equal(tf.rank(x), tf.rank(y)),
        lambda: tf.reduce_all(tf.equal(
            tf.concat([tf.shape(x), tf.shape(y)], 0),
            tf.concat([tf.shape(y), tf.shape(x)], 0))),
        lambda: tf.convert_to_tensor(False, tf.bool))
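
A minimal usage sketch (not part of zhusuan; the placeholders and feeds are illustrative, and TensorFlow 1.x with the file's usual import tensorflow as tf is assumed):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 3])
y = tf.placeholder(tf.float32, shape=[None, None])
same = is_same_dynamic_shape(x, y)

with tf.Session() as sess:
    print(sess.run(same, feed_dict={x: [[1., 2., 3.]], y: [[4., 5., 6.]]}))  # True
    print(sess.run(same, feed_dict={x: [[1., 2., 3.]], y: [[4., 5.]]}))      # False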
utils.py (project: zhusuan, author: thu-ml)
def assert_rank_at_least(tensor, k, name):
    """
    Whether the rank of `tensor` is at least k.

    :param tensor: A tensor to be checked.
    :param k: The least rank allowed.
    :param name: The name of `tensor` for error message.
    :return: The checked tensor.
    """
    static_shape = tensor.get_shape()
    shape_err_msg = '{} should have rank >= {}.'.format(name, k)
    if static_shape and (static_shape.ndims < k):
        raise ValueError(shape_err_msg)
    if not static_shape:
        _assert_shape_op = tf.assert_rank_at_least(
            tensor, k, message=shape_err_msg)
        with tf.control_dependencies([_assert_shape_op]):
            tensor = tf.identity(tensor)
    return tensor
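
A brief usage sketch (my own addition; the tensor names are hypothetical). The static branch raises immediately, while a runtime assertion is attached when the rank is only known dynamically:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 5])
x = assert_rank_at_least(x, 2, 'x')              # passes the static check

try:
    assert_rank_at_least(tf.constant(1.0), 2, 'scalar')
except ValueError as e:
    print(e)                                     # scalar should have rank >= 2.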
preprocess.py (project: antgo, author: jianzfb)
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies([size_assertion], tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
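
Hypothetical usage of the helper above (offsets and crop sizes are made up; the file's own imports, including control_flow_ops from tensorflow.python.ops, are assumed). The same helper reappears nearly verbatim in several of the projects below:

import numpy as np
import tensorflow as tf

image = tf.placeholder(tf.float32, shape=[None, None, 3])
cropped = _crop(image, offset_height=2, offset_width=3, crop_height=4, crop_width=6)

with tf.Session() as sess:
    out = sess.run(cropped, feed_dict={image: np.zeros((10, 10, 3), np.float32)})
    print(out.shape)  # (4, 6, 3)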
utils.py (project: Master-R-CNN, author: Mark110)
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
eval_tools.py (project: hart, author: akosiorek)
def image_series_summary(tag, imgs, max_timesteps=10):
    # take only 3 items from the minibatch
    imgs = imgs[:, :3]

    # assume img.shape == (T, batch_size, n_obj, H, W, C)
    # let's log only for 1st obj
    tf.cond(tf.equal(tf.rank(imgs), 6), lambda: imgs[:, :, 0], lambda: imgs)

    shape = (max_timesteps,) + tuple(imgs.get_shape()[1:])
    nt = tf.shape(imgs)[0]

    def pad():
        paddings = tf.concat(axis=0, values=([[0, max_timesteps - nt]], tf.zeros((len(shape) - 1, 2), tf.int32)))
        return tf.pad(imgs, paddings)

    imgs = tf.cond(tf.greater(nt, max_timesteps), lambda: imgs[:max_timesteps], pad)
    imgs.set_shape(shape)
    imgs = tf.squeeze(imgs)
    imgs = tf.unstack(imgs)

    # concatenate along the columns
    imgs = tf.concat(axis=2, values=imgs)
    tf.summary.image(tag, imgs)
tensor_ops.py (project: hart, author: akosiorek)
def broadcast_against(tensor, against_expr):
    """Adds trailing dimensions to mask to enable broadcasting against data

    :param tensor: tensor to be broadcasted
    :param against_expr: tensor will be broadcasted against it
    :return: mask expr with tf.rank(mask) == tf.rank(data)
    """

    def cond(data, tensor):
        return tf.less(tf.rank(tensor), tf.rank(data))

    def body(data, tensor):
        return data, tf.expand_dims(tensor, -1)

    shape_invariants = [against_expr.get_shape(), tf.TensorShape(None)]
    _, tensor = tf.while_loop(cond, body, [against_expr, tensor], shape_invariants)
    return tensor
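
A small usage sketch (not from hart; the shapes are illustrative): expand a per-example mask so it broadcasts against data of shape [batch, time, features]:

import numpy as np
import tensorflow as tf

data = tf.placeholder(tf.float32, shape=[None, None, None])
mask = tf.placeholder(tf.float32, shape=[None])
mask_b = broadcast_against(mask, data)    # trailing dims are added until the ranks match
masked = data * mask_b                    # now broadcasts over time and features

with tf.Session() as sess:
    out = sess.run(mask_b, feed_dict={mask: np.ones(2, np.float32),
                                      data: np.zeros((2, 3, 4), np.float32)})
    print(out.shape)  # (2, 1, 1)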
kernels.py (project: GPflow, author: GPflow)
def _slice_cov(self, cov):
        """
        Slice the correct dimensions for use in the kernel, as indicated by
        `self.active_dims` for covariance matrices. This requires slicing the
        rows *and* columns. This will also turn flattened diagonal
        matrices into a tensor of full diagonal matrices.
        :param cov: Tensor of covariance matrices (NxDxD or NxD).
        :return: N x self.input_dim x self.input_dim.
        """
        cov = tf.cond(tf.equal(tf.rank(cov), 2), lambda: tf.matrix_diag(cov), lambda: cov)

        if isinstance(self.active_dims, slice):
            cov = cov[..., self.active_dims, self.active_dims]
        else:
            cov_shape = tf.shape(cov)
            covr = tf.reshape(cov, [-1, cov_shape[-1], cov_shape[-1]])
            gather1 = tf.gather(tf.transpose(covr, [2, 1, 0]), self.active_dims)
            gather2 = tf.gather(tf.transpose(gather1, [1, 0, 2]), self.active_dims)
            cov = tf.reshape(tf.transpose(gather2, [2, 0, 1]),
                             tf.concat([cov_shape[:-2], [len(self.active_dims), len(self.active_dims)]], 0))
        return cov
densities.py (project: GPflow, author: GPflow)
def multivariate_normal(x, mu, L):
    """
    L is the Cholesky decomposition of the covariance.

    x and mu are either vectors (ndim=1) or matrices. In the matrix case, we
    assume independence over the *columns*: the number of rows must match the
    size of L.
    """
    d = x - mu
    alpha = tf.matrix_triangular_solve(L, d, lower=True)
    num_col = 1 if tf.rank(x) == 1 else tf.shape(x)[1]
    num_col = tf.cast(num_col, settings.float_type)
    num_dims = tf.cast(tf.shape(x)[0], settings.float_type)
    ret = - 0.5 * num_dims * num_col * np.log(2 * np.pi)
    ret += - num_col * tf.reduce_sum(tf.log(tf.matrix_diag_part(L)))
    ret += - 0.5 * tf.reduce_sum(tf.square(alpha))
    return ret
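
For reference, a NumPy-only sanity check (not part of GPflow) of the quantity the graph above computes for a single column x with covariance L L^T:

import numpy as np

D = 3
rng = np.random.RandomState(0)
A = rng.randn(D, D)
cov = A.dot(A.T) + D * np.eye(D)
L = np.linalg.cholesky(cov)
x = rng.randn(D, 1)
mu = np.zeros((D, 1))

alpha = np.linalg.solve(L, x - mu)               # L^{-1} (x - mu)
logp = (-0.5 * D * np.log(2 * np.pi)
        - np.sum(np.log(np.diag(L)))
        - 0.5 * np.sum(alpha ** 2))

# Agrees with the textbook log N(x | mu, cov):
diff = x - mu
ref = (-0.5 * D * np.log(2 * np.pi)
       - 0.5 * np.linalg.slogdet(cov)[1]
       - 0.5 * float(diff.T.dot(np.linalg.solve(cov, diff))))
assert np.allclose(logp, ref)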
base.py (project: tefla, author: openAGI)
def _sparse_loss_softmax(self, logits, labels, is_training, weighted=False):
        log.info('Using sparse softmax loss')
        labels = tf.cast(labels, tf.int64)
        if weighted:
            if labels.get_shape().ndims != 2:  # static rank check: tf.rank() returns a Tensor, not an int
                labels = tf.one_hot(labels, self.num_classes)
            weights = self._compute_weights(labels)
            weights = tf.reduce_max(tf.multiply(weights, labels), axis=1)
            ce_loss = tf.losses.sparse_softmax_cross_entropy(
                tf.argmax(labels, axis=1), logits=logits, weights=weights, scope='cross_entropy_loss')
        else:
            ce_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=labels, logits=logits, name='cross_entropy_loss')
        ce_loss_mean = tf.reduce_mean(ce_loss, name='cross_entropy')
        if is_training:
            tf.add_to_collection('losses', ce_loss_mean)

            l2_loss = tf.add_n(tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES))
            l2_loss = l2_loss * self.cnf.get('l2_reg', 0.0)
            tf.add_to_collection('losses', l2_loss)

            return tf.add_n(tf.get_collection('losses'), name='total_loss')
        else:
            return ce_loss_mean
base.py (project: tefla, author: openAGI)
def _loss_softmax(self, logits, labels, is_training, weighted=False):
        log.info('Using softmax loss')
        labels = tf.cast(labels, tf.int64)
        if labels.get_shape().ndims != 2:  # static rank check: tf.rank() returns a Tensor, not an int
            labels = tf.one_hot(labels, self.num_classes)
        if weighted:
            weights = self._compute_weights(labels)
            weights = tf.reduce_max(tf.multiply(weights, labels), axis=1)
            ce_loss = tf.losses.softmax_cross_entropy(
                labels, logits=logits, weights=weights, label_smoothing=self.label_smoothing, scope='cross_entropy_loss')
        else:
            ce_loss = tf.nn.softmax_cross_entropy_with_logits(
                labels=labels, logits=logits, name='cross_entropy_loss')
        ce_loss_mean = tf.reduce_mean(ce_loss, name='cross_entropy')
        if is_training:
            tf.add_to_collection('losses', ce_loss_mean)

            l2_loss = tf.add_n(tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES))
            l2_loss = l2_loss * self.cnf.get('l2_reg', 0.0)
            tf.add_to_collection('losses', l2_loss)

            return tf.add_n(tf.get_collection('losses'), name='total_loss')
        else:
            return ce_loss_mean
base.py (project: tefla, author: openAGI)
def _loss_sigmoid(self, logits, labels, is_training, weighted=False):
        log.info('Using sigmoid loss')
        labels = tf.cast(labels, tf.int64)
        if labels.get_shape().ndims != 2:  # static rank check: tf.rank() returns a Tensor, not an int
            labels = tf.one_hot(labels, self.num_classes)
        if weighted:
            weights = self._compute_weights(labels)
            ce_loss = tf.losses.sigmoid_cross_entropy(
                labels, logits=logits, weights=weights, label_smoothing=self.label_smoothing, scope='sigmoid_cross_entropy_loss')
        else:
            ce_loss = tf.nn.sigmoid_cross_entropy_with_logits(
                labels=labels, logits=logits, name='sigmoid_cross_entropy_loss')
        ce_loss_mean = tf.reduce_mean(ce_loss, name='sigmoid_cross_entropy')
        if is_training:
            tf.add_to_collection('losses', ce_loss_mean)

            l2_loss = tf.add_n(tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES))
            l2_loss = l2_loss * self.cnf.get('l2_reg', 0.0)
            tf.add_to_collection('losses', l2_loss)

            return tf.add_n(tf.get_collection('losses'), name='total_loss')
        else:
            return ce_loss_mean
preprocessor.py (project: tefla, author: openAGI)
def random_adjust_contrast(image, min_delta=0.8, max_delta=1.25):
    """Randomly adjusts contrast.

    Makes sure the output image is still between 0 and 1.

    Args:
      image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
             with pixel values varying between [0, 1].
      min_delta: see max_delta.
      max_delta: how much to change the contrast. Contrast will change with a
                 value between min_delta and max_delta. This value will be
                 multiplied to the current contrast of the image.

    Returns:
      image: image which is the same shape as input image.
    """
    with tf.name_scope('RandomAdjustContrast', values=[image]):
        image = tf.image.random_contrast(image, min_delta, max_delta)
        image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
        return image
preprocessor.py (project: tefla, author: openAGI)
def random_adjust_hue(image, max_delta=0.02):
    """Randomly adjusts hue.

    Makes sure the output image is still between 0 and 1.

    Args:
      image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
             with pixel values varying between [0, 1].
      max_delta: change hue randomly with a value between 0 and max_delta.

    Returns:
      image: image which is the same shape as input image.
    """
    with tf.name_scope('RandomAdjustHue', values=[image]):
        image = tf.image.random_hue(image, max_delta)
        image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
        return image
preprocessor.py (project: tefla, author: openAGI)
def random_adjust_saturation(image, min_delta=0.8, max_delta=1.25):
    """Randomly adjusts saturation.

    Makes sure the output image is still between 0 and 1.

    Args:
      image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
             with pixel values varying between [0, 1].
      min_delta: see max_delta.
      max_delta: how much to change the saturation. Saturation will change with a
                 value between min_delta and max_delta. This value will be
                 multiplied to the current saturation of the image.

    Returns:
      image: image which is the same shape as input image.
    """
    with tf.name_scope('RandomAdjustSaturation', values=[image]):
        image = tf.image.random_saturation(image, min_delta, max_delta)
        image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
        return image
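
A hypothetical composition of the three wrappers above, for a float32 image already scaled to [0, 1] (the placeholder is illustrative):

import tensorflow as tf

image = tf.placeholder(tf.float32, shape=[None, None, 3])
augmented = random_adjust_contrast(image)
augmented = random_adjust_hue(augmented)
augmented = random_adjust_saturation(augmented)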
utils.py (project: lang2program, author: kelvinguu)
def broadcast(tensor, target_tensor):
    """Broadcast a tensor to match the shape of a target tensor.

    Args:
        tensor (Tensor): tensor to be tiled
        target_tensor (Tensor): tensor whose shape is to be matched
    """
    rank = lambda t: t.get_shape().ndims
    assert rank(tensor) == rank(target_tensor)  # TODO: assert that tensors have no overlapping non-unity dimensions

    orig_shape = tf.shape(tensor)
    target_shape = tf.shape(target_tensor)

    # if dim == 1, set it to target_dim
    # else, set it to 1
    tiling_factor = tf.select(tf.equal(orig_shape, 1), target_shape, tf.ones([rank(tensor)], dtype=tf.int32))
    broadcasted = tf.tile(tensor, tiling_factor)

    # Add static shape information
    broadcasted.set_shape(target_tensor.get_shape())

    return broadcasted
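
A minimal usage sketch (not from lang2program; shapes are illustrative). Note that the snippet targets pre-1.0 TensorFlow: tf.select was later renamed tf.where, so that call needs swapping on TF >= 1.0:

import numpy as np
import tensorflow as tf

t = tf.placeholder(tf.float32, shape=[None, 1])
target = tf.placeholder(tf.float32, shape=[None, 4])
tiled = broadcast(t, target)              # size-1 dims are tiled up to target's sizes

with tf.Session() as sess:
    out = sess.run(tiled, feed_dict={t: np.ones((2, 1), np.float32),
                                     target: np.zeros((2, 4), np.float32)})
    print(out.shape)  # (2, 4)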
girls.py (project: rascal-tensorflow, author: stayrascal)
def generate_girl():
    output = pixel_cnn()

    predict = tf.reshape(tf.multinomial(tf.nn.softmax(output), num_samples=1, seed=100), tf.shape(X))
    #predict_argmax = tf.reshape(tf.argmax(tf.nn.softmax(output), dimension=tf.rank(output) - 1), tf.shape(X))

    with tf.Session() as sess: 
        sess.run(tf.initialize_all_variables())

        saver = tf.train.Saver(tf.trainable_variables())
        saver.restore(sess, 'girl.ckpt-49')

        pics = np.zeros((1*1, image_height, image_width, image_channel), dtype=np.float32)

        for i in range(image_height):
            for j in range(image_width):
                for k in range(image_channel):
                    next_pic = sess.run(predict, feed_dict={X:pics})
                    pics[:, i, j, k] = next_pic[:, i, j, k]

        cv2.imwrite('girl.jpg', pics[0])
        print('generate image: girl.jpg')

utils.py (project: FastMaskRCNN, author: CharlesShang)
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
common_layers.py (project: tensor2tensor, author: tensorflow)
def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):
  """Pooling (supports "LEFT")."""
  with tf.name_scope("pool", [inputs]):
    static_shape = inputs.get_shape()
    if not static_shape or len(static_shape) != 4:
      raise ValueError("Inputs to conv must have statically known rank 4.")
    # Add support for left padding.
    if padding == "LEFT":
      assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1
      if len(static_shape) == 3:
        width_padding = 2 * (window_size[1] // 2)
        padding_ = [[0, 0], [width_padding, 0], [0, 0]]
      else:
        height_padding = 2 * (window_size[0] // 2)
        cond_padding = tf.cond(
            tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
            lambda: tf.constant(2 * (window_size[1] // 2)))
        width_padding = 0 if static_shape[2] == 1 else cond_padding
        padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
      inputs = tf.pad(inputs, padding_)
      inputs.set_shape([static_shape[0], None, None, static_shape[3]])
      padding = "VALID"

  return tf.nn.pool(inputs, window_size, pooling_type, padding, strides=strides)
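
A hypothetical call (the input shape is made up; pool relies on shape_list from the same common_layers.py, shown in the next snippet, and "LEFT" padding requires odd window sides):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[8, None, None, 16])
left_pooled = pool(x, (3, 3), "MAX", "LEFT")                  # causal-style padding
same_pooled = pool(x, (2, 2), "MAX", "SAME", strides=(2, 2))  # plain strided pooling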
common_layers.py (project: tensor2tensor, author: tensorflow)
def shape_list(x):
  """Return list of dims, statically where possible."""
  x = tf.convert_to_tensor(x)

  # If unknown rank, return dynamic shape
  if x.get_shape().dims is None:
    return tf.shape(x)

  static = x.get_shape().as_list()
  shape = tf.shape(x)

  ret = []
  for i in range(len(static)):
    dim = static[i]
    if dim is None:
      dim = shape[i]
    ret.append(dim)
  return ret
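
A short usage sketch (not from tensor2tensor): statically known dims come back as Python ints, unknown ones as scalar Tensors, so the list can be fed straight into tf.reshape:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
batch, height, width, channels = shape_list(x)      # batch is a Tensor, the rest are ints
flat = tf.reshape(x, [batch, height * width * channels])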
ade20k_preprocessing.py (project: tensorflow-pspnet, author: pudae)
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
ops.py (project: tensorflow, author: luyishisi)
def expanded_shape(orig_shape, start_dim, num_dims):
  """Inserts multiple ones into a shape vector.

  Inserts an all-1 vector of length num_dims at position start_dim into a shape.
  Can be combined with tf.reshape to generalize tf.expand_dims.

  Args:
    orig_shape: the shape into which the all-1 vector is added (int32 vector)
    start_dim: insertion position (int scalar)
    num_dims: length of the inserted all-1 vector (int scalar)
  Returns:
    An int32 vector of length tf.size(orig_shape) + num_dims.
  """
  with tf.name_scope('ExpandedShape'):
    start_dim = tf.expand_dims(start_dim, 0)  # scalar to rank-1
    before = tf.slice(orig_shape, [0], start_dim)
    add_shape = tf.ones(tf.reshape(num_dims, [1]), dtype=tf.int32)
    after = tf.slice(orig_shape, start_dim, [-1])
    new_shape = tf.concat([before, add_shape, after], 0)
    return new_shape
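
A brief usage sketch (my own example): turn shape [2, 3] into [2, 1, 1, 3], i.e. tf.expand_dims applied twice at position 1:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[2, 3])
new_shape = expanded_shape(tf.shape(x), 1, 2)   # [2, 3] -> [2, 1, 1, 3]
y = tf.reshape(x, new_shape)

with tf.Session() as sess:
    print(sess.run(tf.shape(y), feed_dict={x: [[1., 2., 3.], [4., 5., 6.]]}))  # [2 1 1 3]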
shape_utils.py (project: tensorflow, author: luyishisi)
def pad_tensor(t, length):
  """Pads the input tensor with 0s along the first dimension up to the length.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    length: a tensor of shape [1]  or an integer, indicating the first dimension
      of the input tensor t after padding, assuming length <= t.shape[0].

  Returns:
    padded_t: the padded tensor, whose first dimension is length. If the length
      is an integer, the first dimension of padded_t is set to length
      statically.
  """
  t_rank = tf.rank(t)
  t_shape = tf.shape(t)
  t_d0 = t_shape[0]
  pad_d0 = tf.expand_dims(length - t_d0, 0)
  pad_shape = tf.cond(
      tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0),
      lambda: tf.expand_dims(length - t_d0, 0))
  padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
  if not _is_tensor(length):
    padded_t = _set_dim_0(padded_t, length)
  return padded_t
shape_utils.py (project: tensorflow, author: luyishisi)
def clip_tensor(t, length):
  """Clips the input tensor along the first dimension up to the length.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    length: a tensor of shape [1]  or an integer, indicating the first dimension
      of the input tensor t after clipping, assuming length <= t.shape[0].

  Returns:
    clipped_t: the clipped tensor, whose first dimension is length. If the
      length is an integer, the first dimension of clipped_t is set to length
      statically.
  """
  clipped_t = tf.gather(t, tf.range(length))
  if not _is_tensor(length):
    clipped_t = _set_dim_0(clipped_t, length)
  return clipped_t
shape_utils.py (project: tensorflow, author: luyishisi)
def pad_or_clip_tensor(t, length):
  """Pad or clip the input tensor along the first dimension.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    length: a tensor of shape [1]  or an integer, indicating the first dimension
      of the input tensor t after processing.

  Returns:
    processed_t: the processed tensor, whose first dimension is length. If the
      length is an integer, the first dimension of the processed tensor is set
      to length statically.
  """
  processed_t = tf.cond(
      tf.greater(tf.shape(t)[0], length),
      lambda: clip_tensor(t, length),
      lambda: pad_tensor(t, length))
  if not _is_tensor(length):
    processed_t = _set_dim_0(processed_t, length)
  return processed_t
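
A usage sketch for the three helpers above. _is_tensor and _set_dim_0 live elsewhere in shape_utils.py and are not shown here; the two stand-ins below are minimal approximations added only so the sketch runs:

import numpy as np
import tensorflow as tf

def _is_tensor(x):
    # Stand-in: treat TF graph objects as tensors, everything else as static.
    return isinstance(x, (tf.Tensor, tf.SparseTensor, tf.Variable))

def _set_dim_0(t, d0):
    # Stand-in: pin the static size of the first dimension.
    shape = t.get_shape().as_list()
    shape[0] = d0
    t.set_shape(shape)
    return t

boxes = tf.placeholder(tf.float32, shape=[None, 4])
fixed = pad_or_clip_tensor(boxes, 100)    # always 100 rows: pad with zeros or clip

with tf.Session() as sess:
    out = sess.run(fixed, feed_dict={boxes: np.zeros((7, 4), np.float32)})
    print(out.shape)  # (100, 4)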
cans.py (project: canton, author: ctmakro)
def __call__(self,i):
        # assume hidden, input is of shape [batch,num_h] and [batch,num_in]
        hidden = i[0]
        inp = i[1]
        wz,wr,w = self.wz,self.wr,self.w
        dims = tf.rank(inp)
        c = tf.concat([hidden,inp],axis=dims-1)
        z = tf.sigmoid(wz(c))
        r = tf.sigmoid(wr(c))
        h_c = tf.tanh(w(tf.concat([hidden*r,inp],axis=dims-1)))
        h_new = (1-z) * hidden + z * h_c
        return h_new

# GRU2 as reported in *Gate-Variants of Gated Recurrent Unit (GRU) Neural Networks*
# the gates are now only driven by hidden state.
# mod: removed reset gate.
# conclusion 20171220: GRU2(without reset gate) is almost as good as GRU.
transforms.py (project: GPflowOpt, author: GPflow)
def build_backward_variance(self, Yvar):
        """
        Additional method for scaling variance backward (used in :class:`.Normalizer`). Can process both the diagonal
        variances returned by predict_f, as well as full covariance matrices.

        :param Yvar: size N x N x P or size N x P
        :return: Yvar scaled, same rank and size as input
        """
        rank = tf.rank(Yvar)
        # Because TensorFlow evaluates both fn1 and fn2, the transpose can't be in the same line. If a full cov
        # matrix is provided fn1 turns it into a rank 4, then tries to transpose it as a rank 3.
        # Splitting it in two steps however works fine.
        Yvar = tf.cond(tf.equal(rank, 2), lambda: tf.matrix_diag(tf.transpose(Yvar)), lambda: Yvar)
        Yvar = tf.cond(tf.equal(rank, 2), lambda: tf.transpose(Yvar, perm=[1, 2, 0]), lambda: Yvar)

        N = tf.shape(Yvar)[0]
        D = tf.shape(Yvar)[2]
        L = tf.cholesky(tf.square(tf.transpose(self.A)))
        Yvar = tf.reshape(Yvar, [N * N, D])
        scaled_var = tf.reshape(tf.transpose(tf.cholesky_solve(L, tf.transpose(Yvar))), [N, N, D])
        return tf.cond(tf.equal(rank, 2), lambda: tf.reduce_sum(scaled_var, axis=1), lambda: scaled_var)
utils.py (project: TFMaskRCNN, author: hillox)
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
prior.py (project: attend_infer_repeat, author: akosiorek)
def sample(self, n=None):
        if self._bernoulli is None:
            self._bernoulli = Bernoulli(self._steps_probs)

        sample = self._bernoulli.sample(n)
        sample = tf.cumprod(sample, tf.rank(sample) - 1)
        sample = tf.reduce_sum(sample, -1)
        return sample
helpers.py (project: multimodal_varinf, author: tmoer)
def repeat_v2(x,k):
    ''' repeat k times along first dimension '''
    def change(x,k):    
        shape = x.get_shape().as_list()[1:]
        x_1 = tf.expand_dims(x,1)
        tile_shape = tf.concat([tf.ones(1,dtype='int32'),[k],tf.ones([tf.rank(x)-1],dtype='int32')],axis=0)
        x_rep = tf.tile(x_1,tile_shape)
        new_shape = np.insert(shape,0,-1)
        x_out = tf.reshape(x_rep,new_shape)    
        return x_out

    return tf.cond(tf.equal(k,1),
                   lambda: x,
                   lambda: change(x,k))
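
A short usage sketch (my own example): repeat each row of a [batch, d] tensor k times along the first dimension:

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 2])
rep = repeat_v2(x, 3)

with tf.Session() as sess:
    out = sess.run(rep, feed_dict={x: np.array([[1., 2.], [3., 4.]], np.float32)})
    print(out.shape)  # (6, 2): each row appears three times in a row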

