Python assert_equal() usage examples from open-source projects

test_tensor_node.py (project: nengo_dl, author: nengo)
def test_reshaped():
    x = tf.zeros((5, 12))

    @reshaped((4, 3))
    def my_func(_, a):
        with tf.control_dependencies([tf.assert_equal(tf.shape(a),
                                                      (5, 4, 3))]):
            return tf.identity(a)

    y = my_func(None, x)

    with tf.Session() as sess:
        sess.run(y)
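For reference, `reshaped` is a nengo_dl decorator that presents each flat input row in a given shape; the test above feeds a (5, 12) tensor and expects to see (5, 4, 3) inside the wrapped function. A minimal sketch of such a decorator, consistent with the test but not nengo_dl's actual implementation:

import tensorflow as tf

def reshaped(shape):
    # illustrative only: reshape the [batch, flat] input to [batch, *shape]
    # before calling the wrapped function
    def decorator(func):
        def wrapper(obj, x):
            return func(obj, tf.reshape(x, (-1,) + tuple(shape)))
        return wrapper
    return decorator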
ops.py (project: sText2Image, author: elliottwu)
def kl_divergence(p, q):
    # graph-mode assert ops only run when something depends on them,
    # so wire them in as control dependencies of the shape lookup
    deps = [tf.assert_rank(p, 2),
            tf.assert_rank(q, 2),
            tf.assert_equal(tf.shape(p), tf.shape(q))]
    with tf.control_dependencies(deps):
        p_shape = tf.shape(p)

    # normalize each row to sum to 1
    p_ = tf.divide(p, tf.tile(tf.expand_dims(tf.reduce_sum(p, axis=1), 1), [1, p_shape[1]]))
    q_ = tf.divide(q, tf.tile(tf.expand_dims(tf.reduce_sum(q, axis=1), 1), [1, p_shape[1]]))

    return tf.reduce_sum(tf.multiply(p_, tf.log(tf.divide(p_, q_))), axis=1)
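A quick usage sketch (placeholder shapes are illustrative); the shape assertion fires at run time if the two batches differ:

import numpy as np

p = tf.placeholder(tf.float32, [None, 4])
q = tf.placeholder(tf.float32, [None, 4])
kl = kl_divergence(p, q)
with tf.Session() as sess:
    # identical rows give a KL divergence of ~0
    print(sess.run(kl, {p: np.ones((2, 4)), q: np.ones((2, 4))}))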
utils.py (project: lang2program, author: kelvinguu)
def assert_broadcastable(low_tensor, high_tensor):
    low_shape = tf.shape(low_tensor)
    high_shape = tf.shape(high_tensor)

    low_rank = tf.rank(low_tensor)

    # assert that shapes are compatible
    high_shape_prefix = tf.slice(high_shape, [0], [low_rank])
    assert_op = tf.assert_equal(high_shape_prefix, low_shape, name="assert_shape_prefix")
    return assert_op
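The returned op only takes effect when something depends on it; a typical (illustrative) call site:

low = tf.placeholder(tf.float32, [None])        # e.g. per-row weights
high = tf.placeholder(tf.float32, [None, 10])   # e.g. per-row scores
with tf.control_dependencies([assert_broadcastable(low, high)]):
    weighted = high * tf.expand_dims(low, -1)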
seq_batch.py (project: lang2program, author: kelvinguu)
def __init__(self, values, mask, name='SequenceBatch'):
        with tf.name_scope(name):
            # check that dimensions are correct
            values_shape = tf.shape(values)
            mask_shape = tf.shape(mask)
            values_shape_prefix = tf.slice(values_shape, [0], [2])
            max_rank = max(values.get_shape().ndims, mask.get_shape().ndims)

            assert_op = tf.assert_equal(values_shape_prefix, mask_shape,
                                        data=[values_shape_prefix, mask_shape], summarize=max_rank,
                                        name="assert_shape_prefix")

            with tf.control_dependencies([assert_op]):
                self._values = tf.identity(values, name='values')
                self._mask = tf.identity(mask, name='mask')
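An illustrative construction, assuming this is the constructor of lang2program's SequenceBatch wrapper: values are [batch, seq_len, embed_dim], the mask is [batch, seq_len], and the assert fires at run time if the shape prefixes disagree.

values = tf.placeholder(tf.float32, [32, 20, 300])
mask = tf.placeholder(tf.float32, [32, 20])
batch = SequenceBatch(values, mask)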
combination.py (project: neuralmonkey, author: ufal)
def _tile_encoders_for_beamsearch(self, projected_sentinel):
        sentinel_batch_size = tf.shape(projected_sentinel)[0]
        encoders_batch_size = tf.shape(
            self.encoder_projections_for_ctx[0])[0]

        # during beam search the sentinel batch is the encoder batch tiled
        # beam_size times, so the sizes must divide evenly
        modulo = tf.mod(sentinel_batch_size, encoders_batch_size)

        with tf.control_dependencies([tf.assert_equal(modulo, 0)]):
            beam_size = tf.div(sentinel_batch_size,
                               encoders_batch_size)

        return [tf.tile(proj, [beam_size, 1, 1])
                for proj in self.encoder_projections_for_ctx]
terpret_tf_runtime.py (project: TerpreT, author: 51alg)
def make_batch_consistent(args, set_batch_size=None):
    """
    args[i] should be either [arg_dim] or [batch_size x arg_dim]
    if rank(args[i]) == 1 then tile to [batch_size x arg_dim]
    """
    if set_batch_size is None:
        # infer the batch_size from arg shapes (a list comprehension, since
        # filter() returns a lazy iterator under Python 3)
        batched_args = [x for x in args if x.get_shape().ndims > 1]
        if len(batched_args) == 0:
            batch_size = 1
            is_batched = False
        else:
            # TODO: tf.assert_equal() to check that all batch sizes are consistent?
            batch_size = tf.shape(batched_args[0])[0]
            is_batched = True
    else: 
        batch_size = set_batch_size
        is_batched = True

    # tile any rank-1 args to a consistent batch_size
    tmp_args = []
    for arg in args:
        arg_rank = arg.get_shape().ndims
        assert_rank_1_or_2(arg_rank)
        if arg_rank == 1:
            tmp_args.append(tf.tile(tf.expand_dims(arg,0), [batch_size,1]))
        else:
            tmp_args.append(arg)
    args = tmp_args
    return args, is_batched
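An illustrative call, with assert_rank_1_or_2 assumed to be the module's rank guard: a rank-1 constant is tiled out to match the batched argument.

x = tf.placeholder(tf.float32, [None, 8])    # [batch_size, arg_dim]
b = tf.constant([0.5] * 8)                   # [arg_dim]
args, is_batched = make_batch_consistent([x, b])
# args[1] is now [batch_size, 8], tiled from the rank-1 constant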
pix2pix.py (project: pix2pix-tensorflow, author: affinelayer)
def check_image(image):
    assertion = tf.assert_equal(tf.shape(image)[-1], 3, message="image must have 3 color channels")
    with tf.control_dependencies([assertion]):
        image = tf.identity(image)

    if image.get_shape().ndims not in (3, 4):
        raise ValueError("image must be either 3 or 4 dimensions")

    # make the last dimension 3 so that you can unstack the colors
    shape = list(image.get_shape())
    shape[-1] = 3
    image.set_shape(shape)
    return image

# based on https://github.com/torch/image/blob/9f65c30167b2048ecbe8b7befdc6b2d6d12baee9/generic/image.c
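Usage note (illustrative): the dynamic assert defers the channel check to run time, while the ndims check fails at graph-construction time.

img = tf.placeholder(tf.float32, [None, None, None])  # channels unknown
img = check_image(img)  # static shape becomes [None, None, 3]; the assert
                        # fires at session run time if channels != 3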
sequence.py (project: tensorforce, author: reinforceio)
def tf_process(self, tensor):
        # the rolling buffer below assumes a batch size of exactly one;
        # enforce that, since a bare tf.assert_equal op is never executed
        assertion = tf.assert_equal(x=tf.shape(input=tensor)[0], y=1)
        with tf.control_dependencies(control_inputs=(assertion,)):
            tensor = tf.identity(tensor)

        states_buffer = tf.get_variable(
            name='states-buffer',
            shape=((self.length,) + util.shape(tensor)[1:]),
            dtype=tensor.dtype,
            trainable=False
        )
        index = tf.get_variable(
            name='index',
            dtype=util.tf_dtype('int'),
            initializer=-1,
            trainable=False
        )

        assignment = tf.cond(
            pred=tf.equal(x=index, y=-1),
            true_fn=(lambda: tf.assign(
                ref=states_buffer,
                value=tf.tile(
                    input=tensor,
                    multiples=((self.length,) + tuple(1 for _ in range(util.rank(tensor) - 1)))
                )
            )),
            false_fn=(lambda: tf.assign(ref=states_buffer[index], value=tensor[0]))
        )

        with tf.control_dependencies(control_inputs=(assignment,)):
            previous_states = [states_buffer[(index - n - 1) % self.length] for n in range(self.length)]
            assignment = tf.assign(ref=index, value=((tf.maximum(x=index, y=0) + 1) % self.length))

        with tf.control_dependencies(control_inputs=(assignment,)):
            return tf.expand_dims(input=tf.concat(values=previous_states, axis=-1), axis=0)
pix2pix.py (project: rascal-tensorflow, author: stayrascal)
def check_image(image):
    assertion = tf.assert_equal(
        tf.shape(image)[-1], 3, message="image must have 3 color channels")
    with tf.control_dependencies([assertion]):
        image = tf.identity(image)

    if image.get_shape().ndims not in (3, 4):
        raise ValueError("Image must be either 3 or 4 dimensions")

    shape = list(image.get_shape())
    shape[-1] = 3
    image.set_shape(shape)
    return image
common_attention.py (project: tensor2tensor, author: tensorflow)
def conv_elems_1d(x, factor, out_depth=None):
  """Decrease the length and change the dimensionality.

  Merge/restore/compress factor positions of dim depth of the input into
  a single position of dim out_depth.
  This is basically just a strided convolution with no overlap
  between strides.
  The original length has to be divisible by factor.

  Args:
    x (tf.Tensor): shape [batch_size, length, depth]
    factor (int): Length compression factor.
    out_depth (int): Output depth

  Returns:
    tf.Tensor: shape [batch_size, length//factor, out_depth]
  """
  out_depth = out_depth or x.get_shape().as_list()[-1]
  # with tf.control_dependencies(  # Dynamic assertion
  #     [tf.assert_equal(tf.shape(x)[1] % factor, 0)]):
  x = tf.expand_dims(x, 1)  # [batch_size, 1, length, depth]
  x = tf.layers.conv2d(
      inputs=x,
      filters=out_depth,
      kernel_size=(1, factor),
      strides=(1, factor),
      padding="valid",
      data_format="channels_last",
  )  # [batch_size, 1, length//factor, out_depth]
  x = tf.squeeze(x, 1)  # [batch_size, length//factor, out_depth]
  return x
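A quick shape check (illustrative): compressing a length-12 sequence by a factor of 4.

x = tf.placeholder(tf.float32, [2, 12, 16])  # [batch, length, depth]
y = conv_elems_1d(x, factor=4, out_depth=32)
print(y.get_shape().as_list())               # [2, 3, 32]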
pix2pix.py (project: SketchToFace, author: richliao)
def check_image(image):
    assertion = tf.assert_equal(tf.shape(image)[-1], 3, message="image must have 3 color channels")
    with tf.control_dependencies([assertion]):
        image = tf.identity(image)

    if image.get_shape().ndims not in (3, 4):
        raise ValueError("image must be either 3 or 4 dimensions")

    # make the last dimension 3 so that you can unstack the colors
    shape = list(image.get_shape())
    shape[-1] = 3
    image.set_shape(shape)
    return image

# based on https://github.com/torch/image/blob/9f65c30167b2048ecbe8b7befdc6b2d6d12baee9/generic/image.c
adem_loss.py (project: ADEM, author: Yoctol)
def tf_static_adem_l1_loss(human_score, model_score, M, N):
    hs_shape = human_score.get_shape().as_list()
    ms_shape = model_score.get_shape().as_list()
    with tf.control_dependencies(
        [tf.assert_equal(len(hs_shape), 1, message='score should be 1D.'),
         tf.assert_equal(len(ms_shape), 1, message='score should be 1D.'),
         tf.assert_equal(hs_shape, ms_shape,
                         message='human and model scores should have the same shape.')]):
        return compute_adem_l1_loss(human_score, model_score, M, N)
adem_score.py (project: ADEM, author: Yoctol)
def tf_static_adem_score(context, model_response, reference_response):
    rr_size, rr_dim = reference_response.get_shape().as_list()
    mr_size, mr_dim = model_response.get_shape().as_list()
    ct_size, ct_dim = context.get_shape().as_list()
    with tf.control_dependencies(
        [tf.assert_equal(rr_size, mr_size, message='responses size not equal'),
         tf.assert_equal(ct_size, mr_size, message='context response size not equal')]):
        score, M, N = compute_adem_score(
            context, model_response, reference_response, mr_dim, ct_dim, rr_dim)
    return score, M, N
assertion.py (project: tensorflow-extenteten, author: raviqqe)
def assert_no_nan(tensor):
    return tf.assert_equal(tf.reduce_any(tf.is_nan(tensor)), False)
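As elsewhere, the returned assert op is inert unless executed or depended on; an illustrative guard:

logits = tf.placeholder(tf.float32, [None, 10])
with tf.control_dependencies([assert_no_nan(logits)]):
    safe_logits = tf.identity(logits)  # raises at run time on any NaN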
readers.py (project: yt8m, author: forwchen)
def prepare_serialized_examples(self, serialized_example,
      max_quantized_value=2, min_quantized_value=-2):

    contexts, features = tf.parse_single_sequence_example(
        serialized_example,
        context_features={"video_id": tf.FixedLenFeature(
            [], tf.string),
                          "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            feature_name : tf.FixedLenSequenceFeature([], dtype=tf.string)
            for feature_name in self.feature_names
        })

    # read ground truth labels
    labels = (tf.cast(
        tf.sparse_to_dense(contexts["labels"].values, (self.num_classes,), 1,
            validate_indices=False),
        tf.bool))

    # loads (potentially) different types of features and concatenates them
    num_features = len(self.feature_names)
    assert num_features > 0, "No feature selected: feature_names is empty!"

    assert len(self.feature_names) == len(self.feature_sizes), \
        "length of feature_names (={}) != length of feature_sizes (={})".format(
            len(self.feature_names), len(self.feature_sizes))

    num_frames = -1  # the number of frames in the video
    feature_matrices = [None] * num_features  # an array of different features
    for feature_index in range(num_features):
      feature_matrix, num_frames_in_this_feature = self.get_video_matrix(
          features[self.feature_names[feature_index]],
          self.feature_sizes[feature_index],
          self.max_frames,
          max_quantized_value,
          min_quantized_value)
      if num_frames == -1:
        num_frames = num_frames_in_this_feature
      else:
        # tie the check into the graph; a bare tf.assert_equal is never run
        with tf.control_dependencies(
            [tf.assert_equal(num_frames, num_frames_in_this_feature)]):
          feature_matrix = tf.identity(feature_matrix)

      feature_matrices[feature_index] = feature_matrix

    # cap the number of frames at self.max_frames
    num_frames = tf.minimum(num_frames, self.max_frames)

    # concatenate different features
    video_matrix = tf.concat(feature_matrices, 1)

    # convert to batch format.
    # TODO: Do proper batch reads to remove the IO bottleneck.
    batch_video_ids = tf.expand_dims(contexts["video_id"], 0)
    batch_video_matrix = tf.expand_dims(video_matrix, 0)
    batch_labels = tf.expand_dims(labels, 0)
    batch_frames = tf.expand_dims(num_frames, 0)

    return batch_video_ids, batch_video_matrix, batch_labels, batch_frames
readers.py (project: youtube-8m, author: google)
def prepare_serialized_examples(self, serialized_example,
      max_quantized_value=2, min_quantized_value=-2):

    contexts, features = tf.parse_single_sequence_example(
        serialized_example,
        context_features={"video_id": tf.FixedLenFeature(
            [], tf.string),
                          "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            feature_name : tf.FixedLenSequenceFeature([], dtype=tf.string)
            for feature_name in self.feature_names
        })

    # read ground truth labels
    labels = (tf.cast(
        tf.sparse_to_dense(contexts["labels"].values, (self.num_classes,), 1,
            validate_indices=False),
        tf.bool))

    # loads (potentially) different types of features and concatenates them
    num_features = len(self.feature_names)
    assert num_features > 0, "No feature selected: feature_names is empty!"

    assert len(self.feature_names) == len(self.feature_sizes), \
        "length of feature_names (={}) != length of feature_sizes (={})".format(
            len(self.feature_names), len(self.feature_sizes))

    num_frames = -1  # the number of frames in the video
    feature_matrices = [None] * num_features  # an array of different features
    for feature_index in range(num_features):
      feature_matrix, num_frames_in_this_feature = self.get_video_matrix(
          features[self.feature_names[feature_index]],
          self.feature_sizes[feature_index],
          self.max_frames,
          max_quantized_value,
          min_quantized_value)
      if num_frames == -1:
        num_frames = num_frames_in_this_feature
      else:
        # tie the check into the graph; a bare tf.assert_equal is never run
        with tf.control_dependencies(
            [tf.assert_equal(num_frames, num_frames_in_this_feature)]):
          feature_matrix = tf.identity(feature_matrix)

      feature_matrices[feature_index] = feature_matrix

    # cap the number of frames at self.max_frames
    num_frames = tf.minimum(num_frames, self.max_frames)

    # concatenate different features
    video_matrix = tf.concat(feature_matrices, 1)

    # convert to batch format.
    # TODO: Do proper batch reads to remove the IO bottleneck.
    batch_video_ids = tf.expand_dims(contexts["video_id"], 0)
    batch_video_matrix = tf.expand_dims(video_matrix, 0)
    batch_labels = tf.expand_dims(labels, 0)
    batch_frames = tf.expand_dims(num_frames, 0)

    return batch_video_ids, batch_video_matrix, batch_labels, batch_frames

