Python mod() usage examples (source code)

lstm.py (project: relaax, author: deeplearninc)
def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with tf.variable_scope(scope or type(self).__name__):  # "DilatedLSTMCell"
            # Parameters of gates are concatenated into one multiply for efficiency.
            c, h = tf.split(state, 2, axis=1)
            concat = self._linear([inputs, h], 4 * self._num_units, True)

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = tf.split(concat, 4, axis=1)

            new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * tf.tanh(j)
            new_h = tf.tanh(new_c) * tf.sigmoid(o)

            # update relevant cores
            timestep = tf.assign_add(self._timestep, 1)
            core_to_update = tf.mod(timestep, self._cores)

            updated_h = self._hold_mask[core_to_update] * h + self._dilated_mask[core_to_update] * new_h

            return updated_h, tf.concat([new_c, updated_h], axis=1)
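
In this cell, tf.mod keeps the update index cycling through the dilation cores as the step counter grows. A minimal sketch of that cycling (my own, assuming TensorFlow 1.x and a hypothetical core count):

import tensorflow as tf

# Hypothetical setup: a scalar step counter and 4 dilation cores.
cores = 4
timestep = tf.Variable(0, dtype=tf.int32)
core_to_update = tf.mod(tf.assign_add(timestep, 1), cores)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(5):
        print(sess.run(core_to_update))  # 1, 2, 3, 0, 1 -- cycles over the cores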
bingrad_common.py (project: terngrad, author: wenwei202)
def ternary_encoder(input_data):
  """Encoding and compressing the signs """
  a = tf.sign(input_data) # -1, 0, 1
  a = tf.add(a,1) # shift -1,0,1 to 0,1,2 (2'b00,2'b01,2'b10)
  a = tf.reshape(a,[-1])
  pad_size = 4 - tf.mod(tf.size(a), 4)
  pad = tf.range(0.0, pad_size)
  a = tf.concat([a, pad], 0)
  a_split1, a_split2, a_split3, a_split4 = tf.split(a, 4)  # assume the size is divisible by 4

  # encode 4 grads into 1 Byte
  sum_1 = tf.add(a_split1, a_split2*4)
  sum_2 = tf.add(a_split3*16, a_split4*64)
  sum_all = tf.add(sum_1, sum_2)
  encoded = tf.cast(sum_all, tf.uint8)
  return encoded
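
A usage sketch (my own, not from the terngrad sources): encode a small gradient tensor and inspect the packed bytes.

import tensorflow as tf

grads = tf.constant([0.5, -1.2, 0.0, 3.0, -0.1, 2.2, 0.0, -4.0])
encoded = ternary_encoder(grads)
with tf.Session() as sess:
    print(sess.run(encoded))  # 8 signs packed (plus padding) into 3 uint8 values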
bingrad_common.py (project: terngrad, author: wenwei202)
def ternary_decoder(encoded_data, scaler, shape):
  """Decoding the signs to float format """
  a = tf.cast(encoded_data, tf.int32)
  a_split1 = tf.mod(a,4)
  a_split2 = tf.to_int32(tf.mod(a/4,4))
  a_split3 = tf.to_int32(tf.mod(a/16,4))
  a_split4 = tf.to_int32(tf.mod(a/64,4))
  a = tf.concat([a_split1, a_split2, a_split3, a_split4], 0)
  real_size = tf.reduce_prod(shape)
  a = tf.to_float(a)
  a = tf.gather(a, tf.range(0,real_size))

  a = tf.reshape(a, shape)
  a = tf.subtract(a,1)
  decoded = a*scaler
  return decoded
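
A round-trip sanity check (my own sketch, assuming both functions above are in scope) makes the 2-bit packing visible:

import tensorflow as tf

values = tf.constant([0.5, -1.2, 0.0, 3.0])
encoded = ternary_encoder(values)
decoded = ternary_decoder(encoded, scaler=1.0, shape=[4])
with tf.Session() as sess:
    print(sess.run(decoded))  # [ 1. -1.  0.  1.] -- only the signs survive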
test_ternary_encoder_decoder.py (project: terngrad, author: wenwei202)
def ternary_encoder(input_data):
  """Encoding and compressing the signs """
  a = tf.sign(input_data) # -1, 0, 1
  a = tf.add(a,1) # shift -1,0,1 to 0,1,2 (2'b00,2'b01,2'b10)
  a = tf.reshape(a,[-1])
  pad_size = 4 - tf.mod(tf.size(a), 4)
  pad = tf.range(0.0, pad_size)
  a = tf.concat([a, pad], 0)
  a_split1, a_split2, a_split3, a_split4 = tf.split(a, 4)  # assume the size is divisible by 4

  # encode 4 grads into 1 Byte
  sum_1 = tf.add(a_split1, a_split2*4)
  sum_2 = tf.add(a_split3*16, a_split4*64)
  sum_all = tf.add(sum_1, sum_2)
  encoded = tf.cast(sum_all, tf.uint8)
  return encoded
spherical.py (project: monodepth360, author: srijanparmeshwar)
def fast_rotate(input_image, dx = 0, dy = 0):
    # Basic rotations (constant disparities) for equirectangular
    # images. For image augmentation (y-axis rotations), this method is
    # preferable to the more general rotation function.
    height = tf.shape(input_image)[0]
    width = tf.shape(input_image)[1]

    # Shift coordinate grid for inverse warp.
    ix, iy = tf.meshgrid(tf.range(width), tf.range(height))
    ox = tf.mod(ix - dx, width)
    oy = tf.mod(iy - dy, height)
    indices = tf.stack([oy, ox], 2)

    # Perform exact sampling (as we are using integer coordinates).
    return tf.gather_nd(input_image, indices)
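
A usage sketch (values are my own): a quarter turn around the vertical axis is just dx = width // 4.

import numpy as np
import tensorflow as tf

panorama = tf.placeholder(tf.float32, [256, 512, 3])
rotated = fast_rotate(panorama, dx=128)  # 128 = 512 // 4, i.e. a 90-degree yaw
with tf.Session() as sess:
    out = sess.run(rotated, {panorama: np.zeros([256, 512, 3], np.float32)})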

PhasedLSTMCell.py (project: PLSTM, author: Enny1991)
def random_exp_initializer(minval=0, maxval=None, seed=None,
                           dtype=dtypes.float32):
    """Returns an initializer that generates tensors with an exponential distribution.

    Args:
      minval: A python scalar or a scalar tensor. Lower bound of the range
        of random values to generate.
      maxval: A python scalar or a scalar tensor. Upper bound of the range
        of random values to generate.  Defaults to 1 for float types.
      seed: A Python integer. Used to create random seeds. See
        [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
        for behavior.
      dtype: The data type.

    Returns:
      An initializer that generates tensors with an exponential distribution.
    """

    def _initializer(shape, dtype=dtype, partition_info=None):
        return tf.exp(random_ops.random_uniform(shape, minval, maxval, dtype, seed=seed))

    return _initializer
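
A wiring sketch (my assumption; in the Phased LSTM literature this style of initializer is used for the period parameter tau):

import tensorflow as tf

tau = tf.get_variable(
    "tau", shape=[128],
    initializer=random_exp_initializer(minval=0.0, maxval=3.0))
# tau ~ exp(U[0, 3)), i.e. log-uniformly distributed over [1, e^3)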


# Here we need to register the gradient for the mod operation
PhasedLSTMCell_v1.py (project: PLSTM, author: Enny1991)
def random_exp_initializer(minval=0, maxval=None, seed=None,
                           dtype=dtypes.float32):
    """Returns an initializer that generates tensors with an exponential distribution.

    Args:
      minval: A python scalar or a scalar tensor. Lower bound of the range
        of random values to generate.
      maxval: A python scalar or a scalar tensor. Upper bound of the range
        of random values to generate.  Defaults to 1 for float types.
      seed: A Python integer. Used to create random seeds. See
        [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
        for behavior.
      dtype: The data type.

    Returns:
      An initializer that generates tensors with an exponential distribution.
    """

    def _initializer(shape, dtype=dtype, partition_info=None):
        return tf.exp(random_ops.random_uniform(shape, minval, maxval, dtype, seed=seed))

    return _initializer


# Here we need to register the gradient for the mod operation
ops.py (project: DMNN, author: magnux)
def tf_mod(x, y, name=None):
    """Differentiable mod based in numpy
    Args
        x: first argument
        y: second argument
    Returns
        mod between x and y
    """

    def np_mod(x, y):
        return np.mod(x, y, dtype=np.float32)

    def modgrad(op, grad):
        x = op.inputs[0] # the first argument (normally you need those to calculate the gradient, like the gradient of x^2 is 2x. )
        y = op.inputs[1] # the second argument

        return grad * 1, grad * 0 #the propagated gradient with respect to the first and second argument respectively

    def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
        # Need to generate a unique name to avoid duplicates:
        rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))

        tf.RegisterGradient(rnd_name)(grad)  # see _MySquareGrad for grad example
        g = tf.get_default_graph()
        with g.gradient_override_map({"PyFunc": rnd_name}):
            return tf.py_func(func, inp, Tout, stateful=stateful, name=name)

    with ops.name_scope(name, "mod", [x,y]) as name:
        z = py_func(np_mod,
                    [x,y],
                    [tf.float32],
                    name=name,
                    grad=modgrad)  # <-- here's the call to the gradient
        return tf.reshape(z[0], tf.shape(x))
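
A quick check (my own, assuming TF 1.x) that the override actually provides a gradient, which the stock mod op lacks:

import tensorflow as tf

x = tf.constant([5.0, 7.5])
y = tf.constant([3.0, 2.0])
z = tf_mod(x, y)
gx, gy = tf.gradients(z, [x, y])
with tf.Session() as sess:
    print(sess.run([z, gx, gy]))  # [2. 1.5], dz/dx = [1. 1.], dz/dy = [0. 0.]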
train.py (project: triplet-reid, author: VisualComputingInstitute)
def sample_k_fids_for_pid(pid, all_fids, all_pids, batch_k):
    """ Given a PID, select K FIDs of that specific PID. """
    possible_fids = tf.boolean_mask(all_fids, tf.equal(all_pids, pid))

    # The following simply uses a subset of K of the possible FIDs
    # if more than, or exactly K are available. Otherwise, we first
    # create a padded list of indices which contain a multiple of the
    # original FID count such that all of them will be sampled equally likely.
    count = tf.shape(possible_fids)[0]
    padded_count = tf.cast(tf.ceil(batch_k / count), tf.int32) * count
    full_range = tf.mod(tf.range(padded_count), count)

    # Sampling is always performed by shuffling and taking the first k.
    shuffled = tf.random_shuffle(full_range)
    selected_fids = tf.gather(possible_fids, shuffled[:batch_k])

    return selected_fids, tf.fill([batch_k], pid)
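
The tf.mod trick above makes undersampled identities wrap around: with count = 3 possible FIDs and batch_k = 5, padded_count = ceil(5 / 3) * 3 = 6 and full_range = [0, 1, 2, 0, 1, 2], so after shuffling every FID is selected with equal probability.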
phased_lstm.py (project: tensorflow-phased-lstm, author: philipperemy)
def random_exp_initializer(minval=0, maxval=None, seed=None,
                           dtype=dtypes.float32):
    '''Returns an initializer that generates tensors with an exponential distribution.
    Args:
      minval: A python scalar or a scalar tensor. Lower bound of the range
        of random values to generate.
      maxval: A python scalar or a scalar tensor. Upper bound of the range
        of random values to generate.  Defaults to 1 for float types.
      seed: A Python integer. Used to create random seeds. See
        [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
        for behavior.
      dtype: The data type.
    Returns:
      An initializer that generates tensors with an exponential distribution.
    '''

    def _initializer(shape, dtype=dtype, partition_info=None):
        return tf.exp(random_ops.random_uniform(shape, minval, maxval, dtype, seed=seed))

    return _initializer


# Register the gradient for the mod operation. tf.mod() does not have a gradient implemented.
layers.py (project: fold, author: tensorflow)
def _process_batch(self, batch):
    # We have to call tf.abs before calling tf.mod, because tf.mod may
    # give negative outputs when given negative inputs.
    if self._cast: batch = tf.cast(batch, tf.int32)
    if self._mod_inputs: batch = tf.mod(tf.abs(batch), self._num_buckets)
    return tf.gather(self._weights, batch)
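
The abs-then-mod pattern is a standard feature-hashing idiom; a standalone sketch (names are mine):

import tensorflow as tf

num_buckets = 10
ids = tf.constant([-7, 3, 42])
buckets = tf.mod(tf.abs(ids), num_buckets)  # [7, 3, 2] -- always a valid bucket index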
utils.py (project: zhusuan, author: thu-ml)
def __mod__(self, other):
        return tf.mod(self, other)
utils.py (project: zhusuan, author: thu-ml)
def __rmod__(self, other):
        return tf.mod(other, self)
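
Defined together, __mod__ and __rmod__ let zhusuan's tensor-like objects take part in Python's % operator on either side of an expression, delegating the actual computation to tf.mod.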
utils.py (project: tf.rasterizer, author: vahidk)
def unpack_colors(color, axis, normalize=True):
    r = tf.mod(color, 256)
    g = tf.mod(tf.floordiv(color, 256), 256)
    b = tf.mod(tf.floordiv(color, 256 ** 2), 256)
    color = tf.stack([r, g, b], axis=axis)
    if normalize:
        color = tf.div(tf.to_float(color), 255.)
    return color
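
Usage sketch (my own): unpacking the 24-bit color 0x336699 into normalized channels.

import tensorflow as tf

color = tf.constant([0x336699])
rgb = unpack_colors(color, axis=1)
with tf.Session() as sess:
    print(sess.run(rgb))  # [[0.6 0.4 0.2]] -- r, g, b are the low, middle, high bytes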
test_ternary_encoder_decoder.py (project: terngrad, author: wenwei202)
def ternary_decoder(encoded_data, scaler, shape):
  """Decoding the signs to float format """
  a = tf.cast(encoded_data, tf.int32)
  a_split1 = tf.mod(a,4)
  a_split2 = tf.to_int32(tf.mod(a/4,4))
  a_split3 = tf.to_int32(tf.mod(a/16,4))
  a_split4 = tf.to_int32(tf.mod(a/64,4))
  a = tf.concat([a_split1, a_split2, a_split3, a_split4], 0)
  real_size = tf.reduce_prod(shape)
  a = tf.to_float(a)
  a = tf.gather(a, tf.range(0,real_size))
  a = tf.reshape(a, shape)
  a = tf.subtract(a, 1)
  decoded = a*scaler
  return decoded
dataset.py (project: instacart-basket-prediction, author: colinmorris)
def rejection_resample(self, ds):
    nclasses = 1000
    def _classfunc(*tensors):
      as_dict = self.dictify(tensors)
      uids = as_dict['uid']
      return tf.mod(uids, nclasses)
    target_dist = tf.constant(1/nclasses, shape=(nclasses,))
    return tf.contrib.data.rejection_resample(ds, _classfunc, target_dist)
core_test.py (project: lsdc, author: febert)
def setUp(self):
    super(CoreBinaryOpsTest, self).setUp()

    self.x_probs_broadcast_tensor = tf.reshape(
        self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

    self.channel_probs_broadcast_tensor = tf.reshape(
        self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

    # == and != are not element-wise for tf.Tensor, so they shouldn't be
    # elementwise for LabeledTensor, either.
    self.ops = [
        ('add', operator.add, tf.add, core.add),
        ('sub', operator.sub, tf.sub, core.sub),
        ('mul', operator.mul, tf.mul, core.mul),
        ('div', operator.truediv, tf.div, core.div),
        ('mod', operator.mod, tf.mod, core.mod),
        ('pow', operator.pow, tf.pow, core.pow_function),
        ('equal', None, tf.equal, core.equal),
        ('less', operator.lt, tf.less, core.less),
        ('less_equal', operator.le, tf.less_equal, core.less_equal),
        ('not_equal', None, tf.not_equal, core.not_equal),
        ('greater', operator.gt, tf.greater, core.greater),
        ('greater_equal', operator.ge, tf.greater_equal, core.greater_equal),
    ]
    self.test_lt_1 = self.x_probs_lt
    self.test_lt_2 = self.channel_probs_lt
    self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
    self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
    self.broadcast_axes = [self.a0, self.a1, self.a3]
replay_buffer.py (project: openai-rl, author: morgangiraud)
def append(self, tensors):
        position = tf.mod(self.index, self.capacity)
        append_ops = [self.buffers[key][position].assign(tensor) for key, tensor in zip(self.buffers, tensors)]
        with tf.control_dependencies(append_ops):
            inc_index_op = self.index.assign_add(1)

        return inc_index_op
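
tf.mod turns the monotonically growing write index into a circular-buffer slot; a minimal sketch of the same idea (names are mine):

import tensorflow as tf

capacity = 3
index = tf.Variable(0, dtype=tf.int32)
position = tf.mod(index, capacity)
advance = index.assign_add(1)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(5):
        print(sess.run(position))  # 0, 1, 2, 0, 1 -- wraps at capacity
        sess.run(advance)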
combination.py (project: neuralmonkey, author: ufal)
def _tile_encoders_for_beamsearch(self, projected_sentinel):
        sentinel_batch_size = tf.shape(projected_sentinel)[0]
        encoders_batch_size = tf.shape(
            self.encoder_projections_for_ctx[0])[0]

        modulo = tf.mod(sentinel_batch_size, encoders_batch_size)

        with tf.control_dependencies([tf.assert_equal(modulo, 0)]):
            beam_size = tf.div(sentinel_batch_size,
                               encoders_batch_size)

        return [tf.tile(proj, [beam_size, 1, 1])
                for proj in self.encoder_projections_for_ctx]
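
Here tf.mod serves as a runtime sanity check: with an encoder batch of 16 and a beam of 4, the sentinel batch is 64, tf.mod(64, 16) = 0 passes the assertion, and tf.div then recovers beam_size = 4.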
spherical.py (project: monodepth360, author: srijanparmeshwar)
def lat_long_to_equirectangular_uv(S, T):
    # Convert latitude and longitude to UV coordinates
    # on an equirectangular plane.
    u = tf.mod(S / (2.0 * np.pi) - 0.25, 1.0)
    v = tf.mod(T / np.pi, 1.0)
    return u, v
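
A small numeric check (values are mine): tf.mod wraps the u coordinate back into [0, 1).

import numpy as np
import tensorflow as tf

S = tf.constant([0.0, np.pi / 2.0])          # longitudes in radians
T = tf.constant([np.pi / 2.0, np.pi / 4.0])  # angles from the pole
u, v = lat_long_to_equirectangular_uv(S, T)
with tf.Session() as sess:
    print(sess.run([u, v]))  # u = [0.75, 0.0], v = [0.5, 0.25]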

attention.py (project: THUMT, author: thumt)
def add_timing_signal(x, min_timescale=1.0, max_timescale=1.0e4, name=None):
    """
    This function adds a bunch of sinusoids of different frequencies to a
    Tensor. See paper: Attention is all you need

    :param x: A tensor with shape [batch, length, channels]
    :param min_timescale: A floating point number
    :param max_timescale: A floating point number
    :param name: An optional string

    :returns: a Tensor the same shape as x.
    """

    with tf.name_scope(name, default_name="add_timing_signal", values=[x]):
        length = tf.shape(x)[1]
        channels = tf.shape(x)[2]
        position = tf.to_float(tf.range(length))
        num_timescales = channels // 2

        log_timescale_increment = (
            math.log(float(max_timescale) / float(min_timescale)) /
            (tf.to_float(num_timescales) - 1)
        )
        inv_timescales = min_timescale * tf.exp(
            tf.to_float(tf.range(num_timescales)) * -log_timescale_increment
        )

        scaled_time = (tf.expand_dims(position, 1) *
                       tf.expand_dims(inv_timescales, 0))
        signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
        signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
        signal = tf.reshape(signal, [1, length, channels])

        return x + signal
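
Usage sketch (mine): the signal is added to the embedded inputs once, before the first attention layer; the tf.mod pad only matters when the channel count is odd.

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, None, 512])  # [batch, length, channels]
encoded = add_timing_signal(inputs)  # same shape, with position information mixed in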
seq2seq_helpers.py (project: DeepDeepParser, author: janmbuys)
def gather_forced_att_logits(encoder_input_symbols, encoder_decoder_vocab_map, 
                             att_logit, batch_size, attn_length, 
                             target_vocab_size):
  """Gathers attention weights as logits for forced attention."""
  flat_input_symbols = tf.reshape(encoder_input_symbols, [-1])
  flat_label_symbols = tf.gather(encoder_decoder_vocab_map,
      flat_input_symbols)
  flat_att_logits = tf.reshape(att_logit, [-1])

  flat_range = tf.to_int64(tf.range(tf.shape(flat_label_symbols)[0]))
  batch_inds = tf.floordiv(flat_range, attn_length)
  position_inds = tf.mod(flat_range, attn_length)
  attn_vocab_inds = tf.transpose(tf.pack(
      [batch_inds, position_inds, tf.to_int64(flat_label_symbols)]))

  # Exclude indexes of entries with flat_label_symbols[i] = -1.
  included_flat_indexes = tf.reshape(tf.where(tf.not_equal(
      flat_label_symbols, -1)), [-1])
  included_attn_vocab_inds = tf.gather(attn_vocab_inds, 
      included_flat_indexes)
  included_flat_att_logits = tf.gather(flat_att_logits, 
      included_flat_indexes)

  sparse_shape = tf.to_int64(tf.pack(
      [batch_size, attn_length, target_vocab_size]))

  sparse_label_logits = tf.SparseTensor(included_attn_vocab_inds, 
      included_flat_att_logits, sparse_shape)
  forced_att_logit_sum = tf.sparse_reduce_sum(sparse_label_logits, [1])

  forced_att_logit = tf.reshape(forced_att_logit_sum, 
      [-1, target_vocab_size])

  return forced_att_logit
ops.py (project: DMNN, author: magnux)
def dk_mod(x, y):
    """Differentiable mod, Donald Knuth style
    Args
        x: first argument
        y: second argument
    Returns
        mod between x and y
    """
    return x - y * tf.floor(x / y)
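
A quick comparison (my own sketch) of Knuth-style floored mod against C-style truncated mod on a negative input:

import tensorflow as tf

x = tf.constant([-7.0, 7.0])
y = tf.constant([3.0, 3.0])
with tf.Session() as sess:
    print(sess.run(dk_mod(x, y)))          # [2. 1.] -- sign follows the divisor
    print(sess.run(tf.truncatemod(x, y)))  # [-1. 1.] -- sign follows the dividend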

# Register the gradient for the mod operation. tf.mod() does not have a gradient implemented.
common_attention.py (project: tensor2tensor, author: tensorflow)
def add_timing_signal_1d_given_position(x,
                                        position,
                                        min_timescale=1.0,
                                        max_timescale=1.0e4):
  """Adds sinusoids of diff frequencies to a Tensor, with timing position given.

  Args:
    x: a Tensor with shape [batch, length, channels]
    position: a Tensor with shape [batch, length]
    min_timescale: a float
    max_timescale: a float

  Returns:
    a Tensor the same shape as x.
  """
  channels = common_layers.shape_list(x)[2]
  num_timescales = channels // 2
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (tf.to_float(num_timescales) - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = (
      tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(
          tf.expand_dims(inv_timescales, 0), 0))
  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
  signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
  return x + signal
common_attention.py (project: tensor2tensor, author: tensorflow)
def attention_image_summary(attn, image_shapes=None):
  """Compute color image summary.

  Args:
    attn: a Tensor with shape [batch, num_heads, query_length, memory_length]
    image_shapes: optional tuple of integer scalars.
      If the query positions and memory positions represent the
      pixels of flattened images, then pass in their dimensions:
        (query_rows, query_cols, memory_rows, memory_cols).
      If the query positions and memory positions represent the
      pixels x channels of flattened images, then pass in their dimensions:
        (query_rows, query_cols, query_channels,
         memory_rows, memory_cols, memory_channels).
  """
  num_heads = common_layers.shape_list(attn)[1]
  # [batch, query_length, memory_length, num_heads]
  image = tf.transpose(attn, [0, 2, 3, 1])
  image = tf.pow(image, 0.2)  # for high-dynamic-range
  # Each head will correspond to one of RGB.
  # pad the heads to be a multiple of 3
  image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, tf.mod(-num_heads, 3)]])
  image = split_last_dimension(image, 3)
  image = tf.reduce_max(image, 4)
  if image_shapes is not None:
    if len(image_shapes) == 4:
      q_rows, q_cols, m_rows, m_cols = list(image_shapes)
      image = tf.reshape(image, [-1, q_rows, q_cols, m_rows, m_cols, 3])
      image = tf.transpose(image, [0, 1, 3, 2, 4, 5])
      image = tf.reshape(image, [-1, q_rows * m_rows, q_cols * m_cols, 3])
    else:
      assert len(image_shapes) == 6
      q_rows, q_cols, q_channels, m_rows, m_cols, m_channels = list(
          image_shapes)
      image = tf.reshape(
          image,
          [-1, q_rows, q_cols, q_channels, m_rows, m_cols, m_channels, 3])
      image = tf.transpose(image, [0, 1, 4, 3, 2, 5, 6, 7])
      image = tf.reshape(
          image,
          [-1, q_rows * m_rows * q_channels, q_cols * m_cols * m_channels, 3])
  tf.summary.image("attention", image, max_outputs=1)
test_dlstm.py (project: meta-learning, author: ioanachelu)
def fast_dlstm(s_t, state_in):
        def dilate_one_time_step(one_h, switcher, num_chunks):
            h_slices = []
            h_size = 256
            chunk_step_size = h_size // num_chunks
            for switch_step, h_step in zip(range(num_chunks), range(0, h_size, chunk_step_size)):
                one_switch = switcher[switch_step]
                h_s = conditional_backprop(one_switch, one_h[h_step: h_step + chunk_step_size])
                h_slices.append(h_s)
            dh = tf.stack(h_slices)
            dh = tf.reshape(dh, [-1, 256])
            return dh

        lstm = rnn.LSTMCell(256, state_is_tuple=True)
        chunks = 8

        def dlstm_scan_fn(previous_output, current_input):
            out, state_out = lstm(current_input, previous_output[1])
            i = previous_output[2]
            basis_i = tf.one_hot(i, depth=chunks)
            state_out_dilated = dilate_one_time_step(tf.squeeze(state_out[0]), basis_i, chunks)
            state_out = rnn.LSTMStateTuple(state_out_dilated, state_out[1])
            i += tf.constant(1)
            new_i = tf.mod(i, chunks)
            return out, state_out, new_i

        rnn_outputs, final_states, mod_idxs = tf.scan(dlstm_scan_fn,
                                                      tf.transpose(s_t, [1, 0, 2]),
                                                      initializer=(
                                                      state_in[1], rnn.LSTMStateTuple(*state_in), tf.constant(0)))

        state_out = [final_states[0][-1, 0, :], final_states[1][-1, 0, :]]
        cell_states = final_states[0][:, 0, :]
        out_states = final_states[1][:, 0, :]
        return out_states, cell_states, state_out
train.py (project: EDSR, author: iwtw)
def _filter_function(n_gpus):
    def f(x, y):
        a = tf.equal(tf.mod(tf.shape(x)[0], n_gpus), 0)
        b = tf.equal(tf.mod(tf.shape(y)[0], n_gpus), 0)
        return tf.logical_and(a, b)
    return f
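
A usage sketch (my assumption) with the tf.data API, where the predicate drops any batch whose size does not divide evenly across the GPUs:

# Assuming `dataset` yields (input, label) batches:
dataset = dataset.filter(_filter_function(n_gpus=4))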
ops.py (project: tfdeploy, author: riga)
def test_Mod(self):
        t = tf.mod(*self.random((4, 3), (4, 3)))
        self.check(t)

