Example source code for reverse() in Python

cwt.py (project: cwt-tensorflow, author: nickgeoca)
def conv1DWavelet(wav, waveletWidth, waveletEquation):
    kernelSamples = waveletWidth * 10
    kernel = waveletEquation(waveletWidth, kernelSamples)
    kernel = tf.reverse(kernel, [0])
    kernel = tf.reshape(kernel, tf.stack([kernelSamples,1,1,1]))

    conv = tf.nn.conv2d(wav, kernel, [1,1,1,1], padding='SAME') 
    conv = tf.squeeze(tf.squeeze(conv))

    return conv
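
A minimal usage sketch for the helper above, assuming TensorFlow 1.x. The ricker function below is a hypothetical stand-in for waveletEquation; any callable that returns the requested number of samples for a given width would work.

import numpy as np
import tensorflow as tf

def ricker(width, samples):
    # Ricker ("Mexican hat") wavelet sampled at `samples` points.
    t = np.linspace(-width, width, int(samples), dtype=np.float32)
    a = float(width) / 2.0
    w = (1.0 - (t / a) ** 2) * np.exp(-0.5 * (t / a) ** 2)
    return tf.constant(w)

# A toy 512-sample signal shaped [batch, length, 1, 1] for tf.nn.conv2d.
signal = tf.reshape(tf.sin(tf.linspace(0.0, 20.0, 512)), [1, 512, 1, 1])
response = conv1DWavelet(signal, waveletWidth=8, waveletEquation=ricker)  # shape [512]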
dataset.py (project: PSPNet-Keras-tensorflow, author: Vladkryvoruchko)
def load_image(self, image_path, is_jpeg):
        # Read the file
        file_data = tf.read_file(image_path)
        # Decode the image data
        img = tf.cond(
            is_jpeg,
            lambda: tf.image.decode_jpeg(file_data, channels=self.data_spec.channels),
            lambda: tf.image.decode_png(file_data, channels=self.data_spec.channels))
        if self.data_spec.expects_bgr:
            # Convert from RGB channel ordering to BGR
            # This matches, for instance, how OpenCV orders the channels.
            img = tf.reverse(img, [False, False, True])
        return img
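
A note on the channel flip above: tf.reverse(img, [False, False, True]) is the pre-1.0 boolean-mask form of the API. On TensorFlow 1.x and later, the equivalent RGB-to-BGR conversion for an HWC image looks like this sketch:

img_bgr = tf.reverse(img, axis=[2])   # flip the channel dimension
img_bgr = img[:, :, ::-1]             # or the same thing via strided slicing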
image_reader.py (project: tf_base, author: ozansener)
def process(self):
    idx, image_path = self.path_queue.dequeue()
    img = tf.image.decode_jpeg(tf.read_file(image_path), channels=3)  # decode as a 3-channel RGB image
    img = tf.reverse(img, [False, False, True]) # RGB -> BGR
    return (idx, ImageReader.process_single_image(img, self.image_spec['scale_size'], 
                                               self.image_spec['crop_size'], 
                                               self.image_spec['mean']))
tensorflow_backend.py (project: keras, author: GeekLiB)
def reverse(x, axes):
    '''Reverse a tensor along the specified axes
    '''
    if type(axes) == int:
        axes = [axes]
    dims = [True if i in axes else False for i in range(len(x.get_shape()._dims))]
    return tf.reverse(x, dims)


# VALUE MANIPULATION
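
A minimal usage sketch of the backend helper above. The boolean-mask call it builds (tf.reverse(x, dims)) is the pre-1.0 TensorFlow API; from TensorFlow 1.0 onward tf.reverse takes axis indices directly.

import tensorflow as tf

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
y = reverse(x, 1)        # with the helper above on TF 0.x: [[3, 2, 1], [6, 5, 4]]
y = tf.reverse(x, [1])   # TF >= 1.0 equivalent: pass axis indices instead of a mask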
tensorflow_backend.py (project: keras, author: GeekLiB)
def ctc_label_dense_to_sparse(labels, label_lengths):
    # undocumented feature soon to be made public
    from tensorflow.python.ops import functional_ops
    label_shape = tf.shape(labels)
    num_batches_tns = tf.pack([label_shape[0]])
    max_num_labels_tns = tf.pack([label_shape[1]])

    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < tf.fill(max_num_labels_tns, current_input)

    init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)
    dense_mask = functional_ops.scan(range_less_than, label_lengths,
                                     initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
                             label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]),
                                                  max_num_labels_tns), tf.reverse(label_shape, [True])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)
    indices = tf.transpose(tf.reshape(tf.concat(0, [batch_ind, label_ind]), [2, -1]))

    vals_sparse = tf.gather_nd(labels, indices)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))
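
An illustration of the intended call, hedged: the function targets pre-1.0 TensorFlow (tf.pack, axis-first tf.concat), so treat this as a sketch of the expected inputs and output rather than code for current releases. labels is a dense, zero-padded [batch, max_len] matrix, label_lengths gives the number of valid entries per row, and the result is the tf.SparseTensor that tf.nn.ctc_loss expects.

labels = tf.constant([[1, 2, 3, 0],
                      [4, 5, 0, 0]], dtype=tf.int32)
label_lengths = tf.constant([3, 2], dtype=tf.int32)
sparse_labels = ctc_label_dense_to_sparse(labels, label_lengths)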
layers.py (project: third_person_im, author: bstadie)
def get_output_for(self, input, **kwargs):
        axis = self.axis
        ndims = input.get_shape().ndims
        if axis < 0:
            axis += ndims
        if isinstance(self.slice, int) and self.slice < 0:
            return tf.reverse(input, [False] * self.axis + [True] + [False] * (ndims - axis - 1))[
                (slice(None),) * axis + (-1 - self.slice,) + (slice(None),) * (ndims - axis - 1)
                ]
        # import ipdb; ipdb.set_trace()
        return input[(slice(None),) * axis + (self.slice,) + (slice(None),) * (ndims - axis - 1)]
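
The negative-slice branch above works by reversing the sliced axis and then indexing it from the front. A small TF >= 1.0 illustration of the same trick:

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
last = tf.reverse(x, [1])[:, 0]   # same as x[:, -1] -> [3, 6]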
crbm.py (project: CDBN-for-Tensorflow, author: shygiants)
def __gradient_ascent(self):
        # Gradient ascent
        with tf.name_scope('gradient') as _:
            self.grad_bias = tf.mul(tf.reduce_mean(self.hid_prob0 - self.hid_prob1, [0, 1, 2]),
                                    self.learning_rate * self.batch_size, name='grad_bias')
            self.grad_cias = tf.mul(tf.reduce_mean(self.vis_0 - self.vis_1, [0, 1, 2]),
                                    self.learning_rate * self.batch_size, name='grad_cias')

            # TODO: Is there any method to calculate batch-elementwise convolution?
            temp_grad_weights = tf.zeros(self.weight_shape)
            hid_filter0 = tf.reverse(self.hid_prob0, [False, True, True, False])
            hid_filter1 = tf.reverse(self.hid_prob1, [False, True, True, False])
            for idx in range(0, self.batch_size):
                hid0_ith = self.__get_ith_hid_4d(hid_filter0, idx)
                hid1_ith = self.__get_ith_hid_4d(hid_filter1, idx)

                positive = [0] * self.depth
                negative = [0] * self.depth
                one_ch_conv_shape = [self.width, self.height, 1, self.num_features]
                for jdx in range(0, self.depth):
                    positive[jdx] = tf.reshape(self.__conv2d(self.__get_ij_vis_4d(self.vis_0, idx, jdx), hid0_ith),
                                               one_ch_conv_shape)
                    negative[jdx] = tf.reshape(self.__conv2d(self.__get_ij_vis_4d(self.vis_1, idx, jdx), hid1_ith),
                                               one_ch_conv_shape)
                positive = tf.concat(2, positive)
                negative = tf.concat(2, negative)
                temp_grad_weights = tf.add(temp_grad_weights,
                                           tf.slice(tf.sub(positive, negative), [0, 0, 0, 0], self.weight_shape))

            self.grad_weights = tf.mul(temp_grad_weights, self.learning_rate / (self.width * self.height))
        self.gradient_ascent = [self.weights.assign_add(self.grad_weights),
                                self.bias.assign_add(self.grad_bias),
                                self.cias.assign_add(self.grad_cias)]
conv_seq2seq.py (project: conv_seq2seq, author: tobyyouup)
def default_params():
    params = Seq2SeqModel.default_params().copy()
    params.update({
        "encoder.class": "seq2seq.encoders.ConvEncoderFairseq",
        "encoder.params": {},  # Arbitrary parameters for the encoder
        "decoder.class": "seq2seq.decoders.ConvDecoder",
        "decoder.params": {},  # Arbitrary parameters for the decoder
        "source.max_seq_len": 50,
        "source.reverse": False,
        "target.max_seq_len": 50,
        "embedding.dim": 256,
        "embedding.init_scale": 0.04,
        "embedding.share": False,
        "position_embeddings.num_positions": 100,
        "inference.beam_search.beam_width": 0,
        "inference.beam_search.length_penalty_weight": 1.0,
        "inference.beam_search.choose_successors_fn": "choose_top_k",
        "vocab_source": "",
        "vocab_target": "", 
        "optimizer.name": "Momentum",
        "optimizer.learning_rate": 0.25,
        "optimizer.params": {"momentum": 0.99, "use_nesterov": True}, # Arbitrary parameters for the optimizer
        #"optimizer.params": { "epsilon": 0.0000008}, # Arbitrary parameters for the optimizer
        "optimizer.lr_decay_type": "exponential_decay",
        "optimizer.lr_decay_steps": 5000,  # one epoch steps
        "optimizer.lr_decay_rate": 0.9,  
        "optimizer.lr_start_decay_at": 0,  # start annealing epoch 0
        "optimizer.lr_stop_decay_at": tf.int32.max,
        "optimizer.lr_min_learning_rate": 1e-5,
        "optimizer.lr_staircase": True,
        "optimizer.clip_gradients": 0.1,
        "optimizer.clip_embed_gradients": 5,
        "optimizer.sync_replicas": 0,
        "optimizer.sync_replicas_to_aggregate": 0,

    })
    return params
conv_seq2seq.py (project: conv_seq2seq, author: tobyyouup)
def encode(self, features, labels):

    features["source_ids"] = tf.reverse_sequence(features["source_ids"], features["source_len"], batch_dim=0, seq_dim=1)  # [[1,2,3,4,PAD,PAD,PAD],[2,3,PAD,PAD,PAD,PAD,PAD]]   [4,2]
    features["source_ids"] = tf.reverse(features["source_ids"],[1])  # --> [[4,3,2,1,PAD,PAD,PAD],[3,2,PAD,PAD,PAD,PAD,PAD]] --> [[PAD,PAD,PAD,1,2,3,4],[PAD,PAD,PAD,PAD,PAD,2,3]]

    source_embedded = tf.nn.embedding_lookup(self.source_embedding_fairseq(),
                                             features["source_ids"])
    encoder_fn = self.encoder_class(self.params["encoder.params"], self.mode, self.source_pos_embedding_fairseq())
    return encoder_fn(source_embedded, features["source_len"])
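
A standalone illustration of the two-step reversal above, assuming PAD has id 0: tf.reverse_sequence flips only the valid prefix of each row, and the full tf.reverse then pushes the padding to the front while restoring token order.

ids = tf.constant([[1, 2, 3, 4, 0, 0, 0],
                   [2, 3, 0, 0, 0, 0, 0]])
lens = tf.constant([4, 2])
step1 = tf.reverse_sequence(ids, lens, batch_dim=0, seq_dim=1)  # [[4,3,2,1,0,0,0], [3,2,0,0,0,0,0]]
step2 = tf.reverse(step1, [1])                                  # [[0,0,0,1,2,3,4], [0,0,0,0,0,2,3]]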
layers.py (project: rllabplusplus, author: shaneshixiang)
def get_output_for(self, input, **kwargs):
        axis = self.axis
        ndims = input.get_shape().ndims
        if axis < 0:
            axis += ndims
        if isinstance(self.slice, int) and self.slice < 0:
            return tf.reverse(input, [self.axis + 1])[
                (slice(None),) * axis + (-1 - self.slice,) + (slice(None),) * (ndims - axis - 1)
                ]
        # import ipdb; ipdb.set_trace()
        return input[(slice(None),) * axis + (self.slice,) + (slice(None),) * (ndims - axis - 1)]
model.py (project: relaax, author: deeplearninc)
def build_graph(self):
        # Build graph
        state = graph.Placeholder(np.float32, shape=(2, ))
        reverse = graph.TfNode(tf.reverse(state.node, [0]))

        # Expose public API
        self.op_get_action = self.Op(reverse, state=state)
math.py (project: antgo, author: jianzfb)
def cummax(x, reverse=False, name=None):
    """Compute the cumulative maximum of the tensor `x` along `axis`. This
    operation is similar to the more classic `cumsum`. Only support 1D Tensor
    for now.

    Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
       axis: A `Tensor` of type `int32` (default: 0).
       reverse: A `bool` (default: False).
       name: A name for the operation (optional).
    Returns:
    A `Tensor`. Has the same type as `x`.
    """
    with ops.name_scope(name, "Cummax", [x]) as name:
        x = ops.convert_to_tensor(x, name="x")
        # Not very optimal: should directly integrate reverse into tf.scan.
        if reverse:
            x = tf.reverse(x, axis=[0])
        # 'Accumulating' maximum: ensure it is always increasing.
        cmax = tf.scan(lambda a, y: tf.maximum(a, y), x,
                       initializer=None, parallel_iterations=1,
                       back_prop=False, swap_memory=False)
        if reverse:
            cmax = tf.reverse(cmax, axis=[0])
        return cmax
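
A minimal usage sketch of cummax, assuming TensorFlow 1.x and the module's own imports (import tensorflow as tf and from tensorflow.python.framework import ops):

x = tf.constant([1.0, 3.0, 2.0, 5.0, 4.0])
forward = cummax(x)                  # [1., 3., 3., 5., 5.]
backward = cummax(x, reverse=True)   # [5., 5., 5., 5., 4.]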
main.py (project: language-translation-english-to-french, author: Satyaki0924)
def main(self):
        train_graph = tf.Graph()
        save_path = self.path + '/checkpoints/dev'
        source_path = self.path + '/data/small_vocab_en'
        target_path = self.path + '/data/small_vocab_fr'
        PreProcess(source_path, target_path).process_and_save_data()
        _, batch_size, rnn_size, num_layers, encoding_embedding_size, decoding_embedding_size, _, _ = \
            Params().get()
        (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = \
            self.load_process()
        max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
        with train_graph.as_default():
            input_data, targets, lr, keep_prob = Inputs().get()
            sequence_length = tf.placeholder_with_default(
                max_source_sentence_length, None, name='sequence_length')
            input_shape = tf.shape(input_data)
            train_logits, inference_logits = Seq2seq().seq2seq_model(
                tf.reverse(input_data, [-1]), targets, keep_prob, batch_size,
                sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
                encoding_embedding_size, decoding_embedding_size,
                rnn_size, num_layers, target_vocab_to_int)
            tf.identity(inference_logits, 'logits')
            with tf.name_scope("optimization"):
                cost = tf.contrib.seq2seq.sequence_loss(train_logits, targets,
                                                        tf.ones([input_shape[0], sequence_length]))
                optimizer = tf.train.AdamOptimizer(lr)
                gradients = optimizer.compute_gradients(cost)
                capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var)
                                    for grad, var in gradients if grad is not None]
                train_op = optimizer.apply_gradients(capped_gradients)
        Train(source_int_text, target_int_text, train_graph, train_op, cost,
              input_data, targets, lr, sequence_length, keep_prob, inference_logits, save_path).train()
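
The tf.reverse(input_data, [-1]) call above flips the last axis, i.e. it feeds each source sentence to the encoder in reversed token order, the trick popularized by Sutskever et al. (2014) for sequence-to-sequence models. A one-line illustration:

ids = tf.constant([[7, 8, 9, 10]])
rev = tf.reverse(ids, [-1])   # [[10, 9, 8, 7]]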
image_ops.py (project: rec-attend-public, author: renmengye)
def random_flip_left_right(image, seed=None):
  uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
  mirror = math_ops.less(tf.pack([1.0, 1.0, uniform_random, 1.0]), 0.5)
  return tf.reverse(image, mirror)
image_ops.py (project: rec-attend-public, author: renmengye)
def random_flip_up_down(image, seed=None):
  uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
  mirror = math_ops.less(tf.pack([1.0, uniform_random, 1.0, 1.0]), 0.5)
  return tf.reverse(image, mirror)
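
Both helpers above rely on the pre-1.0 boolean-mask tf.reverse together with tf.pack. A TF >= 1.0 sketch of the same idea for a batched [N, H, W, C] tensor draws one uniform sample and conditionally reverses the width axis (use axis 1 instead of 2 for an up-down flip):

uniform_random = tf.random_uniform([], 0, 1.0, seed=None)
flipped = tf.cond(uniform_random < 0.5,
                  lambda: tf.reverse(image, [2]),   # left-right flip
                  lambda: image)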
models.py (project: miccai-2016-surgical-activity-rec, author: rdipietro)
def __init__(self, *args):
        """ Create a reverse LSTM model.

        Args:
            See `LSTMModel`.
        """
        super(ReverseLSTMModel, self).__init__(*args)
models.py (project: miccai-2016-surgical-activity-rec, author: rdipietro)
def _compute_rnn_outputs(self):
        reversed_inputs = tf.reverse(self.inputs, [False, True, False])
        reversed_resets = tf.reverse(self.resets, [False, True, False])
        self._rv_lstm = LSTM(reversed_inputs, reversed_resets, self.training,
                             self.num_layers, self.hidden_layer_size,
                             self.init_scale, self.dropout_keep_prob)
        outputs = tf.reverse(self._rv_lstm.outputs, [False, True, False])
        return outputs
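
The boolean masks above reverse axis 1, the time axis of [batch, time, features] inputs. A sketch of the TF >= 1.0 equivalents; seq_lengths below is a hypothetical per-example length vector, not part of the original model:

reversed_inputs = tf.reverse(self.inputs, [1])   # flip the time axis with axis indices
# If sequences are padded to different lengths, reverse only the valid steps:
reversed_inputs = tf.reverse_sequence(self.inputs, seq_lengths, batch_dim=0, seq_dim=1)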
tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def ctc_label_dense_to_sparse(labels, label_lengths):
    # undocumented feature soon to be made public
    from tensorflow.python.ops import functional_ops
    label_shape = tf.shape(labels)
    num_batches_tns = stack([label_shape[0]])
    max_num_labels_tns = stack([label_shape[1]])

    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < tf.fill(max_num_labels_tns, current_input)

    init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)
    dense_mask = functional_ops.scan(range_less_than, label_lengths,
                                     initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
                             label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]),
                                                  max_num_labels_tns), reverse(label_shape, 0)))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)
    indices = tf.transpose(tf.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))

    vals_sparse = tf.gather_nd(labels, indices)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))
pot.py (project: adagan, author: tolstikhin)
def discriminator_cramer_test(self, opts, input_):
        """Deterministic discriminator using Cramer von Mises Test.

        """
        add_dim = opts['z_test_proj_dim']
        if add_dim > 0:
            dim = int(input_.get_shape()[1])
            proj = np.random.rand(dim, add_dim)
            proj = proj - np.mean(proj, 0)
            norms = np.sqrt(np.sum(np.square(proj), 0) + 1e-5)
            proj = tf.constant(proj / norms, dtype=tf.float32)
            projected_x = tf.matmul(input_, proj)  # Shape [batch_size, add_dim].

            # Shape [batch_size, z_dim+add_dim]
            all_dims_x = tf.concat([input_, projected_x], 1)
        else:
            all_dims_x = input_

        # top_k can only sort on the last dimension and we want to sort the
        # first one (batch_size).
        batch_size = self.get_batch_size(opts, all_dims_x)
        transposed = tf.transpose(all_dims_x, perm=[1, 0])
        values, indices = tf.nn.top_k(transposed, k=tf.cast(batch_size, tf.int32))
        values = tf.reverse(values, [1])
        #values = tf.Print(values, [values], "sorted values")
        normal_dist = tf.contrib.distributions.Normal(0., float(opts['pot_pz_std']))
        #
        normal_cdf = normal_dist.cdf(values)
        #normal_cdf = tf.Print(normal_cdf, [normal_cdf], "normal_cdf")
        expected = (2 * tf.range(1, batch_size+1, 1, dtype="float") - 1) / (2.0 * batch_size)
        #expected = tf.Print(expected, [expected], "expected")
        # We don't use the constant.
        # constant = 1.0 / (12.0 * batch_size * batch_size)
        # stat = constant + tf.reduce_sum(tf.square(expected - normal_cdf), 1) / batch_size
        stat = tf.reduce_sum(tf.square(expected - normal_cdf), 1) / batch_size
        stat = tf.reduce_mean(stat)
        #stat = tf.Print(stat, [stat], "stat")
        return stat
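
The values = tf.reverse(values, [1]) line above turns tf.nn.top_k's descending output into an ascending sort along that axis. A small illustration:

v = tf.constant([[3.0, 1.0, 2.0]])
desc, _ = tf.nn.top_k(v, k=3)    # [[3., 2., 1.]]
asc = tf.reverse(desc, [1])      # [[1., 2., 3.]]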
pot.py (project: adagan, author: tolstikhin)
def discriminator_anderson_test(self, opts, input_):
        """Deterministic discriminator using the Anderson Darling test.

        """
        # A-D test says to normalize data before computing the statistic
        # Because true mean and variance are known, we are supposed to use
        # the population parameters for that, but wiki says it's better to
        # still use the sample estimates while normalizing
        means = tf.reduce_mean(input_, 0)
        input_ = input_ - means # Broadcasting
        stds = tf.sqrt(1e-5 + tf.reduce_mean(tf.square(input_), 0))
        input_ = input_ / stds
        # top_k can only sort on the last dimension and we want to sort the
        # first one (batch_size).
        batch_size = self.get_batch_size(opts, input_)
        transposed = tf.transpose(input_, perm=[1, 0])
        values, indices = tf.nn.top_k(transposed, k=tf.cast(batch_size, tf.int32))
        values = tf.reverse(values, [1])
        normal_dist = tf.contrib.distributions.Normal(0., float(opts['pot_pz_std']))
        normal_cdf = normal_dist.cdf(values)
        # ln_normal_cdf is of shape (z_dim, batch_size)
        ln_normal_cdf = tf.log(normal_cdf)
        ln_one_normal_cdf = tf.log(1.0 - normal_cdf)
        w1 = 2 * tf.range(1, batch_size + 1, 1, dtype="float") - 1
        w2 = 2 * tf.range(batch_size - 1, -1, -1, dtype="float") + 1
        stat = -batch_size - tf.reduce_sum(w1 * ln_normal_cdf + \
                                           w2 * ln_one_normal_cdf, 1) / batch_size
        # stat is of shape (z_dim)
        stat = tf.reduce_mean(tf.square(stat))
        return stat

