Source code examples of the Python class TensorArray()

layers.py (project: LiTeFlow, author: petrux)
def _call_helper(self):
        time = tf.constant(0, dtype=tf.int32)
        inp = self._decoder.init_input()
        state = self._decoder.init_state()
        finished = tf.tile([False], [utils.get_dimension(inp, 0)])
        output_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
        loop_vars = [time, inp, state, finished, output_ta]
        results = tf.while_loop(
            cond=self.cond, body=self.body, loop_vars=loop_vars,
            parallel_iterations=self._parallel_iterations,
            swap_memory=self._swap_memory)
        output_ta = results[-1]
        output = output_ta.stack()
        output = tf.transpose(output, [1, 0, 2])
        state = results[2]
        return output, state
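The cond and body callbacks referenced above are defined elsewhere in the class. A minimal, self-contained sketch of the same pattern, dynamic decoding that grows a tf.TensorArray inside tf.while_loop, with illustrative shapes and the TF 1.x API:

import tensorflow as tf

batch_size, state_size, max_steps = 4, 8, 10

def cond(time, state, output_ta):
    # Stop after a fixed number of steps in this toy example.
    return tf.less(time, max_steps)

def body(time, state, output_ta):
    # Stand-in for one decoder step; a real decoder would run its cell here.
    new_state = tf.tanh(state)
    # Write this step's output at position `time`; the array grows as needed.
    output_ta = output_ta.write(time, new_state)
    return time + 1, new_state, output_ta

time = tf.constant(0, dtype=tf.int32)
state = tf.zeros([batch_size, state_size])
output_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)

_, final_state, output_ta = tf.while_loop(cond, body, [time, state, output_ta])
outputs = tf.transpose(output_ta.stack(), [1, 0, 2])  # [batch, time, state]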
base_attention.py (project: neuralmonkey, author: ufal)
def empty_attention_loop_state() -> AttentionLoopStateTA:
    """Create an empty attention loop state.

    The attention loop state is a technical object for storing the attention
    distributions and the context vectors in time. It is used with the
    ``tf.while_loop`` dynamic implementation of the decoder.

    This function returns an empty attention loop state which means there are
    two empty arrays, one for attention distributions in time, and one for
    the attention context vectors in time.
    """
    return AttentionLoopStateTA(
        contexts=tf.TensorArray(
            dtype=tf.float32, size=0, dynamic_size=True,
            name="contexts"),
        weights=tf.TensorArray(
            dtype=tf.float32, size=0, dynamic_size=True,
            name="distributions", clear_after_read=False))
meta.py (project: tf-tutorial, author: zchen0211)
def meta_loss(make_loss):
  x, constants = _get_variables(make_loss)

  print("Optimizee variables")
  print([op.name for op in x])
  print("Problem variables")
  print([op.name for op in constants])

  fx = _make_with_custom_variables(make_loss, x)
  log.info(type(fx))
  print(fx is None)

  fx_array = tf.TensorArray(tf.float32, 1, clear_after_read=False)
  fx_array = fx_array.write(0, fx)
  loss = tf.reduce_sum(fx_array.stack(), name="loss")


# problem = simple()
# meta_minimize(problem)
# log.info(type(fx))
# sess = tf.Session()
# sess.run(tf.global_variables_initializer())
# print sess.run(loss)
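In the full learning-to-learn setup, later losses are written into the same array inside the unrolled optimization loop. A minimal sketch of that accumulation pattern with a toy objective and update rule (not the repository's code):

import tensorflow as tf

num_unrolls = 5
fx_array = tf.TensorArray(tf.float32, size=num_unrolls, clear_after_read=False)

def cond(t, x, fx_array):
    return t < num_unrolls

def body(t, x, fx_array):
    fx = tf.reduce_sum(tf.square(x))   # toy objective in place of make_loss
    fx_array = fx_array.write(t, fx)   # record the loss of unroll step t
    x = x - 0.2 * x                    # stand-in for one optimizer update
    return t + 1, x, fx_array

_, _, fx_array = tf.while_loop(
    cond, body, [tf.constant(0), tf.ones([3]), fx_array])
loss = tf.reduce_sum(fx_array.stack(), name="loss")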
utility.py (project: Neural-Turing-Machine, author: camigord)
def pack_into_tensor(array, axis):
    """
    packs a given TensorArray into a tensor along a given axis

    Parameters:
    ----------
    array: TensorArray
        the tensor array to pack
    axis: int
        the axis to pack the array along

    Returns: Tensor
        the packed tensor
    """

    packed_tensor = array.pack()
    shape = packed_tensor.get_shape()
    rank = len(shape)

    dim_permutation = [axis] + range(1, axis) + [0]  + range(axis + 1, rank)
    correct_shape_tensor = tf.transpose(packed_tensor, dim_permutation)

    return correct_shape_tensor
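A brief usage sketch. Note that TensorArray.pack and unpack are the pre-1.0 TensorFlow names for stack and unstack, so this helper assumes an old (Python 2 era) release; packing three [2, 4] slices along axis=1 yields a [2, 3, 4] tensor:

import tensorflow as tf

# Three time slices, each of shape [batch=2, dim=4].
ta = tf.TensorArray(dtype=tf.float32, size=3)
for t in range(3):
    ta = ta.write(t, tf.fill([2, 4], float(t)))

# Moves the leading time dimension to position `axis`,
# so the packed result has shape [2, 3, 4].
packed = pack_into_tensor(ta, axis=1)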
beam_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def __init__(self, training, cell, embedding, start_tokens, end_token, initial_state, beam_width, output_layer=None, gold_sequence=None, gold_sequence_length=None):
        self._training = training
        self._cell = cell
        self._output_layer = output_layer
        self._embedding_fn = lambda ids: tf.nn.embedding_lookup(embedding, ids)

        self._output_size = output_layer.units if output_layer is not None else self._cell.output_size
        self._batch_size = tf.size(start_tokens)
        self._beam_width = beam_width
        self._tiled_initial_cell_state = nest.map_structure(self._maybe_split_batch_beams, initial_state, self._cell.state_size)
        self._start_tokens = start_tokens
        self._tiled_start_tokens = self._maybe_tile_batch(start_tokens)
        self._end_token = end_token

        self._original_gold_sequence = gold_sequence
        self._gold_sequence = gold_sequence
        self._gold_sequence_length = gold_sequence_length
        if training:
            assert self._gold_sequence is not None
            assert self._gold_sequence_length is not None
            self._max_time = int(self._gold_sequence.shape[1])
            # transpose gold sequence to be time major and make it into a TensorArray
            self._gold_sequence = tf.TensorArray(dtype=tf.int32, size=self._max_time)
            self._gold_sequence = self._gold_sequence.unstack(tf.transpose(gold_sequence, [1, 0]))
beam_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def _maybe_split_batch_beams(self, t, s):
        """Maybe splits the tensor from a batch by beams into a batch of beams.
        We do this so that we can use nest and not run into problems with shapes.
        Args:
          t: Tensor of dimension [batch_size*beam_width, s]
          s: Tensor, Python int, or TensorShape.
        Returns:
          Either a reshaped version of t with dimension
          [batch_size, beam_width, s] if t's first dimension is of size
          batch_size*beam_width or t if not.
        Raises:
          TypeError: If t is an instance of TensorArray.
          ValueError: If the rank of t is not statically known.
        """
        return self._split_batch_beams(t, s) if t.shape.ndims >= 1 else t
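The underlying _split_batch_beams helper is not shown in this snippet. A minimal sketch of the reshape it performs, written as a standalone function with assumed arguments rather than the repository's exact implementation:

import tensorflow as tf

def split_batch_beams(t, batch_size, beam_width):
    """Reshape a [batch_size * beam_width, ...] tensor into
    [batch_size, beam_width, ...]."""
    new_shape = tf.concat([[batch_size, beam_width], tf.shape(t)[1:]], axis=0)
    return tf.reshape(t, new_shape)

# Example: a [batch * beam, depth] = [6, 5] tensor with batch=2, beam=3
# becomes [2, 3, 5].
merged = tf.zeros([6, 5])
split = split_batch_beams(merged, batch_size=2, beam_width=3)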
beam_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def _maybe_merge_batch_beams(self, t, s):
        """Splits the tensor from a batch by beams into a batch of beams.
        More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
        reshape this into [batch_size, beam_width, s]
        Args:
          t: Tensor of dimension [batch_size*beam_width, s]
          s: Tensor, Python int, or TensorShape.
        Returns:
          A reshaped version of t with dimension [batch_size, beam_width, s].
        Raises:
          TypeError: If t is an instance of TensorArray.
          ValueError:  If the rank of t is not statically known.
        """
        return self._merge_batch_beams(t, s) if t.shape.ndims >= 2 else t
grammar_decoder.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def __init__(self, grammar : AbstractGrammar, *args, training_output=None, grammar_helper : GrammarHelper = None, **kw):
        super().__init__(*args, **kw)
        self._grammar = grammar
        self._grammar_helper = grammar_helper if grammar_helper is not None else GrammarHelper(grammar)
        self._fixed_outputs = training_output
        if training_output is not None:
            self._fixed_outputs = tf.TensorArray(dtype=tf.int32, size=training_output.get_shape()[1])
            self._fixed_outputs = self._fixed_outputs.unstack(tf.transpose(training_output, [1, 0]))
cluster.py (project: section-detection, author: gulfaraz)
def meanShift(n_updates=-1):
    X1 = tf.expand_dims(tf.transpose(input_X), 0)
    X2 = tf.expand_dims(input_X, 0)
    C = init_C

    sbs_C = tf.TensorArray(dtype=tf.float32, size=10000, infer_shape=False)
    sbs_C = sbs_C.write(0, init_C)

    def _mean_shift_step(C):
        C = tf.expand_dims(C, 2)
        Y = tf.reduce_sum(tf.pow((C - X1) / window_radius, 2), axis=1)
        gY = tf.exp(-Y)
        num = tf.reduce_sum(tf.expand_dims(gY, 2) * X2, axis=1)
        denom = tf.reduce_sum(gY, axis=1, keep_dims=True)
        C = num / denom
        return C

    if n_updates > 0:
        for i in range(n_updates):
            C = _mean_shift_step(C)
            sbs_C = sbs_C.write(i + 1, C)
    else:
        def _mean_shift(i, C, sbs_C, max_diff):
            new_C = _mean_shift_step(C)
            max_diff = tf.reshape(tf.reduce_max(tf.sqrt(tf.reduce_sum(tf.pow(new_C - C, 2), axis=1))), [])
            sbs_C = sbs_C.write(i + 1, new_C)
            return i + 1, new_C, sbs_C, max_diff

        def _cond(i, C, sbs_C, max_diff):
            return max_diff > 1e-5

        n_updates, C, sbs_C, _ = tf.while_loop(cond=_cond,
                                       body=_mean_shift,
                                       loop_vars=(tf.constant(0), C, sbs_C, tf.constant(1e10)))

        n_updates = tf.Print(n_updates, [n_updates])


    return C, sbs_C.gather(tf.range(n_updates + 1))
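The function reads the module-level tensors input_X, init_C, and window_radius. A hedged usage sketch, assuming those are defined as follows before meanShift is called:

import tensorflow as tf

# Assumed module-level tensors (names taken from the snippet above):
# N two-dimensional points and K initial cluster centers.
input_X = tf.placeholder(tf.float32, shape=[None, 2], name="input_X")
init_C = tf.placeholder(tf.float32, shape=[None, 2], name="init_C")
window_radius = tf.constant(0.5, dtype=tf.float32)

# Iterate until the centers move by less than 1e-5 between updates.
final_C, centers_per_iteration = meanShift(n_updates=-1)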
inputs.py (project: tf_classification, author: visipedia)
def get_distorted_inputs(original_image, bboxes, cfg, add_summaries):

    distorter = DistortedInputs(cfg, add_summaries)
    num_bboxes = tf.shape(bboxes)[0]
    distorted_inputs = tf.TensorArray(
        dtype=tf.float32,
        size=num_bboxes,
        element_shape=tf.TensorShape([1, cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
    )

    if add_summaries:
        image_summaries = tf.TensorArray(
            dtype=tf.float32,
            size=4,
            element_shape=tf.TensorShape([1, cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
        )
    else:
        image_summaries = tf.constant([])

    current_index = tf.constant(0, dtype=tf.int32)

    loop_vars = [original_image, bboxes, distorted_inputs, image_summaries, current_index]
    original_image, bboxes, distorted_inputs, image_summaries, current_index = tf.while_loop(
        cond=bbox_crop_loop_cond,
        body=distorter.apply,
        loop_vars=loop_vars,
        parallel_iterations=10, back_prop=False, swap_memory=False
    )

    distorted_inputs = distorted_inputs.concat()

    if add_summaries:
        tf.summary.image('0.original_image', image_summaries.read(0))
        tf.summary.image('1.image_with_random_crop', image_summaries.read(1))
        tf.summary.image('2.cropped_resized_image', image_summaries.read(2))
        tf.summary.image('3.final_distorted_image', image_summaries.read(3))


    return distorted_inputs
layers.py (project: LiTeFlow, author: petrux)
def __init__(self, cell, location_softmax, pointing_output,
                 input_size, decoder_inputs=None,
                 trainable=True, name=None, **kwargs):
        """Initializes a new PointingSoftmaxDecoder instance.

        See the class documentation for the description of all the arguments.
        """
        super(PointingSoftmaxDecoder, self).__init__(
            trainable=trainable, name=name, **kwargs)
        self._cell = cell
        self._loc = location_softmax
        self._out = pointing_output
        self._inp_size = input_size

        if decoder_inputs is not None:
            tensors = tf.transpose(decoder_inputs, [1, 0, 2])
            dtype = tensors.dtype
            size = tf.shape(tensors)[0]
            element_shape = tensors.get_shape()[1:]
            tensor_array = tf.TensorArray(dtype=dtype, size=size, element_shape=element_shape)
            decoder_inputs = tensor_array.unstack(tensors)
        self._inputs_ta = decoder_inputs

        # infer the batch/location size from the `states` tensor
        # of the attention layer of the injected location softmax.
        states = self._loc.attention.states
        self._batch_size = utils.get_dimension(states, 0)
        self._loc_size = utils.get_dimension(states, 1)
roi.py (project: dataset, author: analysiscenter)
def _array_to_tuple(inputs, size, shape=None):
    """ Convert tf.TensorArray to tf.Tuple. """
    with tf.variable_scope('array_to_tuple'):
        if shape is None:
            output = tf.tuple([inputs.read(i) for i in range(size)])
        else:
            output = tf.tuple([tf.reshape(inputs.read(i), shape) for i in range(size)])
    return output
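A brief usage sketch with toy shapes; the array must hold exactly `size` elements:

import tensorflow as tf

ta = tf.TensorArray(dtype=tf.float32, size=3)
for i in range(3):
    ta = ta.write(i, tf.fill([2, 2], float(i)))

# Materializes the array as a tuple of three [2, 2] tensors that can be
# indexed in plain Python.
outputs = _array_to_tuple(ta, size=3)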
seq2seq_utils.py (project: tefla, author: openAGI)
def _unstack_ta(inp):
    return tf.TensorArray(
        dtype=inp.dtype, size=tf.shape(inp)[0],
        element_shape=inp.get_shape()[1:]).unstack(inp)
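A brief usage sketch: unstacking a time-major [max_time, batch, dim] tensor gives a TensorArray whose element t holds the batch of inputs for step t:

import tensorflow as tf

# Time-major decoder inputs: [max_time=5, batch=2, dim=3].
inputs = tf.zeros([5, 2, 3])
inputs_ta = _unstack_ta(inputs)
first_step_inputs = inputs_ta.read(0)   # shape [2, 3]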
beam_search_decoder.py (project: neuralmonkey, author: ufal)
def get_initial_loop_state(self) -> BeamSearchLoopState:
        # TODO make these feedable
        output_ta = SearchStepOutputTA(
            scores=tf.TensorArray(dtype=tf.float32, dynamic_size=True,
                                  size=0, name="beam_scores"),
            parent_ids=tf.TensorArray(dtype=tf.int32, dynamic_size=True,
                                      size=0, name="beam_parents"),
            token_ids=tf.TensorArray(dtype=tf.int32, dynamic_size=True,
                                     size=0, name="beam_tokens"))

        # We run the decoder once to get logits for ensembling
        dec_ls = self.parent_decoder.get_initial_loop_state()
        decoder_body = self.parent_decoder.get_body(False)
        dec_ls = decoder_body(*dec_ls)

        # We want to feed these values in ensembles
        self._search_state = SearchState(
            logprob_sum=tf.placeholder_with_default([0.0], [None]),
            prev_logprobs=tf.nn.log_softmax(dec_ls.feedables.prev_logits),
            lengths=tf.placeholder_with_default([1], [None]),
            finished=tf.placeholder_with_default([False], [None]))

        self._decoder_state = dec_ls.feedables

        # TODO make TensorArrays also feedable
        return BeamSearchLoopState(
            bs_state=self._search_state,
            bs_output=output_ta,
            decoder_loop_state=dec_ls)
coverage.py (project: neuralmonkey, author: ufal)
def get_energies(self, y: tf.Tensor, weights_in_time: tf.TensorArray):
        weight_sum = tf.cond(
            tf.greater(weights_in_time.size(), 0),
            lambda: tf.reduce_sum(weights_in_time.stack(), axis=0),
            lambda: 0.0)

        coverage = weight_sum / self.fertility * self.attention_mask
        logits = tf.reduce_sum(
            self.similarity_bias_vector * tf.tanh(
                self.hidden_features + y + self.coverage_weights *
                tf.expand_dims(tf.expand_dims(coverage, -1), -1)),
            [2, 3])

        return logits
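The tf.cond guard handles the first decoding step, when the TensorArray is still empty, by falling back to a scalar zero so the coverage term vanishes. A minimal standalone illustration of that guard:

import tensorflow as tf

weights_in_time = tf.TensorArray(tf.float32, size=0, dynamic_size=True)

# Before any attention distribution has been written, return 0.0;
# afterwards, sum the stacked distributions over the time axis.
weight_sum = tf.cond(
    tf.greater(weights_in_time.size(), 0),
    lambda: tf.reduce_sum(weights_in_time.stack(), axis=0),
    lambda: 0.0)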
scaled_dot_product.py (project: neuralmonkey, author: ufal)
def initial_loop_state(self) -> MultiHeadLoopStateTA:
        return MultiHeadLoopStateTA(
            contexts=tf.TensorArray(
                dtype=tf.float32, size=0, dynamic_size=True,
                name="contexts"),
            head_weights=[tf.TensorArray(
                dtype=tf.float32, size=0, dynamic_size=True,
                name="distributions_head{}".format(i), clear_after_read=False)
                          for i in range(self.n_heads)])
layers.py (project: mist-rnns, author: rdipietro)
def _compute_states(self):
    """ Compute hidden states.

    Returns:
      A tuple, (outputs, states).
    """

    _inputs = tf.transpose(self.inputs, [1, 0, 2])
    x_ta = tf.TensorArray(tf.float32, size=self.length).unstack(_inputs)
    h_ta = tf.TensorArray(tf.float32, size=self.length)

    def cond(t, h, h_ta):
      return tf.less(t, self.length)

    def body(t, h, h_ta):

      x = x_ta.read(t)
      num_units, input_size = self.num_hidden_units, self.input_size

      with tf.variable_scope('simple_rnn'):
        h_new = self.activation(self._linear(h, x, num_units, scope='simple_rnn'))

      h_ta_new = h_ta.write(t, h_new)
      return t + 1, h_new, h_ta_new

    t = tf.constant(0)
    h = tf.squeeze(self.initial_states, [1])
    _, _, h_ta = tf.while_loop(cond, body, [t, h, h_ta])

    states = tf.transpose(h_ta.stack(), [1, 0, 2], name='states')
    outputs = tf.identity(states, name='outputs')
    return outputs, states
layers.py (project: mist-rnns, author: rdipietro)
def _compute_states(self):

    _inputs = tf.transpose(self.inputs, [1, 0, 2])
    x_ta = tf.TensorArray(tf.float32, size=self.length).unstack(_inputs)
    h_ta = tf.TensorArray(tf.float32, size=self.length)
    c_ta = tf.TensorArray(tf.float32, size=self.length)

    def cond(t, c, h, c_ta, h_ta):
      return tf.less(t, self.length)

    def body(t, c, h, c_ta, h_ta):

      x = x_ta.read(t)
      num_units, input_size = self.num_hidden_units, self.input_size

      with tf.variable_scope('lstm'):
        c_tilde = self.activation(self._linear(h, x, num_units, scope='c'))
        i = tf.nn.sigmoid(self._linear(h, x, num_units, scope='i'))
        f = tf.nn.sigmoid(self._linear(h, x, num_units, shift=self.optional_bias_shift, scope='f'))
        o = tf.nn.sigmoid(self._linear(h, x, num_units, scope='o'))
        c_new = i * c_tilde + f * c
        h_new = o * self.activation(c_new)

      c_ta_new = c_ta.write(t, c_new)
      h_ta_new = h_ta.write(t, h_new)
      return t + 1, c_new, h_new, c_ta_new, h_ta_new

    t = tf.constant(0)
    c, h = tf.split(tf.squeeze(self.initial_states, [1]), 2, axis=1)
    _, _, _, c_ta, h_ta = tf.while_loop(cond, body, [t, c, h, c_ta, h_ta])

    outputs = tf.transpose(h_ta.stack(), [1, 0, 2], name='outputs')
    cells = tf.transpose(c_ta.stack(), [1, 0, 2])
    states = tf.concat([cells, outputs], axis=2, name='states')
    return outputs, states
layers.py (project: mist-rnns, author: rdipietro)
def _compute_states(self):

    _inputs = tf.transpose(self.inputs, [1, 0, 2])
    x_ta = tf.TensorArray(tf.float32, size=self.length).unstack(_inputs)
    h_ta = tf.TensorArray(tf.float32, size=self.length)

    def cond(t, h, h_ta):
      return tf.less(t, self.length)

    def body(t, h, h_ta):

      x = x_ta.read(t)
      num_units, input_size = self.num_hidden_units, self.input_size

      with tf.variable_scope('gru'):
        r = tf.nn.sigmoid(self._linear(h, x, num_units, scope='r'))
        h_pre_act = r * h
        h_tilde = self.activation(self._linear(h_pre_act, x, num_units, scope='h'))

        z = tf.nn.sigmoid(self._linear(h, x, num_units, shift=self.optional_bias_shift, scope='z'))
        h_new = z * h + (1 - z) * h_tilde

      h_ta_new = h_ta.write(t, h_new)
      return t + 1, h_new, h_ta_new

    t = tf.constant(0)
    h = tf.squeeze(self.initial_states, [1])
    _, _, h_ta = tf.while_loop(cond, body, [t, h, h_ta])

    states = tf.transpose(h_ta.stack(), [1, 0, 2], name='states')
    outputs = tf.identity(states, name='outputs')
    return outputs, states
DDTL_alex.py (project: tensorflow-DDT, author: wangchao66)
def source_distance(x,y):
    y = tf.cast(tf.argmax(y,axis=1),tf.float32)
    y1,_,_ = tf.unique_with_counts(y)
    TensorArr = tf.TensorArray(tf.float32,size=1, dynamic_size=True,clear_after_read=False)
    x_array = TensorArr.unstack(y1)
    size = x_array.size()
    initial_outputs = tf.TensorArray(dtype=tf.float32,size=size)
    i = tf.constant(0)
    def should_continue(i, *args):
        return i < size
    def loop(i,output):
        y_class = x_array.read(i)
        idx_i = tf.where(tf.equal(y,y_class))
        xi = tf.gather_nd(x,idx_i)
        initial_outputs1 = tf.TensorArray(dtype=tf.float32,size=size)
        j = tf.constant(0)
        def should_continue1(j,*args):
            return j<size
        def loop1(j,output1):
            y2=x_array.read(j)
            idx_j = tf.where(tf.equal(y,y2))
            xj = tf.gather_nd(x,idx_j)
            dis = tf.reduce_mean(tf.square(tf.reduce_mean(xi, 0) - tf.reduce_mean(xj, 0)))
            output1 = output1.write(j,dis)
            return j+1,output1
        j,r1=tf.while_loop(should_continue1,loop1,[j,initial_outputs1])
        output = output.write(i,r1.stack())
        return i+1,output
    i,r = tf.while_loop(should_continue,loop,[i,initial_outputs])
    out = r.stack()
    return out
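A hedged usage sketch: x is a batch of feature vectors and y the matching one-hot labels, and the result is a square matrix of squared distances between per-class feature means, one row and column per class present in the batch (shapes here are illustrative):

import tensorflow as tf

# Toy inputs: samples with 16-dimensional features and 3 one-hot classes.
x = tf.placeholder(tf.float32, shape=[None, 16])
y = tf.placeholder(tf.float32, shape=[None, 3])

class_distance_matrix = source_distance(x, y)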
DDTL_resnet.py (project: tensorflow-DDT, author: wangchao66)
def source_distance(x,y):
    y = tf.cast(tf.argmax(y,axis=1),tf.float32)
    y1,_,_ = tf.unique_with_counts(y)
    TensorArr = tf.TensorArray(tf.float32,size=1, dynamic_size=True,clear_after_read=False)
    x_array = TensorArr.unstack(y1)
    size = x_array.size()
    initial_outputs = tf.TensorArray(dtype=tf.float32,size=size)
    i = tf.constant(0)
    def should_continue(i, *args):
        return i < size
    def loop(i,output):
        y_class = x_array.read(i)
        idx_i = tf.where(tf.equal(y,y_class))
        xi = tf.gather_nd(x,idx_i)
        initial_outputs1 = tf.TensorArray(dtype=tf.float32,size=size)
        j = tf.constant(0)
        def should_continue1(j,*args):
            return j<size
        def loop1(j,output1):
            y2=x_array.read(j)
            idx_j = tf.where(tf.equal(y,y2))
            xj = tf.gather_nd(x,idx_j)
            dis = tf.reduce_mean(tf.square(tf.reduce_mean(xi, 0) - tf.reduce_mean(xj, 0)))
            output1 = output1.write(j,dis)
            return j+1,output1
        j,r1=tf.while_loop(should_continue1,loop1,[j,initial_outputs1])
        output = output.write(i,r1.stack())
        return i+1,output
    i,r = tf.while_loop(should_continue,loop,[i,initial_outputs])
    out = r.stack()
    return out
sequence_table.py (project: master-thesis, author: AndreasMadsen)
def __init__(self, size, dtype):
        # A TensorArray is required as the sequences don't have the same
        # length. Alternatively a FIFO query can be used.
        # Because the data is read more than once by the queue,
        # clear_after_read is set to False (but I can't confirm an effect).
        # Because the items has diffrent sequence lengths the infer_shape
        # is set to False. The shape is then restored in the .read method.
        self.data = tf.TensorArray(size=size,
                                   dtype=dtype,
                                   dynamic_size=False,
                                   clear_after_read=False,
                                   infer_shape=False)
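The comment above mentions that the shape is restored in the .read method. A hedged sketch of what such a read might look like; the function below is illustrative, not the repository's implementation:

import tensorflow as tf

def read_with_shape(data, index, observation_dims):
    """Read one variable-length sequence and restore its static rank."""
    # Because the TensorArray was built with infer_shape=False, the element
    # returned by .read() has a completely unknown shape; set_shape restores
    # the known part ([time, dims]) without fixing the sequence length.
    sequence = data.read(index)
    sequence.set_shape([None, observation_dims])
    return sequence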
control.py (project: tensorflow-extenteten, author: raviqqe)
def unpack_to_array(tensor):
    return tf.TensorArray(tensor.dtype, tf.shape(tensor)[0]).unpack(tensor)
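TensorArray.unpack and pack are the pre-1.0 TensorFlow names; from TF 1.0 onward the equivalent calls are unstack and stack, e.g.:

import tensorflow as tf

def unpack_to_array_v1(tensor):
    # The same helper written against the TF >= 1.0 API.
    return tf.TensorArray(tensor.dtype, tf.shape(tensor)[0]).unstack(tensor)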

