Python while_loop() example source code
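Before the per-project samples below, here is a minimal, self-contained sketch of the tf.while_loop API, assuming TensorFlow 1.x graph mode (under TF 2.x the same calls live under tf.compat.v1): cond decides whether to keep looping, body maps the loop variables to their next values, and loop_vars supplies the initial state.

import tensorflow as tf

# A minimal graph-mode loop: keep running `body` while `cond` is true.
i0 = tf.constant(0)
result = tf.while_loop(
    cond=lambda i: tf.less(i, 10),  # continue while i < 10
    body=lambda i: i + 1,           # one iteration: increment i
    loop_vars=[i0])

with tf.Session() as sess:
    print(sess.run(result))  # 10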

hmc.py (project: zhusuan, author: thu-ml)
def _leapfrog(self, q, p, step_size, get_gradient, mass):
        def loop_cond(i, q, p):
            return i < self.n_leapfrogs + 1

        def loop_body(i, q, p):
            # step_size1 is zero on the very first iteration, full afterwards.
            step_size1 = tf.cond(i > 0,
                                 lambda: step_size,
                                 lambda: tf.constant(0.0, dtype=tf.float32))

            # step_size2 is halved on the boundary iterations (i == 0 and
            # i == n_leapfrogs), full in between, per the leapfrog scheme.
            step_size2 = tf.cond(tf.logical_and(tf.less(i, self.n_leapfrogs),
                                                tf.less(0, i)),
                                 lambda: step_size,
                                 lambda: step_size / 2)

            q, p = leapfrog_integrator(q, p, step_size1, step_size2,
                                       lambda q: get_gradient(q), mass)
            return [i + 1, q, p]

        i = tf.constant(0)
        _, q, p = tf.while_loop(loop_cond,
                                loop_body,
                                [i, q, p],
                                back_prop=False,
                                parallel_iterations=1)
        return q, p
hmc.py (project: a-nice-mc, author: ermongroup)
def simulate_dynamics(initial_pos, initial_vel, stepsize, n_steps, energy_fn):
    def leapfrog(pos, vel, step, i):
        de_dp_ = tf.gradients(tf.reduce_sum(energy_fn(pos)), pos)[0]
        new_vel_ = vel - step * de_dp_
        new_pos_ = pos + step * new_vel_
        return [new_pos_, new_vel_, step, tf.add(i, 1)]

    def condition(pos, vel, step, i):
        return tf.less(i, n_steps)

    de_dp = tf.gradients(tf.reduce_sum(energy_fn(initial_pos)), initial_pos)[0]
    vel_half_step = initial_vel - 0.5 * stepsize * de_dp
    pos_full_step = initial_pos + stepsize * vel_half_step

    i = tf.constant(0)
    final_pos, new_vel, _, _ = tf.while_loop(condition, leapfrog, [pos_full_step, vel_half_step, stepsize, i])
    de_dp = tf.gradients(tf.reduce_sum(energy_fn(final_pos)), final_pos)[0]
    final_vel = new_vel - 0.5 * stepsize * de_dp
    return final_pos, final_vel
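A hedged usage sketch for simulate_dynamics above; the toy Gaussian energy function and the shapes are illustrative assumptions, not part of the a-nice-mc code:

# Standard-normal energy E(x) = 0.5 * ||x||^2, one scalar per batch row.
energy_fn = lambda pos: 0.5 * tf.reduce_sum(tf.square(pos), axis=1)

initial_pos = tf.random_normal([16, 2])  # 16 chains in 2 dimensions
initial_vel = tf.random_normal([16, 2])
final_pos, final_vel = simulate_dynamics(
    initial_pos, initial_vel, stepsize=0.1, n_steps=10, energy_fn=energy_fn)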
deepfool.py (project: tensorflow-adversarial, author: gongzhitaao)
def _deepfool2(model, x, epochs, eta, clip_min, clip_max, min_prob):
    y0 = tf.stop_gradient(tf.reshape(model(x), [-1])[0])
    y0 = tf.to_int32(tf.greater(y0, 0.5))

    def _cond(i, z):
        xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
        y = tf.stop_gradient(tf.reshape(model(xadv), [-1])[0])
        y = tf.to_int32(tf.greater(y, 0.5))
        return tf.logical_and(tf.less(i, epochs), tf.equal(y0, y))

    def _body(i, z):
        xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
        y = tf.reshape(model(xadv), [-1])[0]
        g = tf.gradients(y, xadv)[0]
        dx = - y * g / tf.norm(g)
        return i+1, z+dx

    _, noise = tf.while_loop(_cond, _body, [0, tf.zeros_like(x)],
                             name='_deepfool2_impl', back_prop=False)
    return noise
pondering_rnn.py (project: sonnet, author: deepmind)
def _body(self, x, cumul_out, prev_state, cumul_state,
            cumul_halting, iteration, remainder, halting_linear, x_ones):
    """The `body` of `tf.while_loop`."""
    # Increase iteration count only for those elements that are still running.
    all_ones = tf.constant(1, shape=(self._batch_size, 1), dtype=self._dtype)
    is_iteration_over = tf.equal(cumul_halting, all_ones)
    next_iteration = tf.where(is_iteration_over, iteration, iteration + 1)
    out, next_state = self._core(x, prev_state)
    # Get part of state used to compute halting values.
    halting_input = halting_linear(self._get_state_for_halting(next_state))
    halting = tf.sigmoid(halting_input, name="halting")
    next_cumul_halting_raw = cumul_halting + halting
    over_threshold = next_cumul_halting_raw > self._threshold
    next_cumul_halting = tf.where(over_threshold, all_ones,
                                  next_cumul_halting_raw)
    next_remainder = tf.where(over_threshold, remainder,
                              1 - next_cumul_halting_raw)
    p = next_cumul_halting - cumul_halting
    next_cumul_state = _nested_add(cumul_state,
                                   _nested_unary_mul(next_state, p))
    next_cumul_out = cumul_out + p * out

    return (x_ones, next_cumul_out, next_state, next_cumul_state,
            next_cumul_halting, next_iteration, next_remainder)
layers.py (project: LiTeFlow, author: petrux)
def _call_helper(self):
        time = tf.constant(0, dtype=tf.int32)
        inp = self._decoder.init_input()
        state = self._decoder.init_state()
        finished = tf.tile([False], [utils.get_dimension(inp, 0)])
        output_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
        loop_vars = [time, inp, state, finished, output_ta]
        results = tf.while_loop(
            cond=self.cond, body=self.body, loop_vars=loop_vars,
            parallel_iterations=self._parallel_iterations,
            swap_memory=self._swap_memory)
        output_ta = results[-1]
        output = output_ta.stack()
        # [time, batch, units] -> [batch, time, units]
        output = tf.transpose(output, [1, 0, 2])
        state = results[2]
        return output, state
tensor_ops.py (project: hart, author: akosiorek)
def broadcast_against(tensor, against_expr):
    """Adds trailing dimensions to mask to enable broadcasting against data

    :param tensor: tensor to be broadcasted
    :param against_expr: tensor will be broadcasted against it
    :return: mask expr with tf.rank(mask) == tf.rank(data)
    """

    def cond(data, tensor):
        return tf.less(tf.rank(tensor), tf.rank(data))

    def body(data, tensor):
        return data, tf.expand_dims(tensor, -1)

    shape_invariants = [against_expr.get_shape(), tf.TensorShape(None)]
    _, tensor = tf.while_loop(cond, body, [against_expr, tensor], shape_invariants)
    return tensor
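A quick usage sketch for broadcast_against; the shapes are illustrative assumptions:

data = tf.zeros([4, 5, 6])
mask = tf.ones([4])
mask_expanded = broadcast_against(mask, data)  # trailing dims added: [4] -> [4, 1, 1]
masked = data * mask_expanded                  # now broadcasts against [4, 5, 6]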
feudal_network.py (project: tensorflow-rl, author: steveKapturowski)
def _build_ops(self):
        i0 = tf.constant(0, dtype=tf.int32)
        loop_condition = lambda i, inputs, state: tf.less(i, self.max_steps)

        def body(i, inputs, full_state):
            idx = i % self.num_cores
            prev_state = full_state[idx]
            inputs, full_state[idx] = self.shared_cell(inputs, prev_state)

            return i+1, inputs, full_state

        _, inputs, full_state = tf.while_loop(
            loop_condition,
            body,
            loop_vars=[i0,
                       self.inputs,
                       self.initial_state])
base_attention.py (project: neuralmonkey, author: ufal)
def empty_attention_loop_state() -> AttentionLoopStateTA:
    """Create an empty attention loop state.

    The attention loop state is a technical object for storing the attention
    distributions and the context vectors in time. It is used with the
    ``tf.while_loop`` dynamic implementation of the decoder.

    This function returns an empty attention loop state which means there are
    two empty arrays, one for attention distributions in time, and one for
    the attention context vectors in time.
    """
    return AttentionLoopStateTA(
        contexts=tf.TensorArray(
            dtype=tf.float32, size=0, dynamic_size=True,
            name="contexts"),
        weights=tf.TensorArray(
            dtype=tf.float32, size=0, dynamic_size=True,
            name="distributions", clear_after_read=False))
meta_test.py (project: learning-to-learn, author: deepmind)
def testWhileLoopProblem(self):
    """Tests L2L applied to problem with while loop."""
    def while_loop_problem():
      x = tf.get_variable("x", shape=[], initializer=tf.ones_initializer())

      # Strange way of squaring the variable.
      _, x_squared = tf.while_loop(
          cond=lambda t, _: t < 1,
          body=lambda t, x: (t + 1, x * x),
          loop_vars=(0, x),
          name="loop")
      return x_squared

    optimizer = meta.MetaOptimizer(net=dict(
        net="CoordinateWiseDeepLSTM",
        net_options={"layers": ()}))
    minimize_ops = optimizer.meta_minimize(while_loop_problem, 3)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      train(sess, minimize_ops, 1, 2)
mgp-rnn-fit.py (project: MGP-RNN, author: jfutoma)
def get_probs_and_accuracy(preds,O):
    """
    helper function. we have a prediction for each MC sample of each observation
    in this batch.  need to distill the multiple preds from each MC into a single
    pred for this observation.  also get accuracy. use true probs to get ROC, PR curves in sklearn
    """
    all_probs = tf.exp(preds[:,1] - tf.reduce_logsumexp(preds, axis = 1)) #normalize; and drop a dim so only prob of positive case
    N = tf.cast(tf.shape(preds)[0]/n_mc_smps,tf.int32) #actual number of observations in preds, collapsing MC samples                    

    #predicted probability per observation; collapse the MC samples
    probs = tf.zeros([0]) #store all samples in a list, then concat into tensor at end
    #set up the tf while loop (we have to use one because the loop size is variable)
    def cond(i,probs):
        return i < N
    def body(i,probs):
        probs = tf.concat([probs,[tf.reduce_mean(tf.slice(all_probs,[i*n_mc_smps],[n_mc_smps]))]],0)
        return i+1,probs    
    i = tf.constant(0)
    i,probs = tf.while_loop(cond,body,loop_vars=[i,probs],shape_invariants=[i.get_shape(),tf.TensorShape([None])])

    #compare to truth; just use cutoff of 0.5 for right now to get accuracy
    correct_pred = tf.equal(tf.cast(tf.greater(probs,0.5),tf.int32), O)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 
    return probs,accuracy
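The growing-concat pattern above is the reason shape_invariants is passed: probs changes shape on every iteration, so its invariant has to be relaxed to tf.TensorShape([None]). A minimal standalone sketch of the same pattern:

i0 = tf.constant(0)
acc0 = tf.zeros([0])
_, acc = tf.while_loop(
    lambda i, acc: i < 4,
    lambda i, acc: (i + 1, tf.concat([acc, [tf.cast(i, tf.float32)]], 0)),
    [i0, acc0],
    shape_invariants=[i0.get_shape(), tf.TensorShape([None])])
# acc evaluates to [0., 1., 2., 3.]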
ccrc_model.py (project: Constituent-Centric-Neural-Architecture-for-Reading-Comprehension, author: shrshore)
def get_candidates_representations_in_sentence(self, sentence_candidate_answers, sentence_attentioned_hidden_states):
        candidate_answer_num=tf.gather(tf.shape(sentence_candidate_answers), 0)
        logging.warn('candidate_answer_num:{}'.format(candidate_answer_num))
        logging.warn('sentence_candidate_answers:{}'.format(sentence_candidate_answers))
        candidate_answer_nodeids=tf.gather(sentence_candidate_answers, 0) #a node idx list
        candidate_answer_hidden_list=tf.gather(sentence_attentioned_hidden_states, candidate_answer_nodeids)
        candidate_final_representations=self.get_candidate_answer_final_representations(candidate_answer_hidden_list)
        candidates_final_representations=tf.expand_dims(candidate_final_representations, 0)
        idx_cand=tf.constant(1)
        def _recurse_candidate_answer(candidate_final_representations, idx_cand):
            cur_candidate_answer_nodeids=tf.gather(sentence_candidate_answers, idx_cand)
            cur_candidate_answer_hidden_list=tf.gather(sentence_attentioned_hidden_states, cur_candidate_answer_nodeids)
            cur_candidate_final_representations=tf.expand_dims( 
                self.get_candidate_answer_final_representations(cur_candidate_answer_hidden_list), 0)
            candidate_final_representations=tf.concat([candidate_final_representations, cur_candidate_final_representations], axis=0)
            idx_cand=tf.add(idx_cand,1)
            return candidate_final_representations, idx_cand
        loop_cond=lambda a1,idx:tf.less(idx, candidate_answer_num)
        loop_vars=[candidates_final_representations, idx_cand]
        candidates_final_representations, idx_cand=tf.while_loop(loop_cond, _recurse_candidate_answer, loop_vars,
            shape_invariants=[tf.TensorShape([None, 2*self.config.hidden_dim]),idx_cand.get_shape()])
        return candidates_final_representations
meta_test.py (project: tf-tutorial, author: zchen0211)
def testWhileLoopProblem(self):
    """Tests L2L applied to problem with while loop."""
    def while_loop_problem():
      x = tf.get_variable("x", shape=[], initializer=tf.ones_initializer())

      # Strange way of squaring the variable.
      _, x_squared = tf.while_loop(
          cond=lambda t, _: t < 1,
          body=lambda t, x: (t + 1, x * x),
          loop_vars=(0, x),
          name="loop")
      return x_squared

    optimizer = meta.MetaOptimizer(net=dict(
        net="CoordinateWiseDeepLSTM",
        net_options={"layers": ()}))
    minimize_ops = optimizer.meta_minimize(while_loop_problem, 3)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      train(sess, minimize_ops, 1, 2)
cluster.py (project: section-detection, author: gulfaraz)
def meanShift(n_updates=-1):
    X1 = tf.expand_dims(tf.transpose(input_X), 0)
    X2 = tf.expand_dims(input_X, 0)
    C = init_C

    sbs_C = tf.TensorArray(dtype=tf.float32, size=10000, infer_shape=False)
    sbs_C = sbs_C.write(0, init_C)

    def _mean_shift_step(C):
        C = tf.expand_dims(C, 2)
        Y = tf.reduce_sum(tf.pow((C - X1) / window_radius, 2), axis=1)
        gY = tf.exp(-Y)
        num = tf.reduce_sum(tf.expand_dims(gY, 2) * X2, axis=1)
        denom = tf.reduce_sum(gY, axis=1, keep_dims=True)
        C = num / denom
        return C

    if n_updates > 0:
        for i in range(n_updates):
            C = _mean_shift_step(C)
            sbs_C = sbs_C.write(i + 1, C)
    else:
        def _mean_shift(i, C, sbs_C, max_diff):
            new_C = _mean_shift_step(C)
            max_diff = tf.reshape(tf.reduce_max(tf.sqrt(tf.reduce_sum(tf.pow(new_C - C, 2), axis=1))), [])
            sbs_C = sbs_C.write(i + 1, new_C)
            return i + 1, new_C, sbs_C, max_diff

        def _cond(i, C, sbs_C, max_diff):
            return max_diff > 1e-5

        n_updates, C, sbs_C, _ = tf.while_loop(cond=_cond,
                                       body=_mean_shift,
                                       loop_vars=(tf.constant(0), C, sbs_C, tf.constant(1e10)))

        n_updates = tf.Print(n_updates, [n_updates])


    return C, sbs_C.gather(tf.range(n_updates + 1))
cwt.py (project: cwt-tensorflow, author: nickgeoca)
def cwt(wav, widthCwt, wavelet):
    length = wav.shape[0]
    wav = tf.to_float(wav)
    wav = tf.reshape(wav, [1,length,1,1])

    # While loop functions
    def body(i, m): 
        v = conv1DWavelet(wav, i, wavelet)
        v = tf.reshape(v, [length, 1])

        m = tf.concat([m,v], 1)

        return [1 + i, m]

    def cond_(i, m):
        return tf.less_equal(i, widthCwt)

    # Initialize and run while loop
    emptyCwtMatrix = tf.zeros([length, 0], dtype='float32') 
    i = tf.constant(1)
    _, result = tf.while_loop(
            cond_,
            body,
            [i, emptyCwtMatrix],
            shape_invariants=[i.get_shape(), tf.TensorShape([length, None])],
            back_prop=False,
            parallel_iterations=1024,
            )
    result = tf.transpose(result)

    return result

# ------------------------------------------------------
#                 wavelets
memory.py (project: DNC, author: bgavran)
def init_memory(self, batch_size):
        """
        Returns the memory state for step 0. Used in DNC for the argument to tf.while_loop

        :return: the list of initial memory-state tensors for step 0
        """
        read_weightings = tf.fill([batch_size, self.memory_size, self.num_read_heads], Memory.epsilon)
        write_weighting = tf.fill([batch_size, self.memory_size], Memory.epsilon, name="Write_weighting")
        precedence_weighting = tf.zeros([batch_size, self.memory_size], name="Precedence_weighting")
        m = tf.fill([batch_size, self.memory_size, self.word_size], Memory.epsilon)  # initial memory matrix
        usage_vector = tf.zeros([batch_size, self.memory_size], name="Usage_vector")
        link_matrix = tf.zeros([batch_size, self.memory_size, self.memory_size])
        read_vectors = tf.fill([batch_size, self.num_read_heads, self.word_size], Memory.epsilon)

        return [read_weightings, write_weighting, usage_vector, precedence_weighting, m, link_matrix, read_vectors]
controller.py (project: DNC, author: bgavran)
def step(self, x, state, step):
        """
        Returns the output vector for just one time step.
        But I'm not sure anymore how much of this works, because of the way tf.while_loop is implemented...

        :param x: one vector representing input for one time step
        :param state: state of the controller
        :param step: current time step
        :return: output of the controller and its current state
        """
        raise NotImplementedError()
inputs.py (project: tf_classification, author: visipedia)
def get_distorted_inputs(original_image, bboxes, cfg, add_summaries):

    distorter = DistortedInputs(cfg, add_summaries)
    num_bboxes = tf.shape(bboxes)[0]
    distorted_inputs = tf.TensorArray(
        dtype=tf.float32,
        size=num_bboxes,
        element_shape=tf.TensorShape([1, cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
    )

    if add_summaries:
        image_summaries = tf.TensorArray(
            dtype=tf.float32,
            size=4,
            element_shape=tf.TensorShape([1, cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
        )
    else:
        image_summaries = tf.constant([])

    current_index = tf.constant(0, dtype=tf.int32)

    loop_vars = [original_image, bboxes, distorted_inputs, image_summaries, current_index]
    original_image, bboxes, distorted_inputs, image_summaries, current_index = tf.while_loop(
        cond=bbox_crop_loop_cond,
        body=distorter.apply,
        loop_vars=loop_vars,
        parallel_iterations=10, back_prop=False, swap_memory=False
    )

    distorted_inputs = distorted_inputs.concat()

    if add_summaries:
        tf.summary.image('0.original_image', image_summaries.read(0))
        tf.summary.image('1.image_with_random_crop', image_summaries.read(1))
        tf.summary.image('2.cropped_resized_image', image_summaries.read(2))
        tf.summary.image('3.final_distorted_image', image_summaries.read(3))


    return distorted_inputs
blocks.py (project: fold, author: tensorflow)
def eval(self, inp, feed_dict=None, session=None, tolist=False,
           use_while_loop=True):
    """Evaluates this block on `inp` in a TF session.

    Intended for testing and interactive development. If there are any
    uninitialized variables, they will be initialized prior to evaluation.

    Args:
      inp: An input to the block.
      feed_dict: A dictionary that maps `Tensor` objects to feed values.
      session: The TF session to be used. Defaults to the default session.
      tolist: A bool; whether to return (possibly nested) Python lists
        in place of NumPy arrays.
      use_while_loop: A bool; whether to use a `tf.while_loop` in evaluation
        (default) or to unroll the loop. Provided for testing and debugging,
        should not affect the result.

    Returns:
      The result of running the block. If `output_type` is tensor, then a
      NumPy array (or Python list, if `tolist` is true). If a tuple, then a
      tuple. If a sequence, then a list, or an instance of itertools.repeat
      in the case of an infinite sequence. If metrics are defined then `eval`
      returns a `(result, metrics)` tuple, where `metrics` is a dict mapping
      metric names to NumPy arrays.

    Raises:
      ValueError: If `session` is none and no default session is registered.
        If the block contains no TF tensors or ops then a session is not
        required.
    """
    # pylint: disable=protected-access
    return tensorflow_fold.blocks.block_compiler.Compiler._interactive(  # pylint: disable=line-too-long
        self)._eval(inp, feed_dict, session, tolist, use_while_loop)
hmc.py (project: zhusuan, author: thu-ml)
def _init_step_size(self, q, p, mass, get_gradient, get_log_posterior):
        factor = 1.5

        def loop_cond(step_size, last_acceptance_rate, cond):
            return cond

        def loop_body(step_size, last_acceptance_rate, cond):
            # Calculate acceptance_rate
            new_q, new_p = leapfrog_integrator(
                q, p, tf.constant(0.0), step_size / 2,
                get_gradient, mass)
            new_q, new_p = leapfrog_integrator(
                new_q, new_p, step_size, step_size / 2,
                get_gradient, mass)
            __, _, _, _, acceptance_rate = get_acceptance_rate(
                q, p, new_q, new_p,
                get_log_posterior, mass, self.data_axes)

            acceptance_rate = tf.reduce_mean(acceptance_rate)

            # Change step size and stopping criteria
            new_step_size = tf.cond(
                tf.less(acceptance_rate,
                        self.target_acceptance_rate),
                lambda: step_size * (1.0 / factor),
                lambda: step_size * factor)

            cond = tf.logical_not(tf.logical_xor(
                tf.less(last_acceptance_rate, self.target_acceptance_rate),
                tf.less(acceptance_rate, self.target_acceptance_rate)))
            return [new_step_size, acceptance_rate, cond]

        new_step_size, _, _ = tf.while_loop(
            loop_cond,
            loop_body,
            [self.step_size, tf.constant(1.0), tf.constant(True)]
        )
        return new_step_size
univariate.py (project: zhusuan, author: thu-ml)
def _sample(self, n_samples):
        try:
            # tf.random_poisson is available from TF v1.2 onwards
            random_poisson = tf.random_poisson
        except AttributeError:
            # This algorithm to generate random Poisson-distributed numbers is
            # given by Knuth [1]
            # [1]: https://en.wikipedia.org/wiki/
            #      Poisson_distribution#Generating_Poisson-distributed_random_variables
            shape = tf.concat([[n_samples], self.batch_shape], 0)
            static_n_samples = n_samples if isinstance(n_samples,
                                                       int) else None
            static_shape = tf.TensorShape([static_n_samples]).concatenate(
                self.get_batch_shape())
            enlam = tf.exp(-self.rate)
            x = tf.zeros(shape, dtype=self.dtype)
            prod = tf.ones(shape, dtype=self.param_dtype)

            def loop_cond(prod, x):
                return tf.reduce_any(tf.greater_equal(prod, enlam))

            def loop_body(prod, x):
                prod *= tf.random_uniform(tf.shape(prod), minval=0, maxval=1)
                x += tf.cast(tf.greater_equal(prod, enlam), dtype=self.dtype)
                return prod, x

            _, samples = tf.while_loop(
                loop_cond, loop_body, loop_vars=[prod, x],
                shape_invariants=[static_shape, static_shape])

            samples.set_shape(static_shape)
        else:
            samples = random_poisson(self.rate, [n_samples],
                                     dtype=self.param_dtype)
            if self.param_dtype != self.dtype:
                samples = tf.cast(samples, self.dtype)
        return samples
pondering_rnn.py (project: sonnet, author: deepmind)
def _cond(self, unused_x, unused_cumul_out, unused_prev_state,
            unused_cumul_state, cumul_halting, unused_iteration,
            unused_remainder):
    """The `cond` of the `tf.while_loop`."""
    return tf.reduce_any(cumul_halting < 1)
impl_helper_test.py (project: transform, author: tensorflow)
def testCreatePhasesWithLoop(self):
    # Test a preprocessing function with control flow.
    #
    # The loop represents
    #
    # i = 0
    # while i < 10:
    #   i += 1
    #   x -= 1
    #
    # To get an error in the case where apply_function is not called, we have
    # to call an analyzer first (see testCreatePhasesWithUnwrappedLoop).  So
    # we also do so here.
    def preprocessing_fn(inputs):
      def _subtract_ten(x):
        i = tf.constant(0)
        c = lambda i, x: tf.less(i, 10)
        b = lambda i, x: (tf.add(i, 1), tf.add(x, -1))
        return tf.while_loop(c, b, [i, x])[1]
      scaled_to_0_1 = mappers.scale_to_0_1(
          api.apply_function(_subtract_ten, inputs['x']))
      return {'x_scaled': scaled_to_0_1}

    input_schema = sch.Schema({
        'x': sch.ColumnSchema(tf.int32, [], sch.FixedColumnRepresentation())
    })
    graph, _, _ = impl_helper.run_preprocessing_fn(
        preprocessing_fn, input_schema)
    phases = impl_helper.create_phases(graph)
    self.assertEqual(len(phases), 1)
    self.assertEqual(len(phases[0].analyzers), 2)
impl_helper_test.py 文件源码 项目:transform 作者: tensorflow 项目源码 文件源码 阅读 29 收藏 0 点赞 0 评论 0
def testCreatePhasesWithUnwrappedLoop(self):
    # Test a preprocessing function with control flow.
    #
    # The loop represents
    #
    # i = 0
    # while i < 10:
    #   i += 1
    #   x -= 1
    #
    # We need to call an analyzer after the loop because only the transitive
    # parents of analyzers are inspected by create_phases
    def preprocessing_fn(inputs):
      def _subtract_ten(x):
        i = tf.constant(0)
        c = lambda i, x: tf.less(i, 10)
        b = lambda i, x: (tf.add(i, 1), tf.add(x, -1))
        return tf.while_loop(c, b, [i, x])[1]
      scaled_to_0_1 = mappers.scale_to_0_1(_subtract_ten(inputs['x']))
      return {'x_scaled': scaled_to_0_1}

    input_schema = sch.Schema({
        'x': sch.ColumnSchema(tf.int32, [], sch.FixedColumnRepresentation())
    })
    graph, _, _ = impl_helper.run_preprocessing_fn(
        preprocessing_fn, input_schema)
    with self.assertRaisesRegexp(ValueError, 'Cycle detected'):
      _ = impl_helper.create_phases(graph)
utils.py (project: tf.rasterizer, author: vahidk)
def sequential_for(fn, begin, end):

    def _cond(i):
        return tf.less(i, end)

    def _body(i):
        ops = fn(i)
        with tf.control_dependencies(ops):
            return i + 1

    return tf.while_loop(_cond, _body, [begin])
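A hedged usage sketch for sequential_for; the counter variable is an illustrative assumption. The point is that the control dependency forces fn(i) to run before i + 1 is produced, so the side effects happen once per iteration, in order:

counter = tf.Variable(0)

def fn(i):
    return [tf.assign_add(counter, 1)]  # the op(s) to run at iteration i

loop = sequential_for(fn, tf.constant(0), tf.constant(5))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(loop)
    print(sess.run(counter))  # 5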
hmc.py (project: GPflow, author: GPflow)
def _while_loop(cond, body, args):
    return tf.while_loop(cond, body, args, parallel_iterations=1, back_prop=False)
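Design note: parallel_iterations=1 serializes the loop, which is what you want when iterations are truly sequential (as in MCMC transitions), and back_prop=False tells TensorFlow not to keep per-iteration tensors for the backward pass, cutting memory use in sampling code that never differentiates through the loop.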
ssd.py (project: SSD_tensorflow_VOC, author: LevinJ)
def __match_no_miss(self,gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores,jaccard,gt_labels,gt_bboxes, num_anchors):
        #make sure every ground truth box can be matched to at least one anchor box
        max_inds = tf.cast(tf.argmax(jaccard, axis=1),tf.int32)
        def cond(i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores):
            r = tf.less(i, tf.shape(gt_labels)[0])
            return r
        def body(i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores):

            #update gt_anchors_labels
            updates = tf.reshape(gt_labels[i], [-1])
            indices = tf.reshape(max_inds[i],[1,-1])
            shape = tf.reshape(num_anchors,[-1])


            new_labels = tf.scatter_nd(indices, updates, shape)
            new_mask = tf.cast(new_labels, tf.bool)
            gt_anchors_labels = tf.where(new_mask, new_labels, gt_anchors_labels)

            #update gt_anchors_bboxes
            updates = tf.reshape(gt_bboxes[i], [1,-1])
            indices = tf.reshape(max_inds[i],[1,-1])
            shape = tf.shape(gt_anchors_bboxes)
            new_bboxes = tf.scatter_nd(indices, updates, shape)
            gt_anchors_bboxes = tf.where(new_mask, new_bboxes, gt_anchors_bboxes)

            #update gt_anchors_scores
            updates = tf.reshape(jaccard[i, max_inds[i]], [-1])
            indices = tf.reshape(max_inds[i],[1,-1])
            shape = tf.reshape(num_anchors,[-1])
            new_scores = tf.scatter_nd(indices, updates, shape)
            gt_anchors_scores = tf.where(new_mask, new_scores, gt_anchors_scores)



            return [i+1,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores]


        i = 0
        [i,gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores] = tf.while_loop(cond, body,[i,gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores])

        return gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores
ACTAttentionModel.py (project: act-rte-inference, author: DeNeutoy)
def do_act_steps(self, premise, hypothesis):


        self.rep_size = premise.get_shape()[-1].value

        self.one_minus_eps = tf.constant(1.0 - self.config.eps, tf.float32,[self.batch_size])
        self.N = tf.constant(self.config.max_computation, tf.float32,[self.batch_size])


        prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
        prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
        counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
        initial_state = tf.zeros([self.batch_size, 2*self.rep_size], tf.float32, name="state")
        acc_states = tf.zeros([self.batch_size,2*self.rep_size], tf.float32, name="state_accumulator")
        batch_mask = tf.constant(True, tf.bool,[self.batch_size])

        # While loop stops when this predicate is FALSE.
        # I.e. all (probability < 1-eps AND counter < N) are false.

        pred = lambda batch_mask,prob_compare,prob,\
                      counter,state,premise, hypothesis ,acc_state:\
            tf.reduce_any(
                tf.logical_and(
                    tf.less(prob_compare,self.one_minus_eps),
                    tf.less(counter,self.N)))
                # only stop if all of the batch have passed either threshold

            # Do while loop iterations until predicate above is false.
        _,_,remainders,iterations,_,_,_,state = \
            tf.while_loop(pred,self.inference_step,
            [batch_mask,prob_compare,prob,
             counter,initial_state, premise, hypothesis, acc_states])

        return state, remainders, iterations
ACTDAModel.py (project: act-rte-inference, author: DeNeutoy)
def do_inference_steps(self, initial_state, premise, hypothesis):


        self.one_minus_eps = tf.constant(1.0 - self.config.eps, tf.float32,[self.batch_size])
        self.N = tf.constant(self.config.max_computation, tf.float32,[self.batch_size])


        prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
        prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
        counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
        acc_states = tf.zeros_like(initial_state, tf.float32, name="state_accumulator")
        batch_mask = tf.constant(True, tf.bool,[self.batch_size])

        # While loop stops when this predicate is FALSE.
        # I.e. all (probability < 1-eps AND counter < N) are false.

        pred = lambda batch_mask,prob_compare,prob,\
                      counter,state,premise, hypothesis ,acc_state:\
            tf.reduce_any(
                tf.logical_and(
                    tf.less(prob_compare,self.one_minus_eps),
                    tf.less(counter,self.N)))
                # only stop if all of the batch have passed either threshold

            # Do while loop iterations until predicate above is false.
        _,_,remainders,iterations,_,_,_,state = \
            tf.while_loop(pred,self.inference_step,
            [batch_mask,prob_compare,prob,
             counter,initial_state,premise, hypothesis, acc_states])

        return state, remainders, iterations
AdaptiveIAAModel.py (project: act-rte-inference, author: DeNeutoy)
def do_inference_steps(self, initial_state, premise, hypothesis):


        self.one_minus_eps = tf.constant(1.0 - self.config.eps, tf.float32,[self.batch_size])
        self.N = tf.constant(self.config.max_computation, tf.float32,[self.batch_size])


        prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
        prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
        counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
        acc_states = tf.zeros_like(initial_state, tf.float32, name="state_accumulator")
        batch_mask = tf.constant(True, tf.bool,[self.batch_size])

        # While loop stops when this predicate is FALSE.
        # I.e. all (probability < 1-eps AND counter < N) are false.

        pred = lambda batch_mask,prob_compare,prob,\
                      counter,state,premise, hypothesis ,acc_state:\
            tf.reduce_any(
                tf.logical_and(
                    tf.less(prob_compare,self.one_minus_eps),
                    tf.less(counter,self.N)))
                # only stop if all of the batch have passed either threshold

            # Do while loop iterations until predicate above is false.
        _,_,remainders,iterations,_,_,_,state = \
            tf.while_loop(pred,self.inference_step,
            [batch_mask,prob_compare,prob,
             counter,initial_state,premise, hypothesis, acc_states])

        return state, remainders, iterations

