Python while_loop() example source code

span_prediction_ops.py (project: document-qa, author: allenai)
def best_span_from_bounds(start_logits, end_logits, bound=None):
    """
    Brute-force approach to finding the best span from start/end logits in TensorFlow; still usually
    faster than the Python dynamic-programming version
    """
    b = tf.shape(start_logits)[0]

    # Using `top_k` to get the index and value at once is faster
    # than using argmax and then gather to get the value
    top_k = tf.nn.top_k(start_logits + end_logits, k=1)
    values, indices = [tf.squeeze(x, axis=[1]) for x in top_k]

    # Convert to (start_position, length) format
    indices = tf.stack([indices, tf.fill((b,), 0)], axis=1)

    # TODO Might be better to build the batch x n_word x n_word
    # matrix and use tf.matrix_band_part to zero out the unwanted ones...

    if bound is None:
        n_lengths = tf.shape(start_logits)[1]
    else:
        # take the min in case the bound > the context
        n_lengths = tf.minimum(bound, tf.shape(start_logits)[1])

    def compute(i, values, indices):
        top_k = tf.nn.top_k(start_logits[:, :-i] + end_logits[:, i:])
        b_values, b_indices = [tf.squeeze(x, axis=[1]) for x in top_k]

        b_indices = tf.stack([b_indices, tf.fill((b, ), i)], axis=1)
        indices = tf.where(b_values > values, b_indices, indices)
        values = tf.maximum(values, b_values)
        return i+1, values, indices

    _, values, indices = tf.while_loop(
        lambda ix, values, indices: ix < n_lengths,
        compute,
        [1, values, indices],
        back_prop=False)

    spans = tf.stack([indices[:, 0], indices[:, 0] + indices[:, 1]], axis=1)
    return spans, values
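
A minimal usage sketch (my own example, not from the repo), assuming TensorFlow 1.x graph mode and the function above in scope:

import numpy as np
import tensorflow as tf

# Hypothetical toy inputs: a batch of 2 contexts with 6 tokens each.
start = tf.constant(np.random.randn(2, 6).astype(np.float32))
end = tf.constant(np.random.randn(2, 6).astype(np.float32))

# Restrict spans to at most 3 tokens via `bound`.
spans, scores = best_span_from_bounds(start, end, bound=3)

with tf.Session() as sess:
    print(sess.run([spans, scores]))  # (start, end) indices and their summed logits
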
autoregressive.py (project: neuralmonkey, author: ufal)
def decoding_loop(self, train_mode: bool, sample: bool = False) -> Tuple[
            tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
        """Run the decoding while loop.

        Calls get_initial_loop_state and constructs a tf.while_loop
        with the continuation criterion returned by loop_continue_criterion
        and the body function returned by get_body.

        After finishing the tf.while_loop, it calls finalize_loop
        to further postprocess the final decoder loop state (usually
        by stacking TensorArrays containing decoding histories).

        Arguments:
            train_mode: Boolean flag, telling whether this is
                a training run.
            sample: Boolean flag, telling whether we should sample
                the output symbols from the output distribution instead
                of using argmax or gold data.
        """
        initial_loop_state = self.get_initial_loop_state()
        final_loop_state = tf.while_loop(
            self.loop_continue_criterion,
            self.get_body(train_mode, sample),
            initial_loop_state)

        self.finalize_loop(final_loop_state, train_mode)

        logits = final_loop_state.histories.logits.stack()
        decoder_outputs = final_loop_state.histories.decoder_outputs.stack()
        decoded = final_loop_state.histories.outputs.stack()

        # TODO mask should include also the end symbol
        mask = final_loop_state.histories.mask.stack()

        return logits, decoder_outputs, mask, decoded
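
The method above depends on the Neural Monkey decoder classes, which are not shown on this page. As a rough, self-contained sketch of the same pattern (my own illustrative names, not project code), a tf.while_loop that writes one output per step into a TensorArray and stacks it afterwards looks like this:

import tensorflow as tf

max_steps = 4
batch_size = 2

def cond(step, outputs_ta):
    return step < max_steps

def body(step, outputs_ta):
    # Stand-in for one decoding step: here just a constant "logit" per step.
    step_output = tf.fill([batch_size, 3], tf.cast(step, tf.float32))
    return step + 1, outputs_ta.write(step, step_output)

outputs_ta = tf.TensorArray(tf.float32, size=max_steps)
_, outputs_ta = tf.while_loop(cond, body, [tf.constant(0), outputs_ta])

# Analogous to `histories.logits.stack()` above: shape (max_steps, batch, 3).
logits = outputs_ta.stack()
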
beam_search_decoder.py (project: neuralmonkey, author: ufal)
def _decoding_loop(self) -> BeamSearchOutput:
        # collect attention objects
        beam_body = self.get_body()

        initial_loop_state = self.get_initial_loop_state()

        def cond(*args) -> tf.Tensor:
            bsls = BeamSearchLoopState(*args)
            return tf.less(
                bsls.decoder_loop_state.feedables.step - 1, self._max_steps)

        # First step has to be run manually because while_loop needs the same
        # shapes between steps and the first beam state is not beam-sized, but
        # just a single state.
        #
        # When running ensembles, we want to provide
        # ensembled logprobs to the beam_body before manually running
        # the first step
        next_bs_loop_state = tf.cond(
            cond(*initial_loop_state),
            lambda: beam_body(*initial_loop_state),
            lambda: initial_loop_state)

        final_state = tf.while_loop(cond, beam_body, next_bs_loop_state)
        dec_loop_state = final_state.decoder_loop_state
        bs_state = final_state.bs_state

        scores = final_state.bs_output.scores.stack()
        parent_ids = final_state.bs_output.parent_ids.stack()
        token_ids = final_state.bs_output.token_ids.stack()

        # TODO: return att_loop_states properly
        return BeamSearchOutput(
            last_search_step_output=SearchStepOutput(
                scores=scores,
                parent_ids=parent_ids,
                token_ids=token_ids),
            last_dec_loop_state=dec_loop_state.feedables,
            last_search_state=bs_state,
            attention_loop_states=[])
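
The reason for running the first step outside the loop is tf.while_loop's fixed-shape requirement: the initial state holds a single hypothesis, while every later state is beam-sized (the tf.cond in the original additionally guards the case where no step should run at all). A hedged, stripped-down sketch of that idea, with illustrative names rather than Neural Monkey code:

import tensorflow as tf

beam_size = 3
max_steps = 5

def cond(step, scores):
    return step < max_steps

def body(step, scores):
    # Stand-in for one beam-search step; the real body rescores and reorders the beam.
    return step + 1, scores + 1.0

# The initial state holds a single hypothesis, not a beam.
init_scores = tf.zeros([1, 4])

# Step 0 is applied outside the loop so that every iteration inside
# tf.while_loop sees the same, beam-sized shape.
scores_after_step0 = tf.tile(init_scores, [beam_size, 1])

_, final_scores = tf.while_loop(cond, body, [tf.constant(1), scores_after_step0])
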
iterative.py (project: tensorforce, author: reinforceio)
def tf_solve(self, fn_x, x_init, *args):
        """
        Iteratively solves an equation/optimization for $x$ involving an expression $f(x)$.

        Args:
            fn_x: A callable returning an expression $f(x)$ given $x$.
            x_init: Initial solution guess $x_0$.
            *args: Additional solver-specific arguments.

        Returns:
            A solution $x$ to the problem as given by the solver.
        """
        self.fn_x = fn_x

        # Initialization step
        args = self.initialize(x_init, *args)

        # Iteration loop with termination condition
        if self.unroll_loop:
            # Unrolled for loop
            for _ in range(self.max_iterations):
                next_step = self.next_step(*args)
                step = (lambda: self.step(*args))
                do_nothing = (lambda: args)
                args = tf.cond(pred=next_step, true_fn=step, false_fn=do_nothing)

        else:
            # TensorFlow while loop
            args = tf.while_loop(cond=self.next_step, body=self.step, loop_vars=args)

        # First argument contains solution
        return args[0]
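
For a concrete instance of the non-unrolled branch (my own toy example, not TensorForce code), here is a small Newton iteration for square roots expressed with tf.while_loop; the names and the fixed iteration count are assumptions of this sketch:

import tensorflow as tf

def newton_sqrt(value, iterations=20):
    # Solve x**2 = value by iterating x <- 0.5 * (x + value / x).
    value = tf.convert_to_tensor(value, dtype=tf.float32)

    def cond(i, x):
        return i < iterations

    def body(i, x):
        return i + 1, 0.5 * (x + value / x)

    _, x = tf.while_loop(cond, body, [tf.constant(0), value])
    return x

with tf.Session() as sess:
    print(sess.run(newton_sqrt(2.0)))  # approximately 1.4142
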
yolo_v1.py (project: yolo-tensorflow, author: persistforever)
def calculate_loss(self, logits):
        # class and box predictions (class_pred / box_pred)
        self.box_preds = logits

        # loop over each example
        results = tf.while_loop(
            cond=self._one_example_cond, 
            body=self._one_example_body, 
            loop_vars=[tf.constant(0), self.batch_size,
                       tf.constant(0.0), tf.constant(0.0), tf.constant(0.0), tf.constant(0.0),
                       tf.constant(0.0), tf.constant(0.0), tf.constant(0.0), 
                       tf.constant(0.0), tf.constant(0.0)])
        coord_loss = results[2]
        object_loss = results[3]
        noobject_loss = results[4]
        class_loss = results[5]
        iou_value = results[6]
        object_value = results[7]
        anyobject_value = results[8]
        recall_value = results[9]
        class_value = results[10]

        # scale the losses
        coord_loss = coord_loss * self.coord_scale / self.batch_size
        object_loss = object_loss * self.object_scale / self.batch_size
        noobject_loss = noobject_loss * self.noobject_scale / self.batch_size
        class_loss = class_loss * self.class_scale / self.batch_size
        # evaluation metrics
        iou_value /= tf.reduce_sum(tf.cast(self.object_nums, tf.float32), axis=[0])
        object_value /= tf.reduce_sum(tf.cast(self.object_nums, tf.float32), axis=[0])
        anyobject_value /= (self.batch_size * self.cell_size * self.cell_size * self.n_boxes)
        recall_value /= tf.reduce_sum(tf.cast(self.object_nums, tf.float32), axis=[0])
        class_value /= tf.reduce_sum(tf.cast(self.object_nums, tf.float32), axis=[0])

        return coord_loss, object_loss, noobject_loss, class_loss, \
            iou_value, object_value, anyobject_value, recall_value, class_value
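
The `_one_example_cond` / `_one_example_body` helpers are not included on this page; the per-example accumulation pattern they implement is essentially the following simplified sketch (the per-example loss here is a made-up stand-in):

import tensorflow as tf

batch_size = 4
per_example_loss = tf.random_uniform([batch_size])  # stand-in for one example's loss

def cond(i, total):
    return i < batch_size

def body(i, total):
    return i + 1, total + per_example_loss[i]

_, total_loss = tf.while_loop(cond, body, [tf.constant(0), tf.constant(0.0)])
mean_loss = total_loss / batch_size
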
yolo_prepare.py (project: yolo-tensorflow, author: persistforever)
def calculate_loss(self, logits):
        logits = tf.reshape(
            logits, shape=[self.batch_size, self.cell_size, self.cell_size, 
                           self.n_boxes, 5])

        # class and box predictions (class_pred / box_pred)
        self.box_preds = tf.concat(
            [tf.sigmoid(logits[:,:,:,:,0:2]),
             logits[:,:,:,:,2:4],
             tf.sigmoid(logits[:,:,:,:,4:5])], axis=4)

        # loop over each example
        results = tf.while_loop(
            cond=self._one_example_cond, 
            body=self._one_example_body, 
            loop_vars=[tf.constant(0), self.batch_size,
                       tf.constant(0.0), tf.constant(0.0), tf.constant(0.0),
                       tf.constant(0.0), tf.constant(0.0), tf.constant(0.0), tf.constant(0.0)])
        coord_loss = results[2]
        object_loss = results[3]
        noobject_loss = results[4]
        iou_value = results[5]
        object_value = results[6]
        anyobject_value = results[7]
        recall_value = results[8]

        # scale the losses
        coord_loss = coord_loss * self.coord_scale / self.batch_size
        object_loss = object_loss * self.object_scale / self.batch_size
        noobject_loss = noobject_loss * self.noobject_scale / self.batch_size
        # evaluation metrics
        iou_value /= tf.reduce_sum(tf.cast(self.object_nums, tf.float32), axis=[0])
        object_value /= tf.reduce_sum(tf.cast(self.object_nums, tf.float32), axis=[0])
        anyobject_value /= (self.batch_size * self.cell_size * self.cell_size * self.n_boxes)
        recall_value /= tf.reduce_sum(tf.cast(self.object_nums, tf.float32), axis=[0])

        return coord_loss, object_loss, noobject_loss, \
            iou_value, object_value, anyobject_value, recall_value
mrt_utils.py (project: THUMT, author: thumt)
def sampler(symbols_to_logits_fn, initial_ids, sample_num, decode_length,
            vocab_size, eos_id, features=None):
    batch_size = tf.shape(initial_ids)[0]

    # Expand each batch to sample_num
    seqlen = tf.constant(0)
    alive_seq = tf.tile(tf.expand_dims(initial_ids, 1), [1, sample_num])
    alive_seq = tf.expand_dims(alive_seq, 2)  # (batch_size, sample_num, 1)
    sa = tf.shape(alive_seq)
    alive_seq = tf.reshape(alive_seq, [sa[0]*sa[1],1])

    def _is_finished(i, alive_seq):
        return i < decode_length

    def inner_loop(i, alive_seq):
        logit = symbols_to_logits_fn(alive_seq)[0]
        new_samples = tf.multinomial(logit, 1)
        new_samples = tf.to_int32(new_samples)
        alive_seq = tf.concat([alive_seq, new_samples], 1)
        return (i + 1, alive_seq)

    (_, alive_seq) = tf.while_loop(
        _is_finished,
        inner_loop,
        [seqlen, alive_seq],
        shape_invariants=[
            tf.TensorShape([]),
            tf.TensorShape([None, None])
        ],
        parallel_iterations=1,
        back_prop=False
    )
    alive_seq.set_shape((sample_num, None))

    return alive_seq
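
The key detail above is the shape_invariants argument: alive_seq grows by one column per step, so its static shape must be relaxed to [None, None]. A minimal standalone version of that trick (my own sketch; dummy tokens replace the tf.multinomial samples):

import tensorflow as tf

max_len = 5
seq = tf.zeros([2, 1], dtype=tf.int32)  # (batch, current_length), starts at length 1

def cond(i, seq):
    return i < max_len

def body(i, seq):
    next_token = tf.fill([2, 1], i)  # dummy "sampled" token
    return i + 1, tf.concat([seq, next_token], axis=1)

_, seq = tf.while_loop(
    cond, body, [tf.constant(0), seq],
    shape_invariants=[tf.TensorShape([]), tf.TensorShape([None, None])])
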
layers.py (project: mist-rnns, author: rdipietro)
def _compute_states(self):
    """ Compute hidden states.

    Returns:
      A tuple, (outputs, states).
    """

    _inputs = tf.transpose(self.inputs, [1, 0, 2])
    x_ta = tf.TensorArray(tf.float32, size=self.length).unstack(_inputs)
    h_ta = tf.TensorArray(tf.float32, size=self.length)

    def cond(t, h, h_ta):
      return tf.less(t, self.length)

    def body(t, h, h_ta):

      x = x_ta.read(t)
      num_units, input_size = self.num_hidden_units, self.input_size

      with tf.variable_scope('simple_rnn'):
        h_new = self.activation(self._linear(h, x, num_units, scope='simple_rnn'))

      h_ta_new = h_ta.write(t, h_new)
      return t + 1, h_new, h_ta_new

    t = tf.constant(0)
    h = tf.squeeze(self.initial_states, [1])
    _, _, h_ta = tf.while_loop(cond, body, [t, h, h_ta])

    states = tf.transpose(h_ta.stack(), [1, 0, 2], name='states')
    outputs = tf.identity(states, name='outputs')
    return outputs, states
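
The `_linear` helper and the surrounding layer class are not shown here. A self-contained toy version of the same read/compute/write loop, with random weights and illustrative names (an assumption of this sketch, not the mist-rnns code), could look like this:

import numpy as np
import tensorflow as tf

T, B, D, H = 6, 2, 3, 4  # time steps, batch, input size, hidden size
inputs = tf.constant(np.random.randn(T, B, D).astype(np.float32))  # time-major
W = tf.constant(np.random.randn(D, H).astype(np.float32))
U = tf.constant(np.random.randn(H, H).astype(np.float32))

x_ta = tf.TensorArray(tf.float32, size=T).unstack(inputs)
h_ta = tf.TensorArray(tf.float32, size=T)

def cond(t, h, h_ta):
    return t < T

def body(t, h, h_ta):
    h_new = tf.tanh(tf.matmul(x_ta.read(t), W) + tf.matmul(h, U))
    return t + 1, h_new, h_ta.write(t, h_new)

_, _, h_ta = tf.while_loop(cond, body, [tf.constant(0), tf.zeros([B, H]), h_ta])
states = tf.transpose(h_ta.stack(), [1, 0, 2])  # back to (batch, time, hidden)
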
layers.py (project: mist-rnns, author: rdipietro)
def _compute_states(self):

    _inputs = tf.transpose(self.inputs, [1, 0, 2])
    x_ta = tf.TensorArray(tf.float32, size=self.length).unstack(_inputs)
    h_ta = tf.TensorArray(tf.float32, size=self.length)
    c_ta = tf.TensorArray(tf.float32, size=self.length)

    def cond(t, c, h, c_ta, h_ta):
      return tf.less(t, self.length)

    def body(t, c, h, c_ta, h_ta):

      x = x_ta.read(t)
      num_units, input_size = self.num_hidden_units, self.input_size

      with tf.variable_scope('lstm'):
        c_tilde = self.activation(self._linear(h, x, num_units, scope='c'))
        i = tf.nn.sigmoid(self._linear(h, x, num_units, scope='i'))
        f = tf.nn.sigmoid(self._linear(h, x, num_units, shift=self.optional_bias_shift, scope='f'))
        o = tf.nn.sigmoid(self._linear(h, x, num_units, scope='o'))
        c_new = i * c_tilde + f * c
        h_new = o * self.activation(c_new)

      c_ta_new = c_ta.write(t, c_new)
      h_ta_new = h_ta.write(t, h_new)
      return t + 1, c_new, h_new, c_ta_new, h_ta_new

    t = tf.constant(0)
    c, h = tf.split(tf.squeeze(self.initial_states, [1]), 2, axis=1)
    _, _, _, c_ta, h_ta = tf.while_loop(cond, body, [t, c, h, c_ta, h_ta])

    outputs = tf.transpose(h_ta.stack(), [1, 0, 2], name='outputs')
    cells = tf.transpose(c_ta.stack(), [1, 0, 2])
    states = tf.concat([cells, outputs], axis=2, name='states')
    return outputs, states
layers.py (project: mist-rnns, author: rdipietro)
def _compute_states(self):

    _inputs = tf.transpose(self.inputs, [1, 0, 2])
    x_ta = tf.TensorArray(tf.float32, size=self.length).unstack(_inputs)
    h_ta = tf.TensorArray(tf.float32, size=self.length)

    def cond(t, h, h_ta):
      return tf.less(t, self.length)

    def body(t, h, h_ta):

      x = x_ta.read(t)
      num_units, input_size = self.num_hidden_units, self.input_size

      with tf.variable_scope('gru'):
        r = tf.nn.sigmoid(self._linear(h, x, num_units, scope='r'))
        h_pre_act = r * h
        h_tilde = self.activation(self._linear(h_pre_act, x, num_units, scope='h'))

        z = tf.nn.sigmoid(self._linear(h, x, num_units, shift=self.optional_bias_shift, scope='z'))
        h_new = z * h + (1 - z) * h_tilde

      h_ta_new = h_ta.write(t, h_new)
      return t + 1, h_new, h_ta_new

    t = tf.constant(0)
    h = tf.squeeze(self.initial_states, [1])
    _, _, h_ta = tf.while_loop(cond, body, [t, h, h_ta])

    states = tf.transpose(h_ta.stack(), [1, 0, 2], name='states')
    outputs = tf.identity(states, name='outputs')
    return outputs, states
mgp-rnn-fit.py (project: MGP-RNN, author: jfutoma)
def get_GP_samples(Y,T,X,ind_kf,ind_kt,num_obs_times,num_obs_values,
                   num_rnn_grid_times,med_cov_grid):
    """
    returns samples from GP at evenly-spaced gridpoints
    """ 
    grid_max = tf.shape(X)[1]
    Z = tf.zeros([0,grid_max,input_dim])

    N = tf.shape(T)[0] #number of observations

    #setup tf while loop (have to use this bc loop size is variable)
    def cond(i,Z):
        return i<N

    def body(i,Z):
        Yi = tf.reshape(tf.slice(Y,[i,0],[1,num_obs_values[i]]),[-1])
        Ti = tf.reshape(tf.slice(T,[i,0],[1,num_obs_times[i]]),[-1])
        ind_kfi = tf.reshape(tf.slice(ind_kf,[i,0],[1,num_obs_values[i]]),[-1])
        ind_kti = tf.reshape(tf.slice(ind_kt,[i,0],[1,num_obs_values[i]]),[-1])
        Xi = tf.reshape(tf.slice(X,[i,0],[1,num_rnn_grid_times[i]]),[-1])
        X_len = num_rnn_grid_times[i]

        GP_draws = draw_GP(Yi,Ti,Xi,ind_kfi,ind_kti)
        pad_len = grid_max-X_len #pad by this much
        padded_GP_draws = tf.concat([GP_draws,tf.zeros((n_mc_smps,pad_len,M))],1) 

        medcovs = tf.slice(med_cov_grid,[i,0,0],[1,-1,-1])
        tiled_medcovs = tf.tile(medcovs,[n_mc_smps,1,1])
        padded_GPdraws_medcovs = tf.concat([padded_GP_draws,tiled_medcovs],2)

        Z = tf.concat([Z,padded_GPdraws_medcovs],0)        

        return i+1,Z  

    i = tf.constant(0)
    i,Z = tf.while_loop(cond,body,loop_vars=[i,Z],
                shape_invariants=[i.get_shape(),tf.TensorShape([None,None,None])])

    return Z
util.py (project: MGP-RNN, author: jfutoma)
def CG(A,b):
    """ Conjugate gradient, to get solution x = A^-1 * b,
    can be faster than using the Cholesky for large scale problems
    """
    b = tf.reshape(b,[-1])
    n = tf.shape(A)[0]
    x = tf.zeros([n]) 
    r_ = b 
    p = r_ 

    #These settings are somewhat arbitrary
    #You might want to test sensitivity to these
    CG_EPS = tf.cast(n/1000,"float")
    MAX_ITER = tf.div(n,250) + 3

    def cond(i,x,r,p):
        return tf.logical_and(i < MAX_ITER, tf.norm(r) > CG_EPS)

    def body(i,x,r_,p):        
        p_vec = tf.reshape(p,[-1,1])
        Ap = tf.reshape(tf.matmul(A,p_vec),[-1]) #make a vector

        alpha = dot(r_,r_)/dot(p,Ap)
        x = x + alpha*p
        r = r_ - alpha*Ap
        beta = dot(r,r)/dot(r_,r_)
        p = r + beta*p

        return i+1,x,r,p

    i = tf.constant(0)
    i,x,r,p = tf.while_loop(cond,body,loop_vars=[i,x,r_,p])

    return tf.reshape(x,[-1,1])
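
Assuming the CG function above is pasted into the same script, and assuming the repo's `dot` helper is a plain inner product (my reconstruction below, not shown on this page), it can be sanity-checked on a small symmetric positive-definite system:

import numpy as np
import tensorflow as tf

def dot(a, b):
    # Assumed stand-in for util.py's dot helper: inner product of two vectors.
    return tf.reduce_sum(a * b)

np.random.seed(0)
M = np.random.randn(50, 50).astype(np.float32)
A_np = M.dot(M.T) + 50.0 * np.eye(50, dtype=np.float32)  # SPD matrix
b_np = np.random.randn(50, 1).astype(np.float32)

x = CG(tf.constant(A_np), tf.constant(b_np))

with tf.Session() as sess:
    x_val = sess.run(x)
    # Residual after the capped CG iterations; note MAX_ITER is tiny for n = 50.
    print(np.linalg.norm(A_np.dot(x_val) - b_np))
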
util.py (project: MGP-RNN, author: jfutoma)
def block_CG(A_,B_):
    """
    block version of CG. Get solution to matrix equation AX = B, ie
    X = A^-1 * B. Will be much faster than Cholesky for large-scale problems.
    """
    n = tf.shape(B_)[0]
    m = tf.shape(B_)[1]

    X = tf.zeros((n,m))
    V_ = tf.zeros((n,m))
    R = B_
    R_ = tf.matrix_set_diag(tf.zeros((n,m)),tf.ones([m]))

    #somewhat arbitrary again, may want to check sensitivity
    CG_EPS = tf.cast(n/1000,"float")
    MAX_ITER = tf.div(n,250) + 3

    def cond(i,X,R_,R,V_):
        return tf.logical_and(i < MAX_ITER, tf.norm(R) > CG_EPS)

    def body(i,X,R_,R,V_):   
        S = tf.matrix_solve(tf.matmul(tf.transpose(R_),R_),
                            tf.matmul(tf.transpose(R),R))
        V = R + tf.matmul(V_,S)
        T = tf.matrix_solve(tf.matmul(tf.transpose(V),tf.matmul(A_,V)),
                            tf.matmul(tf.transpose(R),R))
        X = X + tf.matmul(V,T)
        V_ = V
        R_ = R
        R = R - tf.matmul(A_,tf.matmul(V,T))
        return i+1,X,R_,R,V_

    i = tf.constant(0)
    i,X,_,_,_ = tf.while_loop(cond,body,[i,X,R_,R,V_])
    return X
DDTL_alex.py (project: tensorflow-DDT, author: wangchao66)
def source_distance(x,y):
    y = tf.cast(tf.argmax(y,axis=1),tf.float32)
    y1,_,_ = tf.unique_with_counts(y)
    TensorArr = tf.TensorArray(tf.float32,size=1, dynamic_size=True,clear_after_read=False)
    x_array = TensorArr.unstack(y1)
    size = x_array.size()
    initial_outputs = tf.TensorArray(dtype=tf.float32,size=size)
    i = tf.constant(0)
    def should_continue(i, *args):
        return i < size
    def loop(i,output):
        y_class = x_array.read(i)
        idx_i = tf.where(tf.equal(y,y_class))
        xi = tf.gather_nd(x,idx_i)
        initial_outputs1 = tf.TensorArray(dtype=tf.float32,size=size)
        j = tf.constant(0)
        def should_continue1(j,*args):
            return j<size
        def loop1(j,output1):
            y2=x_array.read(j)
            idx_j = tf.where(tf.equal(y,y2))
            xj = tf.gather_nd(x,idx_j)
            dis = tf.reduce_mean (tf.square(tf.reduce_mean(xi,0)
                        -tf.reduce_mean(xj,0)))
            output1 = output1.write(j,dis)
            return j+1,output1
        j,r1=tf.while_loop(should_continue1,loop1,[j,initial_outputs1])
        output = output.write(i,r1.stack())
        return i+1,output
    i,r = tf.while_loop(should_continue,loop,[i,initial_outputs])
    out = r.stack()
    return out
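
A toy call (my own example data, with the function above in scope): ten 4-dimensional features spread over three one-hot classes; the stacked result is the matrix of squared distances between per-class feature means, with a zero diagonal.

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.randn(10, 4).astype(np.float32))
labels = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, size=10)]
y = tf.constant(labels)

dist = source_distance(x, y)

with tf.Session() as sess:
    print(sess.run(dist))  # square matrix over the classes present in y
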
DDTL_resnet.py (project: tensorflow-DDT, author: wangchao66)
def source_distance(x,y):
    y = tf.cast(tf.argmax(y,axis=1),tf.float32)
    y1,_,_ = tf.unique_with_counts(y)
    TensorArr = tf.TensorArray(tf.float32,size=1, dynamic_size=True,clear_after_read=False)
    x_array = TensorArr.unstack(y1)
    size = x_array.size()
    initial_outputs = tf.TensorArray(dtype=tf.float32,size=size)
    i = tf.constant(0)
    def should_continue(i, *args):
        return i < size
    def loop(i,output):
        y_class = x_array.read(i)
        idx_i = tf.where(tf.equal(y,y_class))
        xi = tf.gather_nd(x,idx_i)
        initial_outputs1 = tf.TensorArray(dtype=tf.float32,size=size)
        j = tf.constant(0)
        def should_continue1(j,*args):
            return j<size
        def loop1(j,output1):
            y2=x_array.read(j)
            idx_j = tf.where(tf.equal(y,y2))
            xj = tf.gather_nd(x,idx_j)
            dis = tf.reduce_mean (tf.square(tf.reduce_mean(xi,0)
                        -tf.reduce_mean(xj,0)))
            output1 = output1.write(j,dis)
            return j+1,output1
        j,r1=tf.while_loop(should_continue1,loop1,[j,initial_outputs1])
        output = output.write(i,r1.stack())
        return i+1,output
    i,r = tf.while_loop(should_continue,loop,[i,initial_outputs])
    out = r.stack()
    return out
question_encoding.py (project: Constituent-Centric-Neural-Architecture-for-Reading-Comprehension, author: shrshore)
def process_leafs(self,inodes_h,inodes_c,emb_leaves):
        num_leaves = self.num_leaves
        embx=tf.gather(emb_leaves,tf.range(num_leaves))
        leaf_parent=tf.gather(self.t_par_leaf,tf.range(num_leaves))
        node_h=tf.identity(inodes_h)
        node_c=tf.identity(inodes_c)
        with tf.variable_scope('td_Composition',reuse=True):
            cW=tf.get_variable('cW',[self.hidden_dim+self.emb_dim,4*self.hidden_dim])
            cb=tf.get_variable('cb',[4*self.hidden_dim])
            bu,bo,bi,bf=tf.split(axis=0,num_or_size_splits=4,value=cb)
            idx_var=tf.constant(0)
            logging.warn('begin enumerate the idx_var')
            def _recurceleaf(node_h, node_c,idx_var):
                node_info=tf.gather(leaf_parent, idx_var)
                cur_embed=tf.gather(embx, idx_var)
                #initial node_h:[inode_size, dim_hidden]
                parent_h=tf.gather(node_h, node_info)
                parent_c=tf.gather(node_c, node_info)
                cur_input=tf.concat(values=[parent_h, cur_embed],axis=0)
                flat_=tf.reshape(cur_input, [-1])

                tmp=tf.matmul(tf.expand_dims(flat_,0),cW)

                u,o,i,f=tf.split(axis=1,num_or_size_splits=4,value=tmp)
                i=tf.nn.sigmoid(i+bi)
                o=tf.nn.sigmoid(o+bo)
                u=tf.nn.sigmoid(u+bu)
                f=tf.nn.sigmoid(f+bf)
                c=i*u+tf.reduce_sum(f*parent_c,[0])
                h=o*tf.nn.tanh(c)

                node_h=tf.concat(axis=0,values=[node_h,h])
                node_c=tf.concat(axis=0,values=[node_c,c])
                idx_var=tf.add(idx_var,1)
                return node_h, node_c, idx_var
            loop_cond=lambda a1,b1,idx_var:tf.less(idx_var,num_leaves)
            loop_vars=[node_h,node_c,idx_var]
            node_h,node_c,idx_var=tf.while_loop(loop_cond, _recurceleaf,loop_vars,shape_invariants=[tf.TensorShape([None,self.hidden_dim]),tf.TensorShape([None,self.hidden_dim]),idx_var.get_shape()])
            logging.warn('return new node_h, finished')
            return node_h,node_c
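
As in get_GP_samples above, the loop state here (node_h, node_c) grows along its first axis, which is why the shape invariants relax that dimension to None. A compact standalone illustration of that pattern (illustrative shapes only, not the project's code):

import tensorflow as tf

hidden_dim = 4
states = tf.zeros([1, hidden_dim])  # one initial row; one more is appended per step

def cond(i, states):
    return i < 5

def body(i, states):
    new_row = tf.fill([1, hidden_dim], tf.cast(i, tf.float32))
    return i + 1, tf.concat([states, new_row], axis=0)

_, states = tf.while_loop(
    cond, body, [tf.constant(0), states],
    shape_invariants=[tf.TensorShape([]), tf.TensorShape([None, hidden_dim])])
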

