Python cond() example source code

dqn_utils.py (project: relaax, author: deeplearninc)
def build_graph(self):
        self.ph_local_step = tf.placeholder(tf.int64, [])
        self.ph_q_value = tf.placeholder(tf.float32, [None, dqn_config.config.output.action_size])

        if dqn_config.config.eps.stochastic:
            decay_steps = int(np.random.uniform(*dqn_config.config.eps.decay_steps))
        else:
            decay_steps = dqn_config.config.eps.decay_steps

        eps = tf.train.polynomial_decay(dqn_config.config.eps.initial,
                                        self.ph_local_step,
                                        decay_steps,
                                        dqn_config.config.eps.end)

        return tf.cond(tf.less(tf.random_uniform([]), eps),
                       lambda: tf.random_uniform([], 0, dqn_config.config.output.action_size, dtype=tf.int32),
                       lambda: tf.cast(tf.squeeze(tf.argmax(self.ph_q_value, axis=1)), tf.int32))
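
A minimal, self-contained sketch of the same epsilon-greedy pattern (TF 1.x; the eps value and Q-values are made up for illustration):

import tensorflow as tf

eps = tf.constant(0.1)                      # exploration probability
q_values = tf.constant([[1.0, 3.0, 2.0]])   # one batch row of Q-values

# explore with probability eps, otherwise act greedily on the Q-values
action = tf.cond(tf.less(tf.random_uniform([]), eps),
                 lambda: tf.random_uniform([], 0, 3, dtype=tf.int32),
                 lambda: tf.cast(tf.argmax(q_values, axis=1)[0], tf.int32))

with tf.Session() as sess:
    print(sess.run(action))  # prints 1 (the greedy action) about 90% of the time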
monodepth_dataloader.py (project: supic, author: Hirico)
def read_image(self, image_path):
        # tf.decode_image does not return the image size, this is an ugly workaround to handle both jpeg and png
        path_length = string_length_tf(image_path)[0]
        file_extension = tf.substr(image_path, path_length - 3, 3)
        file_cond = tf.equal(file_extension, 'jpg')

        image = tf.cond(file_cond,
                        lambda: tf.image.decode_jpeg(tf.read_file(image_path)),
                        lambda: tf.image.decode_png(tf.read_file(image_path)))

        # if the dataset is cityscapes, we crop the last fifth to remove the car hood
        if self.dataset == 'cityscapes':
            o_height    = tf.shape(image)[0]
            crop_height = (o_height * 4) // 5  # integer division: the slice index must be an int
            image  =  image[:crop_height,:,:]

        image  = tf.image.convert_image_dtype(image,  tf.float32)
        image  = tf.image.resize_images(image,  [self.params.height, self.params.width], tf.image.ResizeMethod.AREA)

        return image
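
Both branches of the tf.cond above return a uint8 image tensor, which is what lets decode_jpeg and decode_png share one cond: both branches must produce the same dtype and structure. The extension check itself can be exercised without a real file; a small sketch (TF 1.x string ops, path made up, tf.strings.length standing in for the snippet's string_length_tf helper):

import tensorflow as tf

image_path = tf.constant('example.jpg')
path_length = tf.strings.length(image_path)
file_extension = tf.substr(image_path, path_length - 3, 3)
file_cond = tf.equal(file_extension, 'jpg')

with tf.Session() as sess:
    print(sess.run(file_cond))  # True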
util.py (project: tefla, author: litan)
def static_cond(pred, fn1, fn2):
    """Return either fn1() or fn2() based on the boolean value of `pred`.

    Same signature as `control_flow_ops.cond()`, but requires `pred` to be a Python bool.

    Args:
        pred: A value determining whether to return the result of `fn1` or `fn2`.
        fn1: The callable to be performed if pred is true.
        fn2: The callable to be performed if pred is false.

    Returns:
        Tensors returned by the call to either `fn1` or `fn2`.

    Raises:
        TypeError: if `fn1` or `fn2` is not callable.
    """
    if not callable(fn1):
        raise TypeError('fn1 must be callable.')
    if not callable(fn2):
        raise TypeError('fn2 must be callable.')
    if pred:
        return fn1()
    else:
        return fn2()
util.py (project: tefla, author: litan)
def smart_cond(pred, fn1, fn2, name=None):
    """Return either fn1() or fn2() based on the boolean predicate/value `pred`.

    If `pred` is a bool or has a constant value, this uses `static_cond`;
    otherwise it falls back to `tf.cond`.

    Args:
        pred: A scalar determining whether to return the result of `fn1` or `fn2`.
        fn1: The callable to be performed if pred is true.
        fn2: The callable to be performed if pred is false.
        name: Optional name prefix when using `tf.cond`.
    Returns:
        Tensors returned by the call to either `fn1` or `fn2`.
    """
    pred_value = constant_value(pred)
    if pred_value is not None:
        # Use static_cond if pred has a constant value.
        return static_cond(pred_value, fn1, fn2)
    else:
        # Use dynamic cond otherwise.
        return control_flow_ops.cond(pred, fn1, fn2, name)
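
A usage sketch for the two helpers above (assuming constant_value is the one from tensorflow.python.framework.tensor_util, as the code suggests):

import tensorflow as tf

# Constant predicate: resolved at graph-construction time via
# static_cond, so only the taken branch is ever built.
out_static = smart_cond(tf.constant(True),
                        lambda: tf.constant(1),
                        lambda: tf.constant(0))

# Dynamic predicate: falls back to tf.cond; both branches are built
# and the choice happens at run time.
flag = tf.placeholder(tf.bool, [])
out_dynamic = smart_cond(flag,
                         lambda: tf.constant(1),
                         lambda: tf.constant(0))

with tf.Session() as sess:
    print(sess.run(out_static))                  # 1
    print(sess.run(out_dynamic, {flag: False}))  # 0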
bingrad_common.py (project: terngrad, author: wenwei202)
def decode_from_ternary_gradients(grads_and_vars, scalers, shapes):
  """Decode each gradient tensor."""
  with tf.name_scope('ternary_decoder'):
    gradients, variables = zip(*grads_and_vars)
    floating_gradients = []
    for gradient, variable, scaler, shape in zip(gradients, variables, scalers, shapes):
      if gradient is None:
        floating_gradients.append(None)
        continue
      # gradient is encoded, so we use variable to check its size
      # We also assume dtype of variable and gradient is the same
      floating_gradient = tf.cond(tf.size(variable) < FLAGS.size_to_binarize,
                                 lambda: tf.bitcast(gradient, variable.dtype),
                                 lambda: ternary_decoder(gradient, scaler, shape))
      floating_gradients.append(floating_gradient)

    return list(zip(floating_gradients, variables))
bingrad_common.py (project: terngrad, author: wenwei202)
def clip_gradients_by_stddev(grads_and_vars, clip_factor = 2.5):
    """ Clip gradients to [-clip_factor*stddev, clip_factor*stddev]."""
    gradients, variables = zip(*grads_and_vars)
    clipped_gradients = []
    for gradient in gradients:
        if gradient is None:
            clipped_gradients.append(None)
            continue

        mean_gradient = tf.reduce_mean(gradient)
        stddev_gradient = tf.sqrt(tf.reduce_mean(tf.square(gradient - mean_gradient)))
        #clipped_gradient = tf.clip_by_value(gradient, -clip_factor * stddev_gradient, clip_factor * stddev_gradient)
        clipped_gradient = tf.cond(tf.size(gradient) < FLAGS.size_to_binarize,
                               lambda: gradient,
                               lambda: tf.clip_by_value(gradient, -clip_factor * stddev_gradient, clip_factor * stddev_gradient))

        clipped_gradients.append(clipped_gradient)
    return list(zip(clipped_gradients, variables))
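
The size gate can be reproduced standalone; here the literal threshold 3 stands in for FLAGS.size_to_binarize:

import tensorflow as tf

gradient = tf.constant([0.1, 5.0, -4.0, 0.2])
mean = tf.reduce_mean(gradient)
stddev = tf.sqrt(tf.reduce_mean(tf.square(gradient - mean)))

# tensors below the size threshold pass through; larger ones are clipped
clipped = tf.cond(tf.size(gradient) < 3,
                  lambda: gradient,
                  lambda: tf.clip_by_value(gradient, -2.5 * stddev, 2.5 * stddev))

with tf.Session() as sess:
    print(sess.run(clipped))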
preprocess.py (project: antgo, author: jianzfb)
def tf_random_aspect_resize(image, label, low_val=1.0, upper_val=1.5):
  shape = tf.shape(image)
  height = shape[0]
  width = shape[1]

  # stretch one randomly chosen side by a random factor in [low_val, upper_val)
  which_side = tf.to_float(tf.random_uniform([1]))[0]
  multi_val = tf.to_float(tf.random_uniform([1]))[0] * (upper_val - low_val) + low_val

  new_height = tf.cond(which_side > 0.5, lambda: tf.to_float(height), lambda: tf.to_float(height) * multi_val)
  new_width = tf.cond(which_side <= 0.5, lambda: tf.to_float(width), lambda: tf.to_float(width) * multi_val)

  new_height = tf.to_int32(new_height)
  new_width = tf.to_int32(new_width)

  image = tf.expand_dims(image, 0)
  label = tf.expand_dims(label, 0)
  resized_image = tf.image.resize_bilinear(image, [new_height, new_width], align_corners=False)
  resized_image = tf.cast(resized_image, tf.uint8)
  resized_label = tf.image.resize_nearest_neighbor(label, [new_height, new_width], align_corners=False)
  resized_label = tf.cast(resized_label, tf.uint8)
  resized_image = tf.squeeze(resized_image, 0)
  resized_label = tf.squeeze(resized_label, 0)
  return resized_image, resized_label
nn.py (project: Chinese-QA, author: distantJing)
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
           is_train=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]

    flat_args = [flatten(arg, 1) for arg in args]
    if input_keep_prob < 1.0:
        assert is_train is not None
        flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)
                     for arg in flat_args]
    flat_out = _linear(flat_args, output_size, bias, bias_start=bias_start, scope=scope)
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
    if wd:
        add_wd(wd)

    return out
layers.py (project: LiTeFlow, author: petrux)
def next_inp(self, time, output):
        """Returns the next input.

        Arguments:
          time: an `int` or scalar `Tensor` representing the current timestep.
          output: a `2D Tensor` of shape `[batch_size, output_size]` representing
            the current output.

        *NOTE* that the desired decoder input at time `t+1` is the output
        from the previous step `t`. In other words, if decoder inputs have
        been provided, the next input at timestep `t` is the desired output
        for that very same timestep; otherwise it is just the current output.
        """
        if self._inputs_ta:
            output = tf.cond(
                time < self._inputs_ta.size(),
                lambda: self._inputs_ta.read(time),
                lambda: self.zero_output())  # pylint: disable=W0108
        next_inp = ops.fit(output, self._inp_size)
        return next_inp
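
The guard around the TensorArray read can be seen in isolation (array size and contents made up):

import tensorflow as tf

inputs_ta = tf.TensorArray(tf.float32, size=3).unstack(
    tf.constant([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]))
time = tf.constant(1)
current_output = tf.zeros([2])

# read the provided input while one exists; fall back to the model's
# own output once time runs past the array
next_inp = tf.cond(time < inputs_ta.size(),
                   lambda: inputs_ta.read(time),
                   lambda: current_output)

with tf.Session() as sess:
    print(sess.run(next_inp))  # [2. 2.], the provided input for step 1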
TensorflowUtils.py (project: FCN-GoogLeNet, author: DeepSegment)
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
    """
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out],
                               initializer=tf.constant_initializer(0.0), trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, 0.02),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
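
A hypothetical call site for the helper above (shapes and names made up):

x = tf.placeholder(tf.float32, [None, 32, 32, 64])
phase_train = tf.placeholder(tf.bool, name='phase_train')
y = batch_norm(x, 64, phase_train)

Feed phase_train=True during training so tf.cond takes the mean_var_with_update branch, which refreshes the moving averages as a side effect of the control dependency; feed False at inference to normalize with the accumulated EMA statistics instead. Note that tf.cond expects callables: mean_var_with_update is passed as a function, not called.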
experiment.py (project: Graph-CNN, author: fps7806)
def create_loss_function(self):
        with tf.variable_scope('loss') as scope:
            self.print_ext('Creating loss function and summaries')
            cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.net.current_V, labels=self.net.labels))

            correct_prediction = tf.cast(tf.equal(tf.argmax(self.net.current_V, 1), self.net.labels), tf.float32)
            accuracy = tf.reduce_mean(correct_prediction)

            # two variables keep track of the best accuracy obtained on the training/testing batch
            # SHOULD ONLY BE USED IF test_batch_size == ALL TEST SAMPLES
            self.max_acc_train = tf.Variable(tf.zeros([]), name="max_acc_train")
            self.max_acc_test = tf.Variable(tf.zeros([]), name="max_acc_test")
            max_acc = tf.cond(self.net.is_training,
                              lambda: tf.assign(self.max_acc_train, tf.maximum(self.max_acc_train, accuracy)),
                              lambda: tf.assign(self.max_acc_test, tf.maximum(self.max_acc_test, accuracy)))

            tf.add_to_collection('losses', cross_entropy)
            tf.summary.scalar('accuracy', accuracy)
            tf.summary.scalar('max_accuracy', max_acc)
            tf.summary.scalar('cross_entropy', cross_entropy)

            # if silent == False, display these statistics:
            self.reports['accuracy'] = accuracy
            self.reports['max acc.'] = max_acc
            self.reports['cross_entropy'] = cross_entropy

    # check if the model has a saved iteration and return the latest iteration step
experiment.py (project: Graph-CNN, author: fps7806)
def create_data(self):
        with tf.device("/cpu:0"):
            with tf.variable_scope('input') as scope:
                self.print_ext('Creating training Tensorflow Tensors')

                vertices = self.graph_vertices[:, self.train_idx, :]
                adjacency = self.graph_adjacency[:, self.train_idx, :, :]
                adjacency = adjacency[:, :, :, self.train_idx]
                labels = self.graph_labels[:, self.train_idx]
                input_mask = np.ones([1, len(self.train_idx), 1]).astype(np.float32)

                train_input = [vertices, adjacency, labels, input_mask]
                train_input = self.create_input_variable(train_input)

                vertices = self.graph_vertices
                adjacency = self.graph_adjacency
                labels = self.graph_labels

                input_mask = np.zeros([1, self.largest_graph, 1]).astype(np.float32)
                input_mask[:, self.test_idx, :] = 1
                test_input = [vertices, adjacency, labels, input_mask]
                test_input = self.create_input_variable(test_input)

                return tf.cond(self.net.is_training, lambda: train_input, lambda: test_input)
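
Note that tf.cond branches may return matching nested structures, not just single tensors, which is what lets the snippet above switch an entire list of inputs at once; a toy version:

import tensorflow as tf

is_training = tf.placeholder(tf.bool, [])
train_input = [tf.constant(1.0), tf.constant(2.0)]
test_input = [tf.constant(10.0), tf.constant(20.0)]

# both branches return a list of two scalar tensors
batch = tf.cond(is_training, lambda: train_input, lambda: test_input)

with tf.Session() as sess:
    print(sess.run(batch, {is_training: True}))  # [1.0, 2.0]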
layers.py (project: Graph-CNN, author: fps7806)
def make_bn(input, phase, axis=-1, epsilon=0.001, mask=None, num_updates=None, name=None):
    default_decay = GraphCNNGlobal.BN_DECAY
    with tf.variable_scope(name, default_name='BatchNorm') as scope:
        input_size = input.get_shape()[axis].value
        if axis == -1:
            axis = len(input.get_shape())-1
        axis_arr = [i for i in range(len(input.get_shape())) if i != axis]
        if mask is None:
            batch_mean, batch_var = tf.nn.moments(input, axis_arr)
        else:
            batch_mean, batch_var = tf.nn.weighted_moments(input, axis_arr, mask)
        gamma = make_variable('gamma', input_size, initializer=tf.constant_initializer(1))
        beta = make_bias_variable('bias', input_size)
        ema = tf.train.ExponentialMovingAverage(decay=default_decay, num_updates=num_updates)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)
        mean, var = tf.cond(phase, mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))

        return tf.nn.batch_normalization(input, mean, var, beta, gamma, epsilon)
TensorflowUtils.py (project: EBGAN.tensorflow, author: shekkizh)
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5, stddev=0.02):
    """
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out],
                               initializer=tf.constant_initializer(0.0), trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, stddev),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
twitter_pos.py (project: GELUs, author: hendrycks)
def model(data_feed):
        h1 = f(tf.matmul(data_feed, w1) + b1)
        h1 = tf.cond(is_training, lambda: tf.nn.dropout(h1, p), lambda: h1)
        h2 = f(tf.matmul(h1, w2) + b2)
        h2 = tf.cond(is_training, lambda: tf.nn.dropout(h2, p), lambda: h2)
        return tf.matmul(h2, w_out) + b_out
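
The conditional-dropout idiom on its own (TF 1.x; the keep probability is made up):

import tensorflow as tf

is_training = tf.placeholder(tf.bool, [])
h = tf.ones([2, 4])
h_drop = tf.cond(is_training,
                 lambda: tf.nn.dropout(h, keep_prob=0.5),
                 lambda: h)

with tf.Session() as sess:
    print(sess.run(h_drop, {is_training: True}))   # randomly zeroed, survivors scaled by 2
    print(sess.run(h_drop, {is_training: False}))  # all ones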
timit_fcn.py (project: GELUs, author: hendrycks)
def feedforward(x):
        h1 = f(tf.matmul(x, W['1']) + b['1'])
        h1 = tf.cond(is_training, lambda: tf.nn.dropout(h1, p), lambda: h1)
        h2 = f(tf.matmul(h1, W['2']) + b['2'])
        h2 = tf.cond(is_training, lambda: tf.nn.dropout(h2, p), lambda: h2)
        h3 = f(tf.matmul(h2, W['3']) + b['3'])
        h3 = tf.cond(is_training, lambda: tf.nn.dropout(h3, p), lambda: h3)
        h4 = f(tf.matmul(h3, W['4']) + b['4'])
        h4 = tf.cond(is_training, lambda: tf.nn.dropout(h4, p), lambda: h4)
        h5 = f(tf.matmul(h4, W['5']) + b['5'])
        h5 = tf.cond(is_training, lambda: tf.nn.dropout(h5, p), lambda: h5)

        return tf.matmul(h5, W['6']) + b['6']
beam_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def _beam_where(self, cond, x, y):
        assert x.shape.is_compatible_with(y.shape)
        original_static_shape = x.shape
        cond = tf.reshape(cond, [self.batch_size * self._beam_width])
        x = self._merge_batch_beams(x, original_static_shape[2:])
        y = self._merge_batch_beams(y, original_static_shape[2:])
        return self._split_batch_beams(tf.where(cond, x, y), original_static_shape[2:])
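
Unlike tf.cond, which picks one branch for an entire computation, the tf.where used here selects between x and y elementwise, which is what beam search needs when only some beams satisfy the condition; a toy contrast:

import tensorflow as tf

cond = tf.constant([True, False, True])
x = tf.constant([1, 2, 3])
y = tf.constant([10, 20, 30])

with tf.Session() as sess:
    print(sess.run(tf.where(cond, x, y)))  # [ 1 20  3]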

