Python div() example source code

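The snippets below are drawn from open-source projects and show tf.div in context. As a quick self-contained reference, here is a minimal sketch of the op itself, using the TensorFlow 1.x API that all of the snippets assume (tf.div performs element-wise division and was later renamed tf.divide):

import tensorflow as tf

a = tf.constant([2.0, 6.0, 8.0])
b = tf.constant([2.0, 3.0, 4.0])
quotient = tf.div(a, b)  # element-wise division

with tf.Session() as sess:
    print(sess.run(quotient))  # [1. 2. 2.]
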
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, **unused_params):
    bound = FLAGS.softmax_bound
    vocab_size_1 = bound
    with tf.name_scope("loss_softmax"):
      epsilon = 10e-8
      float_labels = tf.cast(labels, tf.float32)
      labels_1 = float_labels[:,:vocab_size_1]
      predictions_1 = predictions[:,:vocab_size_1]
      cross_entropy_loss = CrossEntropyLoss().calculate_loss(predictions_1,labels_1)
      labels_2 = float_labels[:,vocab_size_1:]
      predictions_2 = predictions[:,vocab_size_1:]
      # L1 normalization (labels are nonnegative)
      label_rowsum = tf.maximum(
          tf.reduce_sum(labels_2, 1, keep_dims=True),
          epsilon)
      label_append = 1.0-tf.reduce_max(labels_2, 1, keep_dims=True)
      norm_float_labels = tf.concat((tf.div(labels_2, label_rowsum),label_append),axis=1)
      predictions_append = 1.0-tf.reduce_sum(predictions_2, 1, keep_dims=True)
      softmax_outputs = tf.concat((predictions_2,predictions_append),axis=1)
      softmax_loss = norm_float_labels * tf.log(softmax_outputs + epsilon) + (
          1 - norm_float_labels) * tf.log(1 - softmax_outputs + epsilon)
      softmax_loss = tf.negative(tf.reduce_sum(softmax_loss, 1))
    return tf.reduce_mean(softmax_loss) + cross_entropy_loss
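The tf.div call above performs a row-wise L1 normalization of the tail labels, with tf.maximum guarding against division by zero. A minimal numeric sketch of that pattern (hypothetical values):

labels_2 = tf.constant([[0.0, 1.0, 1.0]])
rowsum = tf.maximum(tf.reduce_sum(labels_2, 1, keep_dims=True), 1e-7)
normalized = tf.div(labels_2, rowsum)  # [[0.0, 0.5, 0.5]]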
losses_embedding.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, **unused_params):
        bound = FLAGS.softmax_bound
        vocab_size_1 = bound
        with tf.name_scope("loss_softmax"):
            epsilon = 10e-8
            float_labels = tf.cast(labels, tf.float32)
            labels_1 = float_labels[:,:vocab_size_1]
            predictions_1 = predictions[:,:vocab_size_1]
            cross_entropy_loss = CrossEntropyLoss().calculate_loss(predictions_1,labels_1)
            labels_2 = float_labels[:,vocab_size_1:]
            predictions_2 = predictions[:,vocab_size_1:]
            # L1 normalization (labels are nonnegative)
            label_rowsum = tf.maximum(
                tf.reduce_sum(labels_2, 1, keep_dims=True),
                epsilon)
            label_append = 1.0-tf.reduce_max(labels_2, 1, keep_dims=True)
            norm_float_labels = tf.concat((tf.div(labels_2, label_rowsum),label_append),axis=1)
            predictions_append = 1.0-tf.reduce_sum(predictions_2, 1, keep_dims=True)
            softmax_outputs = tf.concat((predictions_2,predictions_append),axis=1)
            softmax_loss = norm_float_labels * tf.log(softmax_outputs + epsilon) + (
                1 - norm_float_labels) * tf.log(1 - softmax_outputs + epsilon)
            softmax_loss = tf.negative(tf.reduce_sum(softmax_loss, 1))
        return tf.reduce_mean(softmax_loss) + cross_entropy_loss
policy.py (project: photo-editing-tensorflow, author: JamesChuanggg)
def discriminator(self, image, reuse=False):

        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            image_norm = tf.div(image, 255.)
            h0 = lrelu(conv2d(image_norm, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
            h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')
            # Add a small epsilon to prevent NaNs
            h4 += 1e-5
            h4_ = tf.nn.sigmoid(h4) + 1e-5

            return h4_, h4
rnn_regression_upgrade.py (project: DeepLearning, author: STHSF)
def compute_cost(self):

        losses = tf.nn.seq2seq.sequence_loss_by_example(
            [tf.reshape(self.pred, [-1], name='reshape_pred')],
            [tf.reshape(self.ys, [-1], name='reshape_target')],
            [tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)],
            average_across_timesteps=True,
            softmax_loss_function=self.ms_error,
            name='losses'
        )

        with tf.name_scope('average_cost'):
            self.cost = tf.div(
                tf.reduce_sum(losses, name='losses_sum'),
                self.batch_size,
                name='average_cost')
            tf.summary.scalar('cost', self.cost)
rnn_regression.py (project: DeepLearning, author: STHSF)
def compute_cost(self):

        losses = tf.nn.seq2seq.sequence_loss_by_example(
            [tf.reshape(self.pred, [-1], name='reshape_pred')],
            [tf.reshape(self.ys, [-1], name='reshape_target')],
            [tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)],
            average_across_timesteps=True,
            softmax_loss_function=self.ms_error,
            name='losses'
        )

        with tf.name_scope('average_cost'):
            self.cost = tf.div(
                tf.reduce_sum(losses, name='losses_sum'),
                self.batch_size,
                name='average_cost')
            tf.scalar_summary('cost', self.cost)
dropout.py (project: jack, author: uclmr)
def fixed_dropout(xs, keep_prob, noise_shape, seed=None):
    """
    Apply dropout with the same mask over all inputs
    Args:
        xs: list of tensors
        keep_prob:
        noise_shape:
        seed:

    Returns:
        list of dropped inputs
    """
    with tf.name_scope("dropout", values=xs):
        # uniform [keep_prob, 1.0 + keep_prob)
        random_tensor = keep_prob
        random_tensor += tf.random_uniform(noise_shape, seed=seed, dtype=xs[0].dtype)
        # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
        binary_tensor = tf.floor(random_tensor)
        outputs = []
        for x in xs:
            ret = tf.div(x, keep_prob) * binary_tensor
            ret.set_shape(x.get_shape())
            outputs.append(ret)
        return outputs
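A minimal usage sketch (hypothetical shapes): both inputs receive the same dropout mask, so identical positions are zeroed in each tensor, and surviving entries are scaled by 1/keep_prob as in standard inverted dropout.

x1 = tf.random_normal([4, 8])
x2 = tf.random_normal([4, 8])
x1_drop, x2_drop = fixed_dropout([x1, x2], keep_prob=0.5, noise_shape=[4, 8])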
vbutils.py (project: chemblnet, author: jaak-s)
def __init__(self, name, shape, initial_stdev = 2.0, initial_prec_a = 5.0, initial_prec_b = 1.0, a0 = 1.0, b0 = 1.0, fixed_prec = False, mean_init_std = None):
        if mean_init_std is None:
            mean_init_std = 1.0 / np.sqrt(shape[-1])
        with tf.variable_scope(name) as scope:
            #self.mean   = tf.get_variable(name="mean", shape=shape, initializer=tf.contrib.layers.xavier_initializer(), dtype = tf.float32)
            #self.var    = tf.Variable(initial_var * np.ones(shape),      name = name + ".var", dtype = tf.float32)
            self.mean   = tf.Variable(tf.random_uniform(shape, minval=-mean_init_std, maxval=mean_init_std))
            self.logvar = tf.Variable(np.log(initial_stdev**2.0) * np.ones(shape), name = "logvar", dtype = tf.float32)
            if fixed_prec:
                self.prec_a = tf.constant(initial_prec_a * np.ones(shape[-1]), name = "prec_a", dtype = tf.float32)
                self.prec_b = tf.constant(initial_prec_b * np.ones(shape[-1]), name = "prec_b", dtype = tf.float32)
            else:
                self.prec_a = tf.Variable(initial_prec_a * np.ones(shape[-1]), name = "prec_a", dtype = tf.float32)
                self.prec_b = tf.Variable(initial_prec_b * np.ones(shape[-1]), name = "prec_b", dtype = tf.float32)
            self.prec   = tf.div(self.prec_a, self.prec_b, name = "prec")
            self.var    = tf.exp(self.logvar, name = "var")
            self.a0     = a0
            self.b0     = b0
            self.shape  = shape
hmc.py (project: zhusuan, author: thu-ml)
def tune(self, acceptance_rate, fresh_start):
        def adapt_stepsize():
            new_step = tf.assign(self.step, (1 - fresh_start) * self.step + 1)
            rate1 = tf.div(1.0, new_step + self.t0)
            new_h_bar = tf.assign(
                self.h_bar, (1 - fresh_start) * (1 - rate1) * self.h_bar +
                rate1 * (self.delta - acceptance_rate))
            log_epsilon = self.mu - tf.sqrt(new_step) / self.gamma * new_h_bar
            rate = tf.pow(new_step, -self.kappa)
            new_log_epsilon_bar = tf.assign(
                self.log_epsilon_bar,
                rate * log_epsilon + (1 - fresh_start) * (1 - rate) *
                self.log_epsilon_bar)
            with tf.control_dependencies([new_log_epsilon_bar]):
                new_log_epsilon = tf.identity(log_epsilon)

            return tf.exp(new_log_epsilon)

        c = tf.cond(self.adapt_step_size,
                    adapt_stepsize,
                    lambda: tf.exp(self.log_epsilon_bar))

        return c
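For reference, this follows the dual-averaging step-size adaptation of Hoffman & Gelman (2014). Ignoring the fresh_start reset, with target acceptance rate \delta and observed acceptance rate \alpha_t, the updates at step t are:

\bar{h}_t = \left(1 - \frac{1}{t + t_0}\right)\bar{h}_{t-1} + \frac{\delta - \alpha_t}{t + t_0}
\log \epsilon_t = \mu - \frac{\sqrt{t}}{\gamma}\,\bar{h}_t
\log \bar{\epsilon}_t = t^{-\kappa}\log \epsilon_t + \left(1 - t^{-\kappa}\right)\log \bar{\epsilon}_{t-1}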
test_computations.py (project: ngraph, author: NervanaSystems)
def test_binary_ops_combined(self):
        # computation
        a = tf.placeholder(tf.float32, shape=(2, 3))
        b = tf.placeholder(tf.float32, shape=(2, 3))
        c = tf.add(a, b)
        d = tf.mul(c, a)
        e = tf.div(d, b)
        f = tf.sub(a, e)
        g = tf.maximum(a, f)

        # value
        a_val = np.random.rand(*tf_obj_shape(a))
        b_val = np.random.rand(*tf_obj_shape(b))

        # test
        self.run(g, tf_feed_dict={a: a_val, b: b_val})
crbm.py (project: CDBN-for-Tensorflow, author: shygiants)
def __declare_variables(self):
        # Bias variables
        self.bias = self.__bias_variables([self.num_features], 'bias')
        self.cias = self.__bias_variables([self.depth], 'cias')

        # Visible (input) units
        with tf.name_scope('visible') as _:
            self.x = self._input if self._input is not None \
                else tf.placeholder(tf.float32, shape=self.input_shape, name='x')
            self.vis_0 = tf.div(self.x, 255, 'vis_0')

        # Weight variables
        with tf.name_scope('weights') as _:
            self.weights = self.__weight_variable(self.weight_shape, 'weights_forward')
            self.weights_flipped = tf.transpose(
                tf.reverse(self.weights, [True, True, False, False]), perm=[0, 1, 3, 2], name='weight_back')

        self.variables = [self.bias, self.cias, self.weights]
crbm.py (project: CDBN-for-Tensorflow, author: shygiants)
def __gibbs_sampling(self):
        # Gibbs sampling
        # Sample visible units
        with tf.name_scope('visible') as _:
            signal_back = self.__conv2d(self.hid_state0, self.weights_flipped) + self.cias
            if self.is_continuous:
                # Visible units are continuous
                normal_dist = tf.contrib.distributions.Normal(
                    mu=signal_back, sigma=1.)
                self.vis_1 = tf.reshape(
                    tf.div(normal_dist.sample_n(1), self.weight_size * self.weight_size),
                    self.input_shape, name='vis_1')
            else:
                # Visible units are binary
                vis1_prob = tf.sigmoid(signal_back, name='vis_1')
                self.vis_1 = self.__sample(vis1_prob, 'vis_1')

        # Sample hidden units
        with tf.name_scope('hidden') as _:
            self.hid_prob1 = tf.sigmoid(self.__conv2d(self.vis_1, self.weights) + self.bias, name='hid_prob_1')
impute.py (project: aboleth, author: data61)
def _impute2D(self, X_2D):
        r"""Mean impute a rank 2 tensor."""
        # Fill zeros in for missing data initially
        data_zeroed_missing_tf = X_2D * self.real_val_mask

        # Sum the real values in each column
        col_tot = tf.reduce_sum(data_zeroed_missing_tf, 0)

        # Divide column totals by the number of non-nan values
        num_values_col = tf.reduce_sum(self.real_val_mask, 0)
        num_values_col = tf.maximum(num_values_col,
                                    tf.ones(tf.shape(num_values_col)))
        col_nan_means = tf.div(col_tot, num_values_col)

        # Make an vector of the impute values for each missing point
        imputed_vals = tf.gather(col_nan_means, self.missing_ind[:, 1])

        # Fill the imputed values into the data tensor of zeros
        shape = tf.cast(tf.shape(data_zeroed_missing_tf), dtype=tf.int64)
        missing_imputed = tf.scatter_nd(self.missing_ind, imputed_vals, shape)

        X_with_impute = data_zeroed_missing_tf + missing_imputed

        return X_with_impute
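A minimal numeric sketch of the masked column-mean computation above (hypothetical values; 1.0 in the mask marks an observed entry):

X = tf.constant([[1.0, 4.0], [3.0, 0.0]])     # bottom-right entry is "missing"
mask = tf.constant([[1.0, 1.0], [1.0, 0.0]])
col_means = tf.div(tf.reduce_sum(X * mask, 0),
                   tf.maximum(tf.reduce_sum(mask, 0), 1.0))  # [2.0, 4.0]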
conv_encoder_utils.py (project: conv_seq2seq, author: tobyyouup)
def linear_mapping_weightnorm(inputs, out_dim, in_dim=None, dropout=1.0, var_scope_name="linear_mapping"):
  with tf.variable_scope(var_scope_name):
    input_shape = inputs.get_shape().as_list()    # static shape; may contain None
    input_shape_tensor = tf.shape(inputs)
    # use weight normalization (Salimans & Kingma, 2016):  w = g * v / ||v||_2
    V = tf.get_variable('V', shape=[int(input_shape[-1]), out_dim], dtype=tf.float32, initializer=tf.random_normal_initializer(mean=0, stddev=tf.sqrt(dropout*1.0/int(input_shape[-1]))), trainable=True)
    V_norm = tf.norm(V.initialized_value(), axis=0)  # V shape is M*N,  V_norm shape is N
    g = tf.get_variable('g', dtype=tf.float32, initializer=V_norm, trainable=True)
    b = tf.get_variable('b', shape=[out_dim], dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=True)   # weightnorm bias is init zero

    assert len(input_shape) == 3
    inputs = tf.reshape(inputs, [-1, input_shape[-1]])
    inputs = tf.matmul(inputs, V)
    inputs = tf.reshape(inputs, [input_shape_tensor[0], -1, out_dim])
    #inputs = tf.matmul(inputs, V)    # x*v

    scaler = tf.div(g, tf.norm(V, axis=0))   # g / ||V||_2
    inputs = tf.reshape(scaler,[1, out_dim])*inputs + tf.reshape(b,[1, out_dim])   # (x V) * g / ||V||_2 + b


    return inputs
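For reference, weight normalization (Salimans & Kingma, 2016) reparameterizes each weight column as w = \frac{g}{\lVert v \rVert_2} v, so the layer computes x V scaled column-wise by g / \lVert V \rVert_2 plus the bias b, which is exactly what the tf.div and the final line above implement.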
lenet_preprocessing.py (project: terngrad, author: wenwei202)
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_width, output_height)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image
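A minimal usage sketch (hypothetical input). The subtract/div pair maps uint8 pixel values from [0, 255] into roughly [-1, 1]:

raw = tf.cast(tf.random_uniform([240, 320, 3], maxval=256, dtype=tf.int32), tf.uint8)
out = preprocess_image(raw, output_height=28, output_width=28, is_training=False)
# out has shape [28, 28, 3]; values lie in [-1.0, 127/128]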
lenet_preprocessing.py (project: tf-slim-mnist, author: mnuke)
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_width, output_height)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image
StateProcessor.py (project: deep-RL-DQN-tensorflow, author: ZidanMusk)
def __init__(self):

        self.history = StateProcessorSetting.history_length
        self.dims = StateProcessorSetting.observation_dims
        pass

        #get current,prev frame, set by env
        with tf.variable_scope('input', reuse =True):
            self.cur_frame = tf.get_variable('cur_frame',dtype = tf.uint8)
            self.prev_frame = tf.get_variable('prev_frame',dtype = tf.uint8)

        with tf.variable_scope('input'):
            maxOf2 = tf.maximum(tf.to_float(self.cur_frame), tf.to_float(self.prev_frame))
            toGray = tf.expand_dims(tf.image.rgb_to_grayscale(maxOf2), 0)
            resize = tf.image.resize_bilinear(toGray, self.dims, align_corners=None, name='observation')
            self.observe = tf.div(tf.squeeze(resize), 255.0) 

            self.state = tf.get_variable(name = 'state', shape = [self.dims[0],self.dims[1],self.history], dtype = tf.float32,initializer = tf.constant_initializer(0.0),trainable = False)
            self.to_stack = tf.expand_dims(self.observe, 2)
            self.f3, self.f2, self.f1, _ = tf.split(2, self.history, self.state)  # each is 84x84x1
            self.concat = tf.concat(2, [self.to_stack, self.f3, self.f2, self.f1], name='concat')
            self.updateState = self.state.assign(self.concat)
vgg_network.py (project: texture-networks, author: ProofByConstruction)
def style_loss(self, layers):
        activations = [self.activations_for_layer(i) for i in layers]
        gramians = [self.gramian_for_layer(x) for x in layers]
        # Slices are for style and synth image
        gramian_diffs = [
            tf.sub(
                tf.tile(tf.slice(g, [0, 0, 0], [self.num_style, -1, -1]), [self.num_synthesized - self.num_style + 1, 1, 1]),
                tf.slice(g, [self.num_style + self.num_content, 0, 0], [self.num_synthesized, -1, -1]))
            for g in gramians]
        Ns = [g.get_shape().as_list()[2] for g in gramians]
        Ms = [a.get_shape().as_list()[1] * a.get_shape().as_list()[2] for a in activations]
        scaled_diffs = [tf.square(g) for g in gramian_diffs]
        style_loss = tf.div(
            tf.add_n([tf.div(tf.reduce_sum(x), 4 * (N ** 2) * (M ** 2)) for x, N, M in zip(scaled_diffs, Ns, Ms)]),
            len(layers))
        return style_loss
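For reference, this is the style loss of Gatys et al. (2015), averaged over the chosen layers:

L_{style} = \frac{1}{|L|} \sum_{l \in L} \frac{1}{4 N_l^2 M_l^2} \sum_{i,j} \left( G^{style}_l - G^{synth}_l \right)_{ij}^2

where N_l is the number of feature maps and M_l the spatial size (height times width) of layer l, matching Ns and Ms in the code.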
lenet_preprocessing.py (project: tensorflow_yolo2, author: wenxichen)
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_width, output_height)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image
lenet_preprocessing.py (project: Classification_Nets, author: BobLiu20)
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_width, output_height)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image
model.py (project: mean-teacher, author: CuriousAI)
def classification_costs(logits, labels, name=None):
    """Compute classification cost mean and classification cost per sample

    Assume unlabeled examples have label == -1. For unlabeled examples, cost == 0.
    Compute the mean over all examples.
    Note that unlabeled examples are treated differently in error calculation.
    """
    with tf.name_scope(name, "classification_costs") as scope:
        applicable = tf.not_equal(labels, -1)

        # Change -1s to zeros to make cross-entropy computable
        labels = tf.where(applicable, labels, tf.zeros_like(labels))

        # This will now have incorrect values for unlabeled examples
        per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)

        # Retain costs only for labeled
        per_sample = tf.where(applicable, per_sample, tf.zeros_like(per_sample))

        # Take mean over all examples, not just labeled examples.
        labeled_sum = tf.reduce_sum(per_sample)
        total_count = tf.to_float(tf.shape(per_sample)[0])
        mean = tf.div(labeled_sum, total_count, name=scope)

        return mean, per_sample
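A minimal usage sketch (hypothetical values); the -1 entries mark unlabeled examples, which contribute zero cost but still count in the denominator of the mean:

logits = tf.random_normal([4, 10])
labels = tf.constant([3, -1, 7, -1])
mean_cost, per_sample = classification_costs(logits, labels)
# per_sample is zero at indices 1 and 3; mean_cost == sum(per_sample) / 4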
gmm_ops.py (project: lsdc, author: febert)
def _init_clusters_random(data, num_clusters, random_seed):
  """Does random initialization of clusters.

  Args:
    data: a list of Tensors with a matrix of data, each row is an example.
    num_clusters: an integer with the number of clusters.
    random_seed: Seed for PRNG used to initialize seeds.

  Returns:
    A Tensor with num_clusters random rows of data.
  """
  assert isinstance(data, list)
  num_data = tf.add_n([tf.shape(inp)[0] for inp in data])
  with tf.control_dependencies([tf.assert_less_equal(num_clusters, num_data)]):
    indices = tf.random_uniform([num_clusters],
                                minval=0,
                                maxval=tf.cast(num_data, tf.int64),
                                seed=random_seed,
                                dtype=tf.int64)
  indices = tf.cast(indices, tf.int32) % num_data
  clusters_init = embedding_lookup(data, indices, partition_strategy='div')
  return clusters_init
lenet_preprocessing.py (project: segmentation-models, author: desimone)
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

      Args:
        image: A `Tensor` representing an image of arbitrary size.
        output_height: The height of the image after preprocessing.
        output_width: The width of the image after preprocessing.
        is_training: `True` if we're preprocessing the image for training and
          `False` otherwise.

      Returns:
        A preprocessed image.
      """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(image, output_width,
                                                 output_height)
  image = tf.sub(image, 128.0)
  image = tf.div(image, 128.0)
  return image
lenet_preprocessing.py (project: shuttleNet, author: shiyemin)
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_width, output_height)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image
layers.py (project: cyclegan, author: 4Catalyzer)
def instance_norm(x):

    with tf.variable_scope("instance_norm"):
        epsilon = 1e-5
        mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
        scale = tf.get_variable('scale', [x.get_shape()[-1]],
                                initializer=tf.truncated_normal_initializer(
                                    mean=1.0, stddev=0.02
        ))
        offset = tf.get_variable(
            'offset', [x.get_shape()[-1]],
            initializer=tf.constant_initializer(0.0)
        )
        out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset

        return out
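For reference, with \mu and \sigma^2 computed per example over the spatial axes (H, W), this is instance normalization:

y = \mathrm{scale} \cdot \frac{x - \mu}{\sqrt{\sigma^2 + \epsilon}} + \mathrm{offset}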
VAE_util.py (project: VAE_rec, author: RobRomijnders)
def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
  """ 2D normal distribution
  input
  - x,mu: input vectors
  - s1,s2: standard deviances over x1 and x2
  - rho: correlation coefficient in x1-x2 plane
  """
  # eq # 24 and 25 of http://arxiv.org/abs/1308.0850
  norm1 = tf.sub(x1, mu1)
  norm2 = tf.sub(x2, mu2)
  s1s2 = tf.mul(s1, s2)
  z = tf.square(tf.div(norm1, s1))+tf.square(tf.div(norm2, s2))-2.0*tf.div(tf.mul(rho, tf.mul(norm1, norm2)), s1s2)
  negRho = 1-tf.square(rho)
  result = tf.exp(tf.div(-1.0*z,2.0*negRho))
  denom = 2*np.pi*tf.mul(s1s2, tf.sqrt(negRho))
  px1x2 = tf.div(result, denom)
  return px1x2
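For reference, this evaluates the bivariate normal density of eqs. 24 and 25 in Graves (2013, arXiv:1308.0850):

Z = \frac{(x_1-\mu_1)^2}{\sigma_1^2} + \frac{(x_2-\mu_2)^2}{\sigma_2^2} - \frac{2\rho(x_1-\mu_1)(x_2-\mu_2)}{\sigma_1\sigma_2}

\mathcal{N}(x \mid \mu, \sigma, \rho) = \frac{1}{2\pi\sigma_1\sigma_2\sqrt{1-\rho^2}} \exp\left(\frac{-Z}{2(1-\rho^2)}\right)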
lenet_preprocessing.py (project: the-neural-perspective, author: GokuMohandas)
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_width, output_height)
  image = tf.sub(image, 128.0)
  image = tf.div(image, 128.0)
  return image
res_model.py (project: Mendelssohn, author: diggerdu)
def _get_loss(self, name):
        return tf.reduce_mean(tf.multiply(self.target/tf.reduce_mean(self.target),
                              tf.div(tf.log1p(self.target), tf.log1p(self.output))),
                              name=name)

        '''
        return tf.reduce_mean(tf.multiply(tf.log1p(self.target/tf.reduce_mean(self.target)),
                              tf.abs(tf.subtract(self.target, self.output))),
                              name=name)

        return tf.reduce_mean(tf.multiply(self.target/tf.reduce_mean(self.target),
                              tf.abs(tf.subtract(self.target, self.output))),
                              name=name)
        return tf.reduce_mean(tf.multiply(1.0,
                              tf.abs(tf.subtract(self.target, self.output))),
                              name=name)
        '''
model.py (project: Mendelssohn, author: diggerdu)
def _get_loss(self, name):
        return tf.reduce_mean(tf.multiply(self.target/tf.reduce_mean(self.target),
                              tf.div(tf.log1p(self.target), tf.log1p(self.output))),
                              name=name)

        '''
        return tf.reduce_mean(tf.multiply(tf.log1p(self.target/tf.reduce_mean(self.target)),
                              tf.abs(tf.subtract(self.target, self.output))),
                              name=name)

        return tf.reduce_mean(tf.multiply(self.target/tf.reduce_mean(self.target),
                              tf.abs(tf.subtract(self.target, self.output))),
                              name=name)
        return tf.reduce_mean(tf.multiply(1.0,
                              tf.abs(tf.subtract(self.target, self.output))),
                              name=name)
        '''
lenet_preprocessing.py (project: fast-neural-style, author: coder-james)
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_width, output_height)
  image = tf.sub(image, 128.0)
  image = tf.div(image, 128.0)
  return image

