Example source code for Python's divide()

CandidateSample.py (project: TFCommon, author: MU94W)
def sampled_softmax_loss(label, logit, projection, num_sampled):
    """
    Args:
        label:          target class indices
        logit:          unscaled log probabilities
        projection:     (W, b)
        num_sampled:    number of classes to sample for the softmax approximation
    """
    local_label = tf.reshape(label, shape=(-1,1))
    local_logit = tf.reshape(logit, shape=(-1, logit.get_shape()[-1].value))
    local_Wt    = tf.transpose(projection[0], perm=(1,0))
    local_b     = projection[1]
    loss_sum    = tf.nn.sampled_softmax_loss(weights=local_Wt, biases=local_b,
                                             labels=local_label,
                                             inputs=local_logit,
                                             num_sampled=num_sampled,
                                             num_classes=local_Wt.get_shape()[0].value)
    loss = tf.divide(tf.reduce_sum(loss_sum), tf.cast(tf.size(local_label), dtype=tf.float32))
    return loss
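A minimal usage sketch under assumed shapes (the sizes and variable names below are hypothetical, not from TFCommon):

import tensorflow as tf

hidden = tf.random_normal((32, 10, 128))                          # (batch, time, units), pre-projection outputs
labels = tf.random_uniform((32, 10), maxval=1000, dtype=tf.int32)
W = tf.get_variable('proj_W', shape=(128, 1000))                  # units x vocabulary
b = tf.get_variable('proj_b', shape=(1000,))
loss = sampled_softmax_loss(labels, hidden, (W, b), num_sampled=64)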
label_cats.py (project: RaspberryPi-Robot, author: timestocome)
def read_tensor_from_image_file(file_name='test.jpg', input_height=128, input_width=128,
                input_mean=0, input_std=255):
  input_name = "file_reader"
  output_name = "normalized"
  file_reader = tf.read_file(file_name, input_name)
  image_reader = tf.image.decode_jpeg(file_reader, channels=3, name='jpeg_reader')
  float_caster = tf.cast(image_reader, tf.float32)
  dims_expander = tf.expand_dims(float_caster, 0)
  resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
  normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
  sess = tf.Session()
  result = sess.run(normalized)

  return result
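A minimal usage sketch, assuming a test.jpg sits next to the script:

result = read_tensor_from_image_file('test.jpg')
print(result.shape)  # (1, 128, 128, 3)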
layers.py (project: deeppavlov, author: deepmipt)
def masked_softmax(tensor, mask, expand=2, axis=1):
    """Masked soft-max using Lambda and merge-multiplication.

    Args:
        tensor: tensor containing scores
        mask: mask for tensor; 1 marks a valid value at that position, 0 marks a void/padded one
        expand: axis along which to repeat mask
        axis: axis along which to compute soft-max

    Returns:
        masked soft-max values
    """

    mask = tf.expand_dims(mask, axis=expand)
    exponentiate = Lambda(lambda x: K.exp(x - K.max(x, axis=axis, keepdims=True)))(tensor)
    masked = tf.multiply(exponentiate, mask)
    div = tf.expand_dims(tf.reduce_sum(masked, axis=axis), axis=axis)
    predicted = tf.divide(masked, div)
    return predicted
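A small sketch of the intended call, assuming the module's Keras imports (Lambda, K) are in scope, scores have shape (batch, time, features), and the mask is (batch, time):

import tensorflow as tf

scores = tf.constant([[[1.0], [2.0], [3.0]]])  # (1, 3, 1)
mask = tf.constant([[1.0, 1.0, 0.0]])          # last time step is padding
probs = masked_softmax(scores, mask, expand=2, axis=1)
with tf.Session() as sess:
    print(sess.run(probs))  # ~[[[0.269], [0.731], [0.0]]]; the valid steps sum to 1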
images.py (project: dynamic-training-bench, author: galeone)
def yuv2rgb(yuv):
    """
    Convert YUV image into RGB https://en.wikipedia.org/wiki/YUV
    """
    yuv = tf.multiply(yuv, 255)
    yuv2rgb_filter = tf.constant([[[[1., 1., 1.], [0., -0.34413999, 1.77199996],
                                    [1.40199995, -0.71414, 0.]]]])
    yuv2rgb_bias = tf.constant([-179.45599365, 135.45983887, -226.81599426])

    yuv = tf.expand_dims(yuv, 0)
    temp = tf.nn.conv2d(yuv, yuv2rgb_filter, [1, 1, 1, 1], 'SAME')
    temp = tf.nn.bias_add(temp, yuv2rgb_bias)
    temp = tf.maximum(temp, tf.zeros(temp.get_shape(), dtype=tf.float32))
    temp = tf.minimum(temp,
                      tf.multiply(
                          tf.ones(temp.get_shape(), dtype=tf.float32), 255))
    temp = tf.divide(temp, 255)
    temp = tf.squeeze(temp, [0])
    return temp
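A minimal sketch: given the initial multiply by 255, the input is assumed to be an H x W x 3 YUV tensor with values in [0, 1], and the output is RGB in the same range:

import numpy as np
import tensorflow as tf

yuv = tf.constant(np.random.rand(64, 64, 3), dtype=tf.float32)
rgb = yuv2rgb(yuv)
with tf.Session() as sess:
    print(sess.run(rgb).shape)  # (64, 64, 3)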
SingleLayerCAE.py (project: dynamic-training-bench, author: galeone)
def loss(self, predictions, real_values):
        """Return the loss operation between predictions and real_values.
        Add L2 weight decay term if any.
        Args:
            predictions: predicted values
            real_values: real values
        Returns:
            Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # 1/2n \sum^{n}_{i=1}{(x_i - x'_i)^2}
            mse = tf.divide(
                tf.reduce_mean(
                    tf.square(tf.subtract(predictions, real_values))),
                2.,
                name="mse")
            tf.add_to_collection(LOSSES, mse)

            # mse + weight_decay per layer
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')

        return error
label_image.py (project: keras-to-tensorflow, author: bitbionic)
def read_tensor_from_image_file(file_name, input_height=299, input_width=299,
                input_mean=0, input_std=255):
  input_name = "file_reader"
  output_name = "normalized"
  file_reader = tf.read_file(file_name, input_name)
  if file_name.endswith(".png"):
    image_reader = tf.image.decode_png(file_reader, channels=3,
                                       name='png_reader')
  elif file_name.endswith(".gif"):
    image_reader = tf.squeeze(tf.image.decode_gif(file_reader,
                                                  name='gif_reader'))
  elif file_name.endswith(".bmp"):
    image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
  else:
    image_reader = tf.image.decode_jpeg(file_reader, channels=3,
                                        name='jpeg_reader')
  float_caster = tf.cast(image_reader, tf.float32)
  dims_expander = tf.expand_dims(float_caster, 0)
  resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
  normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
  sess = tf.Session()
  result = sess.run(normalized)

  return result
analyzers.py (project: transform, author: tensorflow)
def mean(x, reduce_instance_dims=True, name=None):
  """Computes the mean of the values of a `Tensor` over the whole dataset.

  Args:
    x: A `Tensor`.
    reduce_instance_dims: By default collapses the batch and instance dimensions
        to arrive at a single scalar output. If False, only collapses the batch
        dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` containing the mean. If `x` is floating point, the mean will
    have the same type as `x`. If `x` is integral, the output is cast to float32
    for int8 and int16 and float64 for int32 and int64 (similar to the behavior
    of tf.truediv).
  """
  with tf.name_scope(name, 'mean'):
    # Note: Calling `sum` defined in this module, not the builtin.
    return tf.divide(
        sum(x, reduce_instance_dims), size(x, reduce_instance_dims))
distributions.py (project: wide-deep-cnn, author: DaniUPC)
def optimized_loss(self, targets, logits):
        """ Function that computes the loss of a mixture density network
        in a way that it handles underflow and overflow and avoids unstable
        behaviors """
        # Obtain parameters
        mixings, sigma, mean = self.logits_to_params(logits)
        output_size = tf.cast(tf.shape(targets)[1], tf.float32)
        variance = tf.square(sigma)
        # Convert expressions into exponent-based terms
        mixings_exp = tf.log(mixings)
        # By properties of logarithm we can simplify the original expression
        # log(x/y) = log(x) - log(y), log(xy) = log(x) + log(y), log(1) = 0
        sqrt_exp = - output_size * (0.5 * tf.log(2*np.pi) + tf.log(sigma))
        gaussian_exp = -tf.divide(tf.square(targets - mean), 2 * variance)
        exponent = mixings_exp + sqrt_exp + gaussian_exp
        # Use optimized logsumexp function to control underflow/overflow
        return tf.reduce_logsumexp(exponent, axis=1)
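A small numeric sketch (plain NumPy) of the underflow that reduce_logsumexp avoids:

import numpy as np

a = np.array([-1000.0, -1001.0])
naive = np.log(np.sum(np.exp(a)))           # -inf: exp underflows to zero
m = a.max()
stable = m + np.log(np.sum(np.exp(a - m)))  # ~ -999.687, the correct value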
lstm.py (project: yaset, author: jtourille)
def loss_crf(self):
        """
        CRF based loss.
        :return: loss
        """

        # Reshape the sequence-length tensor to shape [batch_size, 1]
        seq_length_reshaped = tf.reshape(self.x_tokens_len, [tf.shape(self.x_tokens_len)[0], -1])

        # Computing loss by scanning mini-batch tensor
        out = tf.scan(self.loss_crf_scan, [self.prediction,
                                           seq_length_reshaped,
                                           self.y], back_prop=True, infer_shape=True, initializer=0.0)

        # Division by batch_size
        loss_crf = tf.divide(tf.reduce_sum(out), tf.cast(tf.shape(self.x_tokens)[0], dtype=tf.float32))

        return loss_crf
label_image.py (project: tensorflow-for-poets-2, author: googlecodelabs)
The function body is identical, line for line, to read_tensor_from_image_file in the keras-to-tensorflow label_image.py example above.
builder.py (project: KBOPrediction, author: riceluxs1t)
def get_accuracy(self, x_test_home, x_test_away, y_test, keep_prop=1.0):
        """
        The predictions from x_test_home and x_test_away are mapped to 1 or 0 depending on
        whether the home team wins. They are then compared with y_test, the ground truth.
        """
        predict = tf.map_fn(
            lambda x: x[0] > x[1],
            self.sess.run(
                self.hypothesis, 
                feed_dict={
                self.X_home: x_test_home, 
                self.X_away: x_test_away, 
                self.Y: y_test, 
                self.keep_prob: keep_prop}
            ), 
            dtype=bool)

        real = tf.map_fn(
            lambda x: x[0] > x[1],
            y_test,
            dtype=bool)

        return self.sess.run(
            tf.divide(
                tf.reduce_sum(tf.cast(tf.equal(predict, real), dtype=tf.int32)), len(y_test)))
recurrentNetwork.py (project: TikZ, author: ellisk42)
def decodesIntoAccuracy(self, labels, perSymbol = True):
        # has dimensions None x L
        accuracyMatrix = tf.equal(self.hardOutputs, labels)

        # zero out anything past the labeled length
        accuracyMatrix = tf.logical_and(accuracyMatrix,
                                        tf.sequence_mask(self.lengthPlaceholder, maxlen = self.maximumLength))

        # Sum across all of the time steps to get the total number of predictions correct in each batch entry
        accuracyVector = tf.reduce_sum(tf.cast(accuracyMatrix,tf.int32),axis = 1)
        if perSymbol:
            # Now normalize it by the sequence length and take the average
            accuracyVector = tf.divide(tf.cast(accuracyVector,tf.float32),
                                       tf.cast(self.lengthPlaceholder,tf.float32))
        if not perSymbol:
            # accuracy is measured per sequence
            accuracyVector = tf.cast(tf.equal(accuracyVector,self.lengthPlaceholder),tf.float32)
        return tf.reduce_mean(accuracyVector)
util.py (project: pydatalab, author: googledatalab)
def _scale_tensor(tensor, range_min, range_max, scale_min, scale_max):
  """Scale a tensor to scale_min to scale_max.

  Args:
    tensor: input tensor. Should be a numerical tensor.
    range_min: min expected value for this feature/tensor.
    range_max: max expected value.
    scale_min: new expected min value.
    scale_max: new expected max value.

  Returns:
    scaled tensor.
  """
  if range_min == range_max:
    return tensor

  float_tensor = tf.to_float(tensor)
  scaled_tensor = tf.divide((tf.subtract(float_tensor, range_min) *
                             tf.constant(float(scale_max - scale_min))),
                            tf.constant(float(range_max - range_min)))
  shifted_tensor = scaled_tensor + tf.constant(float(scale_min))

  return shifted_tensor
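A minimal usage sketch: map pixel values in [0, 255] to [-1, 1]:

import tensorflow as tf

pixels = tf.constant([0.0, 127.5, 255.0])
scaled = _scale_tensor(pixels, range_min=0, range_max=255, scale_min=-1, scale_max=1)
with tf.Session() as sess:
    print(sess.run(scaled))  # [-1.  0.  1.]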
mic_label.py (project: transfer_learning_sound_classification, author: lukeinator42)
The function body is identical, line for line, to read_tensor_from_image_file in the keras-to-tensorflow label_image.py example above.
mobilenet_v1_1_224.py (project: triplet-reid, author: VisualComputingInstitute)
def endpoints(image, is_training):
    if image.get_shape().ndims != 4:
        raise ValueError('Input must be of size [batch, height, width, 3]')

    image = tf.divide(image, 255.0)

    with tf.contrib.slim.arg_scope(mobilenet_v1_arg_scope(batch_norm_decay=0.9, weight_decay=0.0)):
        _, endpoints = mobilenet_v1(image, num_classes=1001, is_training=is_training)

    endpoints['model_output'] = endpoints['global_pool'] = tf.reduce_mean(
        endpoints['Conv2d_13_pointwise'], [1, 2], name='global_pool', keep_dims=False)

    return endpoints, 'MobilenetV1'


# This is copied and modified from mobilenet_v1.py.
linear_svm.py (project: TensorFlow-World, author: astorfi)
def loss_fn(W, b, x_data, y_target):
    logits = tf.subtract(tf.matmul(x_data, W), b)
    norm_term = tf.divide(tf.reduce_sum(tf.multiply(tf.transpose(W), W)), 2)
    classification_loss = tf.reduce_mean(tf.maximum(0., tf.subtract(FLAGS.delta, tf.multiply(logits, y_target))))
    total_loss = tf.add(tf.multiply(FLAGS.C_param, classification_loss), tf.multiply(FLAGS.Reg_param, norm_term))
    return total_loss
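In standard soft-margin SVM notation, the expression above reads

    total_loss = C * mean(max(0, delta - y * (xW - b))) + Reg * ||W||^2 / 2

where delta is FLAGS.delta, C is FLAGS.C_param, and Reg is FLAGS.Reg_param.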
layers.py (project: vae-npvc, author: JeremyCCHsu)
def GumbelSoftmaxLogDensity(y, p, tau):
    # EPS = tf.constant(1e-10)
    k = tf.shape(y)[-1]
    k = tf.cast(k, tf.float32)
    # y = y + EPS
    # y = tf.divide(y, tf.reduce_sum(y, -1, keep_dims=True))
    y = normalize_to_unit_sum(y)
    sum_p_over_y = tf.reduce_sum(tf.divide(p, tf.pow(y, tau)), -1)
    logp = tf.lgamma(k)
    logp = logp + (k - 1) * tf.log(tau)
    logp = logp - k * tf.log(sum_p_over_y)
    logp = logp + sum_p_over_y
    return logp
layers.py (project: vae-npvc, author: JeremyCCHsu)
def normalize_to_unit_sum(x, EPS=1e-10):
    ''' Along the last dim '''
    EPS = tf.constant(EPS, dtype=tf.float32)
    x = x + EPS
    x_sum = tf.reduce_sum(x, -1, keep_dims=True)
    x = tf.divide(x, x_sum)
    return x
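A minimal usage sketch; the entries come out proportional to the input and sum to 1:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
with tf.Session() as sess:
    print(sess.run(normalize_to_unit_sum(x)))  # ~[0.1667 0.3333 0.5]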
optimizer.py (project: tfutils, author: neuroailab)
def accumulate_gradients(self, minibatch_grads, num_minibatches=1):
        """Accumulate gradients for `num_minibatches` minibatches."""
        if self.var_list is None:
            self.var_list = tf.trainable_variables()

        if self.grads_and_vars is None:
            self.grads_and_vars = [(
                tf.Variable(tf.zeros_like(var.initialized_value()),
                            dtype=tf.float32,
                            trainable=False),
                var) for var in self.var_list]

        # Add 1/num_minibatches * minibatch_grads to current gradients.
        def _add_op(gv_tmp, mgv_tmp):
            return tf.add(gv_tmp, tf.divide(mgv_tmp, num_minibatches))
        def _set_op(gv_tmp, mgv_tmp):
            return tf.assign(gv_tmp, tf.divide(mgv_tmp, num_minibatches))
        #grads = [(gv[0].assign_add(tf.divide(mgv[0], num_minibatches)), gv[1])
        #         for (gv, mgv) in zip(self.grads_and_vars, minibatch_grads)]
        #grads = tf.cond(tf.less(self.mini_flag[0], 0.5), fn1 = lambda: _add_op(), fn2 = lambda: _set_op())
        grads = [tf.cond(tf.less(self.mini_flag[0], 0.5),
                         fn1=lambda: _set_op(gv[0], mgv[0]),
                         fn2=lambda: _add_op(gv[0], mgv[0]))
                 for (gv, mgv) in zip(self.grads_and_vars, minibatch_grads)]
        with tf.control_dependencies(grads):
            self.mini_flag = tf.assign(self.mini_flag, tf.constant([1], dtype=tf.float32))
        grads = [(only_grad, gv[1])
                 for (gv, only_grad) in zip(self.grads_and_vars, grads)]
        return self.mini_flag, grads
Attention.py (project: TFCommon, author: MU94W)
def __call__(self, query):

        with tf.variable_scope('attention'):
            # Check if the memory's batch_size is consistent with query's batch_size

            query_units = query.get_shape()[-1].value

            Wa = tf.get_variable(name='Wa', shape=(query_units, self.attention_units))
            Va = tf.get_variable(name='Va', shape=(self.attention_units,),
                                 initializer=tf.constant_initializer(0.0) if self.mode == 0 else tf.constant_initializer(1e-2))
            b  = tf.get_variable(name='b',  shape=(self.attention_units,),
                                 initializer=tf.constant_initializer(0.0) if self.mode == 0 else tf.constant_initializer(0.5))

            # 1st. compute query_feat (query's representation in attention module)
            query_feat = tf.reshape(tf.matmul(query, Wa), (-1, 1, 1, self.attention_units))

            # 2nd. compute the energy for all time steps in encoder (element-wise mul then reduce)
            e = tf.reduce_sum(Va * tf.nn.tanh(self.hidden_feats + query_feat + b), axis=(2,3))

            # 3rd. compute the score
            if self.mask is not None:
                exp_e = tf.exp(e)
                exp_e = exp_e * self.mask
                alpha = tf.divide(exp_e, tf.reduce_sum(exp_e, axis=-1, keep_dims=True))
            else:
                alpha = tf.nn.softmax(e)

            # 4th. get the weighted context from memory (element-wise mul then reduce)
            context = tf.reshape(alpha, (tf.shape(query)[0], self.enc_length, 1, 1)) * self.memory
            context = tf.reduce_sum(context, axis=(1, 2))

            return context, alpha
metrics.py (project: TFCommon, author: MU94W)
def perplexity(label, logit):
    words = tf.cast(tf.size(label), tf.float32)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logit)
    cross_entropy = tf.divide(tf.reduce_sum(cross_entropy), words)
    # the cross-entropy above is in nats, so exponentiate with base e, not 2
    perplex = tf.exp(cross_entropy)
    return perplex
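A quick sketch: confident, correct predictions drive the perplexity toward 1:

import tensorflow as tf

label = tf.constant([0, 1])
logit = tf.constant([[4.0, 0.0], [0.0, 4.0]])
with tf.Session() as sess:
    print(sess.run(perplexity(label, logit)))  # close to 1.0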
findCats.py (project: RaspberryPi-Robot, author: timestocome)
def capture_image(self):

      image = np.empty((self.width, self.height, 3), dtype=np.uint8)
      self.camera.capture(image, 'rgb')

      float_caster = tf.cast(image, tf.float32)
      dims_expander = tf.expand_dims(float_caster, 0)
      resized = tf.image.resize_bilinear(dims_expander, [self.height, self.width])

      normalized = tf.divide(tf.subtract(resized, [self.input_mean]), [self.input_std])
      sess = tf.Session()
      result = sess.run(normalized)

      return result
bp_mll.py (project: bp-mll-tensorflow, author: vanHavel)
def bp_mll_loss(y_true, y_pred):

    # get true and false labels
    shape = tf.shape(y_true)
    y_i = tf.equal(y_true, tf.ones(shape))
    y_i_bar = tf.not_equal(y_true, tf.ones(shape))

    # get indices to check
    truth_matrix = tf.to_float(pairwise_and(y_i, y_i_bar))

    # calculate all exp'd differences
    sub_matrix = pairwise_sub(y_pred, y_pred)
    exp_matrix = tf.exp(tf.negative(sub_matrix))

    # check which differences to consider and sum them
    sparse_matrix = tf.multiply(exp_matrix, truth_matrix)
    sums = tf.reduce_sum(sparse_matrix, axis=[1,2])

    # get normalizing terms and apply them
    y_i_sizes = tf.reduce_sum(tf.to_float(y_i), axis=1)
    y_i_bar_sizes = tf.reduce_sum(tf.to_float(y_i_bar), axis=1)
    normalizers = tf.multiply(y_i_sizes, y_i_bar_sizes)
    results = tf.divide(sums, normalizers)

    # sum over samples
    return tf.reduce_sum(results)
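A minimal usage sketch, assuming the module's pairwise_and and pairwise_sub helpers (the comment below introduces one of them) are in scope:

import tensorflow as tf

y_true = tf.constant([[1.0, 0.0, 1.0]])  # labels 0 and 2 are relevant
y_pred = tf.constant([[0.8, 0.3, 0.6]])  # e.g. sigmoid outputs
loss = bp_mll_loss(y_true, y_pred)
with tf.Session() as sess:
    print(sess.run(loss))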

# compute pairwise differences between elements of the tensors a and b
data_handler.py (project: tf-crnn, author: solivr)
def preprocess_image_for_prediction(fixed_height: int=32, min_width: int=8):
    """
    Input function to use when exporting the model for making predictions (see estimator.export_savedmodel)
    :param fixed_height: height of the input image after resizing
    :param min_width: minimum width of image after resizing
    :return:
    """

    def serving_input_fn():
        # define placeholder for input image
        image = tf.placeholder(dtype=tf.float32, shape=[None, None, 1])

        shape = tf.shape(image)
        # Assert shape is h x w x c with c = 1

        ratio = tf.divide(shape[1], shape[0])
        increment = CONST.DIMENSION_REDUCTION_W_POOLING
        new_width = tf.cast(tf.round((ratio * fixed_height) / increment) * increment, tf.int32)

        resized_image = tf.cond(new_width < tf.constant(min_width, dtype=tf.int32),
                                true_fn=lambda: tf.image.resize_images(image, size=(fixed_height, min_width)),
                                false_fn=lambda: tf.image.resize_images(image, size=(fixed_height, new_width))
                                )

        # Features to serve
        features = {'images': resized_image[None],  # cast to 1 x h x w x c
                    'images_widths': new_width[None]  # cast to tensor
                    }

        # Inputs received
        receiver_inputs = {'images': image}

        return tf.estimator.export.ServingInputReceiver(features, receiver_inputs)

    return serving_input_fn
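Per the docstring, the returned function is handed to export_savedmodel; the estimator below is an assumed, already-trained tf.estimator.Estimator:

estimator.export_savedmodel('./export', preprocess_image_for_prediction(fixed_height=32, min_width=8))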
ModelsMNIST.py (project: TFExperiments, author: gnperdue)
def compute_categorical_loss_and_accuracy(logits, targets):
    """return total loss, reg loss (subset of total), and accuracy"""
    with tf.variable_scope('loss'):
        regularization_losses = sum(
            tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES
            )
        )
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                logits=logits, labels=targets
            ),
            axis=0,
            name='loss'
        ) + regularization_losses
        preds = tf.nn.softmax(logits, name='preds')
        correct_preds = tf.equal(
            tf.argmax(preds, 1), tf.argmax(targets, 1),
            name='correct_preds'
        )
        accuracy = tf.divide(
            tf.reduce_sum(tf.cast(correct_preds, tf.float32)),
            tf.cast(tf.shape(targets)[0], tf.float32),
            name='accuracy'
        )
    return loss, regularization_losses, accuracy
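A small sketch with random logits and one-hot targets (all names hypothetical); with no regularizers registered, the Python sum over the empty collection is simply 0:

import tensorflow as tf

logits = tf.random_normal((8, 10))
targets = tf.one_hot(tf.random_uniform((8,), maxval=10, dtype=tf.int32), depth=10)
loss, reg, acc = compute_categorical_loss_and_accuracy(logits, targets)
with tf.Session() as sess:
    print(sess.run([loss, acc]))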
modelmhl.py (project: WaterGAN, author: kskin)
def read_depth(self, filename):
    depth_mat = sio.loadmat(filename)
    depthtmp=depth_mat["depth"]
    ds = depthtmp.shape
    if self.is_crop:
      depth = scipy.misc.imresize(depthtmp, (self.output_height, self.output_width), mode='F')
    else:
      # the original left `depth` unset when is_crop is False; fall back to the raw map
      depth = depthtmp
    depth = np.array(depth).astype(np.float32)
    depth = np.multiply(self.max_depth, np.divide(depth, depth.max()))

    return depth
modelmhl.py (project: WaterGAN, author: kskin)
read_depth_small is identical, apart from its name, to read_depth above.
modelmhl.py (project: WaterGAN, author: kskin)
def read_depth_sample(self, filename):
    depth_mat = sio.loadmat(filename)
    depthtmp=depth_mat["depth"]
    ds = depthtmp.shape
    if self.is_crop:
      depth = scipy.misc.imresize(depthtmp, (self.sh, self.sw), mode='F')
    else:
      depth = depthtmp
    depth = np.array(depth).astype(np.float32)
    depth = np.multiply(self.max_depth, np.divide(depth, depth.max()))

    return depth
modeljamaica.py (project: WaterGAN, author: kskin)
Identical to read_depth in modelmhl.py above.
modeljamaica.py (project: WaterGAN, author: kskin)
Identical to read_depth_small in modelmhl.py above.

