Example source code for Python cast()

adamax.py (project: DNGPU, author: LUMII-Syslab)
def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        clip_multiplier_t = math_ops.cast(self.clip_multiplier_t, var.dtype.base_dtype)
        clip_epsilon_t = math_ops.cast(self.clip_epsilon_t, var.dtype.base_dtype)

        v = self.get_slot(var, "v")
        # clip gradient so that each value exceeds its previous maximum by no more than clip_multiplier
        if self.clip_gradients:
            clipVal = v * clip_multiplier_t + clip_epsilon_t
            grad = clip_ops.clip_by_value(grad, -clipVal, clipVal)

        # m := beta1 * m + (1 - beta1) * g_t

        m = self.get_slot(var, "m")
        m_t = state_ops.assign(m, beta1_t * m + (1. - beta1_t) * grad, use_locking=self._use_locking)
        # v := max(beta2 * v , abs(grad))
        v_t = state_ops.assign(v, math_ops.maximum(beta2_t * v, math_ops.abs(grad)), use_locking=self._use_locking)
        # variable -= learning_rate * m_t / (epsilon_t + v_t)
        # we do not use a bias-correction term for the first moment; it does not give an observable benefit
        var_update = state_ops.assign_sub(var, lr_t * m_t / (v_t + epsilon_t), use_locking=self._use_locking)

        return control_flow_ops.group(*[var_update, v_t, m_t])
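
For reference, a minimal NumPy sketch of the same three update steps (the hyperparameter values below are illustrative assumptions, not the optimizer's defaults):

import numpy as np

lr, beta1, beta2, eps = 0.001, 0.9, 0.999, 1e-8  # assumed illustrative values
var = np.array([0.5, -0.3])
m = np.zeros_like(var)   # first-moment slot "m"
v = np.zeros_like(var)   # infinity-norm slot "v"

grad = np.array([0.1, -0.2])
m = beta1 * m + (1. - beta1) * grad        # m := beta1*m + (1-beta1)*g_t
v = np.maximum(beta2 * v, np.abs(grad))    # v := max(beta2*v, |g_t|)
var -= lr * m / (v + eps)                  # var -= lr * m / (v + eps)
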
losses.py (project: opinatt, author: epochx)
def get_classification_loss(logits, targets, softmax_loss_function=None):
  bucket_outputs = logits
  if softmax_loss_function is None:
    assert len(bucket_outputs) == len(targets) == 1
    # We need to make target an int64-tensor and set its shape.
    bucket_target = array_ops.reshape(math_ops.to_int64(targets[0]), [-1])
    crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(logits=bucket_outputs[0],
                                                               labels=bucket_target)
  else:
    assert len(bucket_outputs) == len(targets) == 1
    crossent = softmax_loss_function(bucket_outputs[0], targets[0])

  batch_size = array_ops.shape(targets[0])[0]
  loss = tf.reduce_sum(crossent) / math_ops.cast(batch_size, dtypes.float32)

  return loss
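
A minimal usage sketch, assuming the module's TF 1.x imports and a single bucket (the tensors below are hypothetical):

logits = [tf.constant([[2.0, 0.5], [0.1, 1.5]])]   # batch of 2, 2 classes
targets = [tf.constant([0, 1], dtype=tf.int32)]    # integer class ids
loss = get_classification_loss(logits, targets)
with tf.Session() as sess:
    print(sess.run(loss))  # mean sparse softmax cross-entropy over the batch
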
kitti_low_input.py (project: KittiClass, author: MarvinTeichmann)
def create_queues(hypes, phase):
    """Create Queues."""
    arch = hypes['arch']
    dtypes = [tf.float32, tf.int32]

    height = 224
    width = 224
    channel = 3
    shapes = [[height, width, channel], []]

    capacity = 50
    q = tf.FIFOQueue(capacity=capacity, dtypes=dtypes, shapes=shapes)
    tf.summary.scalar("queue/%s/fraction_of_%d_full" %
                      (q.name + "_" + phase, capacity),
                      math_ops.cast(q.size(), tf.float32) * (1. / capacity))

    return q
kitti_low_input.py (project: KittiClass, author: MarvinTeichmann)
def shuffle_join(tensor_list_list, capacity,
                 min_ad, phase):
    name = 'shuffel_input'
    types = _dtypes(tensor_list_list)
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_ad,
        dtypes=types)

    # Build enqueue operations
    _enqueue_join(queue, tensor_list_list)

    full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_ad),
                          dtypes.float32) * (1. / (capacity - min_ad)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "queue/%s/fraction_over_%d_of_%d_full" %
        (name + '_' + phase, min_ad, capacity - min_ad))
    tf.summary.scalar(summary_name, full)

    dequeued = queue.dequeue(name='shuffel_deqeue')
    # dequeued = _deserialize_sparse_tensors(dequeued, sparse_info)
    return dequeued
kitti_input.py (project: KittiClass, author: MarvinTeichmann)
def create_queues(hypes, phase):
    """Create Queues."""
    arch = hypes['arch']
    dtypes = [tf.float32, tf.int32]

    shape_known = hypes['jitter']['fix_shape'] or \
        hypes['jitter']['resize_image']

    if shape_known:
        height = hypes['jitter']['image_height']
        width = hypes['jitter']['image_width']
        channel = hypes['arch']['num_channels']
        shapes = [[height, width, channel],
                  []]
    else:
        shapes = None

    capacity = 50
    q = tf.FIFOQueue(capacity=capacity, dtypes=dtypes, shapes=shapes)
    tf.summary.scalar("queue/%s/fraction_of_%d_full" %
                      (q.name + "_" + phase, capacity),
                      math_ops.cast(q.size(), tf.float32) * (1. / capacity))

    return q
voc_seg_input.py (project: VOCSeg, author: lxh-123)
def shuffle_join(tensor_list_list, capacity, min_ad, phase):
    name = 'shuffel_input'
    types = _dtypes(tensor_list_list)
    queue = data_flow_ops.RandomShuffleQueue(capacity=capacity, min_after_dequeue=min_ad, dtypes=types)

    # Build enqueue operations
    _enqueue_join(queue, tensor_list_list)

    full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_ad), dtypes.float32) * (1. / (capacity - min_ad)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "queue/%s/fraction_over_%d_of_%d_full" %
        (name + '_' + phase, min_ad, capacity - min_ad))
    tf.summary.scalar(summary_name, full)

    dequeued = queue.dequeue(name='shuffel_deqeue')
    # dequeued = _deserialize_sparse_tensors(dequeued, sparse_info)
    return dequeued
tf-keras-skeleton.py (project: LIE, author: EmbraceLife)
def cast_to_floatx(x):
      """Cast a Numpy array to the default Keras float type.

      Arguments:
          x: Numpy array.

      Returns:
          The same Numpy array, cast to its new type.

      Example:
      ```python
          >>> from keras import backend as K
          >>> K.floatx()
          'float32'
          >>> arr = numpy.array([1.0, 2.0], dtype='float64')
          >>> arr.dtype
          dtype('float64')
          >>> new_arr = K.cast_to_floatx(arr)
          >>> new_arr
          array([ 1.,  2.], dtype=float32)
          >>> new_arr.dtype
          dtype('float32')
      ```
      """
      return np.asarray(x, dtype=_FLOATX)

tf-keras-skeleton.py (project: LIE, author: EmbraceLife)
def var(x, axis=None, keepdims=False):
      """Variance of a tensor, alongside the specified axis.

      Arguments:
          x: A tensor or variable.
          axis: An integer, the axis to compute the variance.
          keepdims: A boolean, whether to keep the dimensions or not.
              If `keepdims` is `False`, the rank of the tensor is reduced
              by 1. If `keepdims` is `True`,
              the reduced dimension is retained with length 1.

      Returns:
          A tensor with the variance of elements of `x`.
      """
      axis = _normalize_axis(axis, ndim(x))
      if x.dtype.base_dtype == dtypes_module.bool:
        x = math_ops.cast(x, floatx())
      m = math_ops.reduce_mean(x, reduction_indices=axis, keep_dims=True)
      devs_squared = math_ops.square(x - m)
      return math_ops.reduce_mean(
          devs_squared, reduction_indices=axis, keep_dims=keepdims)
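
As a sanity check, the mean-of-squared-deviations reduction above matches NumPy's population variance; a small standalone sketch:

import numpy as np

x = np.array([[1., 2.], [3., 4.]])
m = x.mean(axis=1, keepdims=True)
print(((x - m) ** 2).mean(axis=1))  # [0.25 0.25], same as np.var(x, axis=1)
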
tf-keras-skeleton.py (project: LIE, author: EmbraceLife)
def mean(x, axis=None, keepdims=False):
      """Mean of a tensor, alongside the specified axis.

      Arguments:
          x: A tensor or variable.
          axis: A list of integers. Axes to compute the mean over.
          keepdims: A boolean, whether to keep the dimensions or not.
              If `keepdims` is `False`, the rank of the tensor is reduced
              by 1 for each entry in `axis`. If `keepdims` is `True`,
              the reduced dimensions are retained with length 1.

      Returns:
          A tensor with the mean of elements of `x`.
      """
      axis = _normalize_axis(axis, ndim(x))
      if x.dtype.base_dtype == dtypes_module.bool:
        x = math_ops.cast(x, floatx())
      return math_ops.reduce_mean(x, reduction_indices=axis, keep_dims=keepdims)
tf-keras-skeleton.py (project: LIE, author: EmbraceLife)
def _preprocess_conv2d_input(x, data_format):
      """Transpose and cast the input before the conv2d.

      Arguments:
          x: input tensor.
          data_format: string, one of 'channels_last', 'channels_first'.

      Returns:
          A tensor.
      """
      if dtype(x) == 'float64':
        x = math_ops.cast(x, 'float32')
      if data_format == 'channels_first':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        x = array_ops.transpose(x, (0, 2, 3, 1))
      return x
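
The (0, 2, 3, 1) permutation moves the channel axis to the last position; the same reordering shown on a plain NumPy array with assumed shapes:

import numpy as np

x = np.zeros((8, 3, 32, 32))                 # (samples, channels, rows, cols)
print(np.transpose(x, (0, 2, 3, 1)).shape)   # (8, 32, 32, 3)
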
tf-keras-skeleton.py (project: LIE, author: EmbraceLife)
def _postprocess_conv2d_output(x, data_format):
      """Transpose and cast the output from conv2d if needed.

      Arguments:
          x: A tensor.
          data_format: string, one of "channels_last", "channels_first".

      Returns:
          A tensor.
      """

      if data_format == 'channels_first':
        x = array_ops.transpose(x, (0, 3, 1, 2))

      if floatx() == 'float64':
        x = math_ops.cast(x, 'float64')
      return x
tf-keras-skeleton.py (project: LIE, author: EmbraceLife)
def _postprocess_conv3d_output(x, data_format):
      """Transpose and cast the output from conv3d if needed.

      Arguments:
          x: A tensor.
          data_format: string, one of "channels_last", "channels_first".

      Returns:
          A tensor.
      """
      if data_format == 'channels_first':
        x = array_ops.transpose(x, (0, 4, 1, 2, 3))

      if floatx() == 'float64':
        x = math_ops.cast(x, 'float64')
      return x
sdca_ops.py (project: lsdc, author: febert)
def approximate_duality_gap(self):
    """Add operations to compute the approximate duality gap.

    Returns:
      An Operation that computes the approximate duality gap over all
      examples.
    """
    with name_scope('sdca/approximate_duality_gap'):
      _, values_list = self._hashtable.export_sharded()
      shard_sums = []
      for values in values_list:
        with ops.device(values.device):
          shard_sums.append(
              math_ops.reduce_sum(math_ops.cast(values, dtypes.float64), 0))
      summed_values = math_ops.add_n(shard_sums)

      primal_loss = summed_values[1]
      dual_loss = summed_values[2]
      example_weights = summed_values[3]
      # Note: we return NaN if there are no weights or all weights are 0, e.g.
      # if no examples have been processed
      return (primal_loss + dual_loss + self._l1_loss() +
              (2.0 * self._l2_loss(self._symmetric_l2_regularization()))
             ) / example_weights
sdca_ops.py (project: lsdc, author: febert)
def regularized_loss(self, examples):
    """Add operations to compute the loss with regularization loss included.

    Args:
      examples: Examples to compute loss on.

    Returns:
      An Operation that computes mean (regularized) loss for given set of
      examples.
    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified(['example_labels', 'example_weights',
                           'sparse_features', 'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/regularized_loss'):
      weights = convert_to_tensor(examples['example_weights'])
      return ((
          self._l1_loss() +
          # Note that here we are using the raw regularization
          # (as specified by the user) and *not*
          # self._symmetric_l2_regularization().
          self._l2_loss(self._options['symmetric_l2_regularization'])) /
              math_ops.reduce_sum(math_ops.cast(weights, dtypes.float64)) +
              self.unregularized_loss(examples))
bernoulli.py (project: lsdc, author: febert)
def _log_prob(self, event):
    # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
    # inconsistent behavior for logits = inf/-inf.
    event = ops.convert_to_tensor(event, name="event")
    event = math_ops.cast(event, self.logits.dtype)
    logits = self.logits
    # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
    # so we do this here.
    # TODO(b/30637701): Check dynamic shape, and don't broadcast if the
    # dynamic shapes are the same.
    if (not event.get_shape().is_fully_defined() or
        not logits.get_shape().is_fully_defined() or
        event.get_shape() != logits.get_shape()):
      logits = array_ops.ones_like(event) * logits
      event = array_ops.ones_like(logits) * event
    return -nn.sigmoid_cross_entropy_with_logits(logits, event)
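
The return value relies on the identity that the negated sigmoid cross-entropy of logits l against a 0/1 event is exactly the Bernoulli log-likelihood; a NumPy sketch of that identity:

import numpy as np

l, y = 1.3, 1.0                      # assumed logit and event values
p = 1. / (1. + np.exp(-l))           # sigmoid(l)
xent = np.maximum(l, 0) - l * y + np.log1p(np.exp(-abs(l)))
log_prob = y * np.log(p) + (1 - y) * np.log(1 - p)
print(np.allclose(-xent, log_prob))  # True
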
distribution_util.py (project: lsdc, author: febert)
def assert_integer_form(
    x, data=None, summarize=None, message=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if round(x) != x.
  """

  message = message or "x has non-integer components"
  x = ops.convert_to_tensor(x, name="x")
  casted_x = math_ops.to_int64(x)
  return check_ops.assert_equal(
      x, math_ops.cast(math_ops.round(casted_x), x.dtype),
      data=data, summarize=summarize, message=message, name=name)
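
A minimal usage sketch (assuming the module's TF 1.x imports; the input values are hypothetical):

x = ops.convert_to_tensor([1.0, 2.0, 3.0])
assert_op = assert_integer_form(x)  # passes when run; [1.0, 2.5] would raise
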
dirichlet.py (project: lsdc, author: febert)
def _mode(self):
    mode = ((self.alpha - 1.) /
            (array_ops.expand_dims(self.alpha_sum, dim=-1) -
             math_ops.cast(self.event_shape()[0], self.dtype)))
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      shape = array_ops.concat(0, (self.batch_shape(), self.event_shape()))
      return math_ops.select(
          math_ops.greater(self.alpha, 1.),
          mode,
          array_ops.fill(shape, nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones((), dtype=self.dtype), self.alpha,
              message="mode not defined for components of alpha <= 1")
      ], mode)
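
The mode computed above is the closed form (alpha_i - 1) / (alpha_0 - K), where alpha_0 is the concentration sum and K the event size; a NumPy sketch with assumed concentrations:

import numpy as np

alpha = np.array([2., 3., 5.])                   # all components > 1
mode = (alpha - 1.) / (alpha.sum() - alpha.size)
print(mode, mode.sum())  # approximately [0.143 0.286 0.571], sums to 1.0
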
array_ops.py (project: lsdc, author: febert)
def one_hot_matrix(tensor_in, num_classes, on_value=1.0, off_value=0.0):
  """Encodes indices from given tensor as one-hot tensor.

  TODO(ilblackdragon): Ideally implementation should be
  part of TensorFlow with Eigen-native operation.

  Args:
    tensor_in: Input tensor of shape [N1, N2].
    num_classes: Number of classes to expand index into.
    on_value: Tensor or float, the value to fill in at the given indices.
    off_value: Tensor or float, the value to fill in everywhere else.
  Returns:
    Tensor of shape [N1, N2, num_classes] with `on_value` at each id from the
    original tensor.
  """
  return array_ops_.one_hot(
      math_ops.cast(tensor_in, dtypes.int64), num_classes, on_value, off_value)
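
A usage sketch with hypothetical indices, showing the index-to-one-hot expansion:

# Hypothetical ids of shape [2, 2]; the result has shape [2, 2, 3].
one_hot = one_hot_matrix([[0, 2], [1, 1]], num_classes=3)
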
kitti_seg_input.py (project: KittiSeg, author: MarvinTeichmann)
def shuffle_join(tensor_list_list, capacity,
                 min_ad, phase):
    name = 'shuffel_input'
    types = _dtypes(tensor_list_list)
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_ad,
        dtypes=types)

    # Build enqueue operations
    _enqueue_join(queue, tensor_list_list)

    full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_ad),
                          dtypes.float32) * (1. / (capacity - min_ad)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "queue/%s/fraction_over_%d_of_%d_full" %
        (name + '_' + phase, min_ad, capacity - min_ad))
    tf.summary.scalar(summary_name, full)

    dequeued = queue.dequeue(name='shuffel_deqeue')
    # dequeued = _deserialize_sparse_tensors(dequeued, sparse_info)
    return dequeued
losses.py (project: 3D-convolutional-speaker-recognition, author: astorfi)
def contrastive_loss(labels, logits, margin_gen=0, margin_imp=1, scope=None):
    """With this definition the loss will be calculated.
        Args:
          y: The labels.
          distance: The distance vector between the output features..
          batch_size: the batch size is necessary because the loss calculation would be over each batch.
        Returns:
          The total loss.
    """
    with ops.name_scope(scope, "contrastive_loss", [labels, logits]) as scope:
        # logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())

        labels = math_ops.cast(labels, logits.dtype)

        # term_1 = tf.multiply(labels, tf.square(logits))
        term_1 = tf.multiply(labels, tf.square(tf.maximum((logits - margin_gen), 0)))
        term_2 = tf.multiply(1 - labels, tf.square(tf.maximum((margin_imp - logits), 0)))

        # Contrastive
        Contrastive_Loss = tf.add(term_1, term_2) / 2
        loss = tf.losses.compute_weighted_loss(Contrastive_Loss, scope=scope)

        return loss
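
The two terms implement a margin-based contrastive loss: genuine pairs (label 1) are penalized quadratically once their distance exceeds margin_gen, and impostor pairs (label 0) once their distance falls below margin_imp. A per-pair NumPy sketch:

import numpy as np

def pair_loss(label, distance, margin_gen=0., margin_imp=1.):
    term_1 = label * np.maximum(distance - margin_gen, 0.) ** 2
    term_2 = (1 - label) * np.maximum(margin_imp - distance, 0.) ** 2
    return (term_1 + term_2) / 2

print(pair_loss(1, 0.2), pair_loss(0, 0.2))  # 0.02 0.32
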
adamax.py (project: iaf, author: openai)
def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        if var.dtype.base_dtype == tf.float16:
            eps = 1e-7  # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.
        else:
            eps = 1e-8

        v = self.get_slot(var, "v")
        v_t = v.assign(beta1_t * v + (1. - beta1_t) * grad)
        m = self.get_slot(var, "m")
        m_t = m.assign(tf.maximum(beta2_t * m + eps, tf.abs(grad)))
        g_t = v_t / m_t

        var_update = state_ops.assign_sub(var, lr_t * g_t)
        return control_flow_ops.group(*[var_update, m_t, v_t])
stochastic_graph_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testTraversesControlInputs(self):
    dt1 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
    logits = dt1.value() * 3.
    dt2 = st.StochasticTensor(distributions.Bernoulli(logits=logits))
    dt3 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
    x = dt3.value()
    y = array_ops.ones((2, 2)) * 4.
    z = array_ops.ones((2, 2)) * 3.
    out = control_flow_ops.cond(
        math_ops.cast(dt2, dtypes.bool), lambda: math_ops.add(x, y),
        lambda: math_ops.square(z))
    out += 5.
    dep_map = sg._stochastic_dependencies_map([out])
    self.assertEqual(dep_map[dt1], set([out]))
    self.assertEqual(dep_map[dt2], set([out]))
    self.assertEqual(dep_map[dt3], set([out]))
odes.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _optimal_step_size(last_step,
                       error_ratio,
                       safety=0.9,
                       ifactor=10.0,
                       dfactor=0.2,
                       order=5,
                       name=None):
  """Calculate the optimal size for the next Runge-Kutta step."""
  with ops.name_scope(
      name, 'optimal_step_size', [last_step, error_ratio]) as scope:
    error_ratio = math_ops.cast(error_ratio, last_step.dtype)
    exponent = math_ops.cast(1 / order, last_step.dtype)
    # this looks more complex than necessary, but importantly it keeps
    # error_ratio in the numerator so we can't divide by zero:
    factor = math_ops.maximum(
        1 / ifactor,
        math_ops.minimum(error_ratio ** exponent / safety, 1 / dfactor))
    return math_ops.div(last_step, factor, name=scope)
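
A NumPy sketch of the same controller (hypothetical inputs): clamping the factor to [1/ifactor, 1/dfactor] means the next step is at most ifactor times the last step and at least dfactor times it.

import numpy as np

def optimal_step_size(last_step, error_ratio, safety=0.9,
                      ifactor=10.0, dfactor=0.2, order=5):
    factor = np.maximum(1 / ifactor,
                        np.minimum(error_ratio ** (1 / order) / safety,
                                   1 / dfactor))
    return last_step / factor

print(optimal_step_size(0.1, 1e-6))  # tiny error -> step grows 10x to 1.0
print(optimal_step_size(0.1, 50.0))  # large error -> step shrinks to ~0.041
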
helper.py (project: seq2seq, author: google)
def sample(self, time, outputs, name=None, **unused_kwargs):
    with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
      sample_ids = math_ops.cast(
          math_ops.argmax(outputs, axis=-1), dtypes.int32)
      return sample_ids

