Python abs() usage examples from open-source projects

adamax.py source code (project: DNGPU, author: LUMII-Syslab)
def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        clip_multiplier_t = math_ops.cast(self.clip_multiplier_t, var.dtype.base_dtype)
        clip_epsilon_t = math_ops.cast(self.clip_epsilon_t, var.dtype.base_dtype)

        v = self.get_slot(var, "v")
        # clip the gradient so that each value exceeds its running maximum by no more than a factor of clip_multiplier
        if self.clip_gradients:
            clipVal = v * clip_multiplier_t + clip_epsilon_t
            grad = clip_ops.clip_by_value(grad, -clipVal, clipVal)

        # m := beta1 * m + (1 - beta1) * g_t

        m = self.get_slot(var, "m")
        m_t = state_ops.assign(m, beta1_t * m + (1. - beta1_t) * grad, use_locking=self._use_locking)
        # v := max(beta2 * v, abs(grad))
        v_t = state_ops.assign(v, math_ops.maximum(beta2_t * v, math_ops.abs(grad)), use_locking=self._use_locking)
        # variable -= learning_rate * m_t / (epsilon_t + v_t)
        # we do not use the bias-correction term for the first moment; it gives no observable benefit
        var_update = state_ops.assign_sub(var, lr_t * m_t / (v_t+epsilon_t), use_locking=self._use_locking)

        return control_flow_ops.group(*[var_update, v_t, m_t])
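
The dense update above is plain AdaMax. A minimal NumPy sketch of the same step (illustrative only; variable names are mine and the gradient-clipping branch is omitted):

import numpy as np

def adamax_step(var, grad, m, v, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
    # m := beta1 * m + (1 - beta1) * g
    m = beta1 * m + (1.0 - beta1) * grad
    # v := max(beta2 * v, |g|), the infinity-norm second moment
    v = np.maximum(beta2 * v, np.abs(grad))
    # var -= lr * m / (v + eps); no bias correction for the first moment
    var = var - lr * m / (v + eps)
    return var, m, v
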
tf-keras-skeleton.py source code (project: LIE, author: EmbraceLife)
def he_normal(seed=None):
      """He normal initializer.

      It draws samples from a truncated normal distribution centered on 0
      with `stddev = sqrt(2 / fan_in)`
      where `fan_in` is the number of input units in the weight tensor.

      Arguments:
          seed: A Python integer. Used to seed the random generator.

      Returns:
          An initializer.

      References:
          He et al., http://arxiv.org/abs/1502.01852
      """
      return VarianceScaling(
          scale=2., mode='fan_in', distribution='normal', seed=seed)
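
A hedged usage sketch (assuming the public tf.keras API, where this initializer is exposed as `he_normal`):

from tensorflow import keras

# Kernel weights drawn from a truncated normal with stddev = sqrt(2 / fan_in)
layer = keras.layers.Dense(64, activation='relu',
                           kernel_initializer=keras.initializers.he_normal(seed=42))
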
tf-keras-skeleton.py source code (project: LIE, author: EmbraceLife)
def he_uniform(seed=None):
      """He uniform variance scaling initializer.

      It draws samples from a uniform distribution within [-limit, limit]
      where `limit` is `sqrt(6 / fan_in)`
      where `fan_in` is the number of input units in the weight tensor.

      Arguments:
          seed: A Python integer. Used to seed the random generator.

      Returns:
          An initializer.

      References:
          He et al., http://arxiv.org/abs/1502.01852
      """
      return VarianceScaling(
          scale=2., mode='fan_in', distribution='uniform', seed=seed)


    # Compatibility aliases

    # pylint: disable=invalid-name
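
Both He initializers target the same variance, 2 / fan_in; the uniform limit sqrt(6 / fan_in) follows because a uniform on [-limit, limit] has variance limit**2 / 3. A quick NumPy check (illustrative):

import numpy as np

fan_in = 256
limit = np.sqrt(6.0 / fan_in)
samples = np.random.uniform(-limit, limit, size=100_000)
# Var(U(-limit, limit)) = limit**2 / 3 = 2 / fan_in
print(samples.var(), 2.0 / fan_in)  # both approximately 0.0078
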
entropy_test.py source code (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def test_normal_entropy_sample_form_gets_approximate_answer(self):
    # Tested by showing we get a good answer that is not exact.
    with self.test_session():
      dist = distributions.Normal(loc=1.11, scale=2.22)
      mc_entropy = entropy.entropy_shannon(
          dist, n=1000, form=entropy.ELBOForms.sample, seed=0)
      exact_entropy = dist.entropy()

      self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())

      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed
      # to pass.
      self.assertAllClose(exact_entropy.eval(), mc_entropy.eval(), rtol=0.01)

      # Make sure there is some error, proving we used samples
      self.assertLess(0.0001, math_ops.abs(exact_entropy - mc_entropy).eval())
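
The test leans on the Monte Carlo identity H(p) = -E[log p(X)] ~ -(1/n) * sum(log p(x_i)). A standalone sketch of the same estimate (assuming SciPy; not the API under test):

import numpy as np
from scipy import stats

dist = stats.norm(loc=1.11, scale=2.22)
samples = dist.rvs(size=1000, random_state=0)
mc_entropy = -np.mean(dist.logpdf(samples))  # sample-based Shannon entropy
exact_entropy = dist.entropy()               # closed form for a Normal
print(mc_entropy, exact_entropy)             # close, but not identical
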
entropy_test.py source code (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def test_default_entropy_falls_back_on_sample_if_analytic_not_available(self):
    # Tested by showing we get a good answer that is not exact.
    with self.test_session():
      # NormalNoEntropy is like a Normal, but does not have an .entropy method,
      # so we are forced to fall back on sample entropy.
      dist_no_entropy = NormalNoEntropy(loc=1.11, scale=2.22)
      dist_yes_entropy = distributions.Normal(loc=1.11, scale=2.22)

      mc_entropy = entropy.entropy_shannon(
          dist_no_entropy, n=1000, form=entropy.ELBOForms.sample, seed=0)
      exact_entropy = dist_yes_entropy.entropy()

      self.assertEqual(exact_entropy.get_shape(), mc_entropy.get_shape())

      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed
      # to pass.
      self.assertAllClose(exact_entropy.eval(), mc_entropy.eval(), rtol=0.01)

      # Make sure there is some error, proving we used samples
      self.assertLess(0.0001, math_ops.abs(exact_entropy - mc_entropy).eval())
optimizers_test.py source code (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testNoGlobalStep(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
        x = array_ops.placeholder(dtypes.float32, [])
        var = variable_scope.get_variable(
            "test", [], initializer=init_ops.constant_initializer(10))
        loss = math_ops.abs(var * x)
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        train = optimizers_lib.optimize_loss(
            loss,
            global_step=None,
            learning_rate=0.1,
            optimizer=optimizer,
            update_ops=[update_op])
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        self.assertEqual(9.5, var.eval())
        self.assertEqual(20, update_var.eval())
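
The expected 9.5 is one plain SGD step: with var = 10 and x = 5, loss = |var * x|, so dloss/dvar = x * sign(var * x) = 5, and var becomes 10 - 0.1 * 5 = 9.5. The same arithmetic in plain Python:

var, x, lr = 10.0, 5.0, 0.1
grad = x if var * x > 0 else -x  # derivative of |var * x| with respect to var
var -= lr * grad
print(var)  # 9.5
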
optimizers_test.py source code (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testNoGlobalStepWithDecay(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.test_session(graph=g):
        x = array_ops.placeholder(dtypes.float32, [])
        var = variable_scope.get_variable(
            "test", [], initializer=init_ops.constant_initializer(10))
        loss = math_ops.abs(var * x)
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        with self.assertRaisesRegexp(
            ValueError, "global_step is required for learning_rate_decay_fn"):
          optimizers_lib.optimize_loss(
              loss,
              global_step=None,
              learning_rate=0.1,
              learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
              optimizer=optimizer,
              update_ops=[update_op])
linear_operator_test_util.py source code (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def test_log_abs_det(self):
    self._maybe_skip("log_abs_det")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          if dtype.is_complex:
            self.skipTest(
                "tf.matrix_determinant does not work with complex, so this "
                "test is being skipped.")
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_log_abs_det = operator.log_abs_determinant()
            mat_log_abs_det = math_ops.log(
                math_ops.abs(linalg_ops.matrix_determinant(mat)))
            if not use_placeholder:
              self.assertAllEqual(shape[:-2], op_log_abs_det.get_shape())
            op_log_abs_det_v, mat_log_abs_det_v = sess.run(
                [op_log_abs_det, mat_log_abs_det],
                feed_dict=feed_dict)
            self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)
operator_pd_vdvt_update.py source code (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _sqrt_log_det_core(self, diag_chol_c):
    """Finish computation of Sqrt[Log[Det]]."""
    # Complete computation of ._log_det and ._batch_log_det, after the initial
    # Cholesky factor has been taken with the appropriate batch/non-batch method.

    # det(M + VDV^T) = det(D^{-1} + V^T M^{-1} V) * det(D) * det(M)
    #                = det(C) * det(D) * det(M)
    # Multiply by 2 here because this is the log-det of the Cholesky factor of C
    log_det_c = 2 * math_ops.reduce_sum(
        math_ops.log(math_ops.abs(diag_chol_c)),
        reduction_indices=[-1])
    # Add together to get Log[det(M + VDV^T)], the Log-det of the updated square
    # root.
    log_det_updated_sqrt = (
        log_det_c + self._diag_operator.log_det() + self._operator.log_det())
    return log_det_updated_sqrt
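
The factor of 2 is the standard Cholesky log-determinant identity: if M = L L^T then det(M) = det(L)**2 = prod(diag(L))**2. A NumPy sanity check (illustrative):

import numpy as np

a = np.random.randn(4, 4)
m = a @ a.T + 4.0 * np.eye(4)  # symmetric positive-definite
chol = np.linalg.cholesky(m)
log_det = 2.0 * np.sum(np.log(np.abs(np.diag(chol))))
print(np.allclose(log_det, np.linalg.slogdet(m)[1]))  # True
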
student_t.py source code (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def __init__(self,
               df,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="StudentTWithAbsDfSoftplusScale"):
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[df, scale]) as ns:
      super(StudentTWithAbsDfSoftplusScale, self).__init__(
          df=math_ops.floor(math_ops.abs(df)),
          loc=loc,
          scale=nn.softplus(scale, name="softplus_scale"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    self._parameters = parameters
unary_ops_test.py source code (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testNumericOps(self):
    for dtype in self.numeric_types:
      self._testUnary(
          math_ops.abs,
          np.array([[2, -1]], dtype=dtype),
          expected=np.array([[2, 1]], dtype=dtype))

      self._testUnary(
          math_ops.negative,
          np.array([[-1, 1]], dtype=dtype),
          expected=np.array([[1, -1]], dtype=dtype))

      self._testUnary(
          math_ops.square,
          np.array([[-2, 3]], dtype=dtype),
          expected=np.array([[4, 9]], dtype=dtype))

      self._testUnary(
          array_ops.zeros_like,
          np.array([[4, 3], [2, 1]], dtype=dtype),
          expected=np.array([[0, 0], [0, 0]], dtype=dtype))
adamax.py source code (project: DNGPU, author: LUMII-Syslab)
def _apply_sparse(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        clip_multiplier_t = math_ops.cast(self.clip_multiplier_t, var.dtype.base_dtype)
        clip_epsilon_t = math_ops.cast(self.clip_epsilon_t, var.dtype.base_dtype)

        v = self.get_slot(var, "v")
        v_slice = array_ops.gather(v, grad.indices)

        # clip the gradient so that each value exceeds its running maximum by no more than a factor of clip_multiplier
        clipped_values = grad.values
        if self.clip_gradients:
            clipVal = v_slice * clip_multiplier_t + clip_epsilon_t
            clipped_values = clip_ops.clip_by_value(grad.values, -clipVal, clipVal)

        # m := beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_t_values = beta1_t * array_ops.gather(m, grad.indices) + (1 - beta1_t) * clipped_values
        m_t = state_ops.scatter_update(m, grad.indices, m_t_values, use_locking=self._use_locking)

        # v := max(beta2 * v, abs(grad))
        v_t_values = math_ops.maximum(beta2_t * v_slice, math_ops.abs(clipped_values))
        v_t = state_ops.scatter_update(v, grad.indices, v_t_values, use_locking=self._use_locking)

        # variable -= learning_rate * m_t / (epsilon_t + v_t)
        # we do not use the bias-correction term for the first moment; it gives no observable benefit
        var_update = state_ops.scatter_sub(var, grad.indices,
                                           lr_t * m_t_values / (v_t_values + epsilon_t),
                                           use_locking=self._use_locking)
        return control_flow_ops.group(var_update, v_t, m_t)
tf-keras-skeleton.py source code (project: LIE, author: EmbraceLife)
def abs(x):
      """Element-wise absolute value.

      Arguments:
          x: Tensor or variable.

      Returns:
          A tensor.
      """
      return math_ops.abs(x)
tf-keras-skeleton.py source code (project: LIE, author: EmbraceLife)
def mean_absolute_error(y_true, y_pred):
      return K.mean(K.abs(y_pred - y_true), axis=-1)
tf-keras-skeleton.py source code (project: LIE, author: EmbraceLife)
def mean_absolute_percentage_error(y_true, y_pred):
      # Like MAE, but expressed as a percentage of the true values.
      diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
      return 100. * K.mean(diff, axis=-1)
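
A NumPy restatement (illustrative; `K.epsilon()` replaced by an explicit constant) showing why the clip keeps the result finite even when a true value is zero:

import numpy as np

def mape(y_true, y_pred, epsilon=1e-7):
    # clip |y_true| away from zero so the division cannot produce inf/nan
    diff = np.abs((y_true - y_pred) / np.clip(np.abs(y_true), epsilon, None))
    return 100.0 * np.mean(diff, axis=-1)

print(mape(np.array([2.0, 0.0]), np.array([1.5, 0.1])))  # large but finite
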
tf-keras-skeleton.py source code (project: LIE, author: EmbraceLife)
def __call__(self, x):
        regularization = 0.
        if self.l1:
          regularization += K.sum(self.l1 * K.abs(x))
        if self.l2:
          regularization += K.sum(self.l2 * K.square(x))
        return regularization
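
The penalty being computed is l1 * sum(|x|) + l2 * sum(x**2). A minimal NumPy equivalent (hypothetical helper, not the Keras class):

import numpy as np

def l1_l2_penalty(x, l1=0.01, l2=0.0):
    penalty = 0.0
    if l1:
        penalty += np.sum(l1 * np.abs(x))     # L1: sum of absolute values
    if l2:
        penalty += np.sum(l2 * np.square(x))  # L2: sum of squares
    return penalty

print(l1_l2_penalty(np.array([-1.0, 2.0, -3.0])))  # 0.01 * (1 + 2 + 3) = 0.06
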
tf-keras-skeleton.py source code (project: LIE, author: EmbraceLife)
def call(self, inputs, mask=None):
            pos = K.relu(inputs)
            if K.backend() == 'theano':
              neg = (K.pattern_broadcast(self.alpha, self.param_broadcast) *
                     (inputs - K.abs(inputs)) * 0.5)
            else:
              neg = -self.alpha * K.relu(-inputs)
            return pos + neg
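
The two branches agree because (x - |x|) * 0.5 = min(x, 0) = -relu(-x); the Theano form merely avoids a second relu. A NumPy check:

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5])
alpha = 0.25
relu = lambda t: np.maximum(t, 0.0)
theano_branch = alpha * (x - np.abs(x)) * 0.5
default_branch = -alpha * relu(-x)
print(np.allclose(theano_branch, default_branch))  # True
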
tf-keras-skeleton.py source code (project: LIE, author: EmbraceLife)
def _object_list_uid(object_list):
          object_list = _to_list(object_list)
          return ', '.join([str(abs(id(x))) for x in object_list])
special_math.py source code (project: lsdc, author: febert)
def _ndtr(x):
  """Implements ndtr core logic."""
  half_sqrt_2 = constant_op.constant(
      0.5 * math.sqrt(2.), dtype=x.dtype, name="half_sqrt_2")
  w = x * half_sqrt_2
  z = math_ops.abs(w)
  y = math_ops.select(math_ops.less(z, half_sqrt_2),
                      1. + math_ops.erf(w),
                      math_ops.select(math_ops.greater(w, 0.),
                                      2. - math_ops.erfc(z),
                                      math_ops.erfc(z)))
  return 0.5 * y
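
All three branches compute the normal CDF Phi(x) = 0.5 * (1 + erf(x / sqrt(2))); the split exists only for numerical accuracy in the tails. A NumPy/SciPy transcription (assuming SciPy):

import numpy as np
from scipy import special

x = np.linspace(-3.0, 3.0, 13)
half_sqrt_2 = 0.5 * np.sqrt(2.0)
w = x * half_sqrt_2
z = np.abs(w)
y = np.where(z < half_sqrt_2,
             1.0 + special.erf(w),
             np.where(w > 0.0, 2.0 - special.erfc(z), special.erfc(z)))
print(np.allclose(0.5 * y, special.ndtr(x)))  # True
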
loss_ops.py source code (project: lsdc, author: febert)
def absolute_difference(predictions, targets, weight=1.0, scope=None):
  """Adds an Absolute Difference loss to the training procedure.

  `weight` acts as a coefficient for the loss. If a scalar is provided, then the
  loss is simply scaled by the given value. If `weight` is a tensor of size
  [batch_size], then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weight` vector. If the shape of
  `weight` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weight`.

  Args:
    predictions: The predicted outputs.
    targets: The ground truth output tensor, same dimensions as 'predictions'.
    weight: Coefficients for the loss. This must be a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `targets` or
      if the shape of `weight` is invalid.
  """
  with ops.name_scope(scope, "absolute_difference",
                      [predictions, targets]) as scope:
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())
    if weight is None:
      raise ValueError("`weight` cannot be None")
    predictions = math_ops.to_float(predictions)
    targets = math_ops.to_float(targets)
    losses = math_ops.abs(math_ops.sub(predictions, targets))
    return compute_weighted_loss(losses, weight)
sdca_ops.py source code (project: lsdc, author: febert)
def _l1_loss(self):
    """Computes the (un-normalized) l1 loss of the model."""
    with name_scope('sdca/l1_loss'):
      sums = []
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for weights in self._convert_n_to_tensor(self._variables[name]):
          with ops.device(weights.device):
            sums.append(
                math_ops.reduce_sum(
                    math_ops.abs(math_ops.cast(weights, dtypes.float64))))
      sum = math_ops.add_n(sums)
      # SDCA L1 regularization cost is: l1 * sum(|weights|)
      return self._options['symmetric_l1_regularization'] * sum
laplace.py source code (project: lsdc, author: febert)
def _log_prob(self, x):
    return (-math.log(2.) - math_ops.log(self.scale) -
            math_ops.abs(x - self.loc) / self.scale)
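
This is the Laplace log-density log p(x) = -log(2) - log(scale) - |x - loc| / scale. A SciPy cross-check (illustrative):

import numpy as np
from scipy import stats

loc, scale = 0.5, 2.0
x = np.array([-1.0, 0.5, 3.0])
log_prob = -np.log(2.0) - np.log(scale) - np.abs(x - loc) / scale
print(np.allclose(log_prob, stats.laplace(loc, scale).logpdf(x)))  # True
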
laplace.py source code (project: lsdc, author: febert)
def _prob(self, x):
    return 0.5 / self.scale * math_ops.exp(
        -math_ops.abs(x - self.loc) / self.scale)
laplace.py source code (project: lsdc, author: febert)
def _cdf(self, x):
    y = x - self.loc
    return (0.5 + 0.5 * math_ops.sign(y) *
            (1. - math_ops.exp(-math_ops.abs(y) / self.scale)))
distribution_util.py source code (project: lsdc, author: febert)
def assert_close(
    x, y, data=None, summarize=None, message=None, name="assert_close"):
  """Assert that that x and y are within machine epsilon of each other.

  Args:
    x: Numeric `Tensor`
    y: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if |x - y| > machine epsilon.
  """
  message = message or ""
  x = ops.convert_to_tensor(x, name="x")
  y = ops.convert_to_tensor(y, name="y")

  if x.dtype.is_integer:
    return check_ops.assert_equal(
        x, y, data=data, summarize=summarize, message=message, name=name)

  with ops.name_scope(name, "assert_close", [x, y, data]):
    tol = np.finfo(x.dtype.as_numpy_dtype).resolution
    if data is None:
      data = [
          message,
          "Condition x ~= y did not hold element-wise: x = ", x.name, x, "y = ",
          y.name, y
      ]
    condition = math_ops.reduce_all(math_ops.less_equal(math_ops.abs(x-y), tol))
    return control_flow_ops.Assert(
        condition, data, summarize=summarize)
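
The tolerance is NumPy's per-dtype decimal resolution, e.g.:

import numpy as np

print(np.finfo(np.float32).resolution)  # 1e-06
print(np.finfo(np.float64).resolution)  # 1e-15
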
sdca_ops.py source code (project: lsdc, author: febert)
def _shard_indices(self, keys):
    key_shape = keys.get_shape()
    if key_shape.ndims > 1:
      # If keys are a matrix (i.e. a single key is a vector), we use the first
      # element of each key vector to determine the shard.
      keys = array_ops.slice(keys, [0, 0], [key_shape[0].value, 1])
      keys = array_ops.reshape(keys, [-1])
    indices = math_ops.mod(math_ops.abs(keys), self._num_shards)
    return math_ops.cast(indices, dtypes.int32)
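
The sharding rule is simply |key| mod num_shards. A NumPy illustration:

import numpy as np

keys = np.array([-7, 3, 12, -4])
num_shards = 3
print(np.mod(np.abs(keys), num_shards))  # [1 0 0 1]
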
laplace.py source code (project: lsdc, author: febert)
def _sample_n(self, n, seed=None):
    shape = array_ops.concat(0, ([n], self.batch_shape()))
    # Sample uniformly at random from the open interval (-1, 1).
    uniform_samples = random_ops.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    return (self.loc - self.scale * math_ops.sign(uniform_samples) *
            math_ops.log(1. - math_ops.abs(uniform_samples)))
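
This is inverse-CDF sampling for the Laplace distribution; `nextafter` keeps log(1 - |u|) finite at the boundary. A NumPy check of the first two moments (illustrative):

import numpy as np

rng = np.random.default_rng(0)
loc, scale, n = 1.0, 2.0, 200_000
u = rng.uniform(np.nextafter(-1.0, 0.0), 1.0, size=n)
x = loc - scale * np.sign(u) * np.log(1.0 - np.abs(u))
print(x.mean(), x.var())  # ~loc = 1.0 and ~2 * scale**2 = 8.0
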

