Python to_int64() source code examples
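
The snippets below are collected from open-source projects and show the common uses of tf.to_int64: casting labels before a sparse softmax loss, normalizing argmax outputs, and building int64 indices for a SparseTensor. Note that tf.to_int64 is a TensorFlow 1.x convenience alias, later deprecated in favor of tf.cast. A minimal equivalence sketch (TensorFlow 1.x assumed):

import tensorflow as tf

labels = tf.constant([3, 1, 4], dtype=tf.int32)
a = tf.to_int64(labels)          # convenience alias
b = tf.cast(labels, tf.int64)    # equivalent, forward-compatible form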

nn_with_learning_rate.py (project: DataMining, author: lidalei)
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.    
    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].

    Returns:
      loss: Loss tensor of type float.
    """
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss
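
The int64 cast pays off downstream: tf.argmax returns int64 indices, so int64 labels can be compared against predictions without a second cast. A minimal sketch pairing this loss with an accuracy op (the placeholders and class count are assumptions for illustration):

import tensorflow as tf

logits = tf.placeholder(tf.float32, [None, 10])   # hypothetical model output
labels = tf.placeholder(tf.int32, [None])
xent = loss(logits, labels)
correct = tf.equal(tf.argmax(logits, 1), tf.to_int64(labels))  # int64 == int64
accuracy = tf.reduce_mean(tf.to_float(correct))
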
nn_with_nodes.py (project: DataMining, author: lidalei)
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.    
    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].

    Returns:
      loss: Loss tensor of type float.
    """
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss
nn_with_momentum.py (project: DataMining, author: lidalei)
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.    
    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].

    Returns:
      loss: Loss tensor of type float.
    """
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss
ada_learning_rate_nn.py (project: DataMining, author: lidalei)
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.    
    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].

    Returns:
      loss: Loss tensor of type float.
    """
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss
deterministic_discrete_mlp_q_function.py (project: rllabplusplus, author: shaneshixiang)
def init_policy(self):
        output_vec = L.get_output(self._output_vec_layer, deterministic=True)
        action = tf.to_int64(tf.argmax(output_vec, 1))
        action_vec = tf.one_hot(action, self._n)
        max_qval = tf.reduce_max(output_vec, 1)

        self._f_actions = tensor_utils.compile_function([self._obs_layer.input_var], action)
        self._f_actions_vec = tensor_utils.compile_function([self._obs_layer.input_var], action_vec)
        self._f_max_qvals = tensor_utils.compile_function([self._obs_layer.input_var], max_qval)
deterministic_discrete_mlp_q_function.py (project: rllabplusplus, author: shaneshixiang)
def get_action_sym(self, obs_var):
        output_vec = L.get_output(self._output_vec_layer, obs_var, deterministic=True)
        action = tf.to_int64(tf.argmax(output_vec, 1))
        action_vec = tf.one_hot(action, self._n)
        return action_vec
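
In TensorFlow 1.x, tf.argmax already returns int64 by default, so the cast above mainly documents the dtype. A standalone sketch of the argmax-to-one_hot pattern these two methods share (the tensor values are assumptions):

import tensorflow as tf

output_vec = tf.random_uniform([4, 6])           # hypothetical batch of Q-values
action = tf.to_int64(tf.argmax(output_vec, 1))   # greedy action indices, int64
action_vec = tf.one_hot(action, 6)               # one-hot encoded actions
max_qval = tf.reduce_max(output_vec, 1)          # value of the greedy action
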
predictClass.py (project: hourglasstensorlfow, author: wbenbihi)
def _create_joint_tensor(self, tensor, name='joint_tensor', debug=False):
        """ TensorFlow Computation of Joint Position
        Args:
            tensor      : Prediction Tensor Shape [nbStack x 64 x 64 x outDim] or [64 x 64 x outDim]
            name        : name of the tensor
            debug       : if True, print intermediate shapes
        Returns:
            out         : Tensor of joints position

        Comment:
            Admittedly, this tensor is ugly. If you don't trust me, look at
            the 'prediction' node in TensorBoard.
            In my defence, I implemented it to compare computation times with numpy.
        """
        with tf.name_scope(name):
            shape = tensor.get_shape().as_list()
            if debug:
                print(shape)
            if len(shape) == 3:
                resh = tf.reshape(tensor[:,:,0], [-1])
            elif len(shape) == 4:
                resh = tf.reshape(tensor[-1,:,:,0], [-1])
            if debug:
                print(resh)
            arg = tf.argmax(resh, 0)
            if debug:
                print(arg, arg.get_shape(), arg.get_shape().as_list())
            joints = tf.expand_dims(tf.stack([arg // tf.to_int64(shape[1]), arg % tf.to_int64(shape[1])], axis=-1), axis=0)
            for i in range(1, shape[-1]):
                if len(shape) == 3:
                    resh = tf.reshape(tensor[:,:,i], [-1])
                elif len(shape) == 4:
                    resh = tf.reshape(tensor[-1,:,:,i], [-1])
                arg = tf.argmax(resh, 0)
                j = tf.expand_dims(tf.stack([arg // tf.to_int64(shape[1]), arg % tf.to_int64(shape[1])], axis=-1), axis=0)
                joints = tf.concat([joints, j], axis=0)
            return tf.identity(joints, name='joints')
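
For reference, a numpy sketch of the same argmax-to-coordinates computation that the comment says this graph was benchmarked against (the [64 x 64 x outDim] single-stack shape is assumed):

import numpy as np

def joints_numpy(tensor):
    # tensor: [height, width, outDim]; returns [outDim, 2] (row, col) per channel
    flat = tensor.reshape(-1, tensor.shape[-1])   # flatten each heatmap
    arg = np.argmax(flat, axis=0)                 # flat index of each channel's max
    return np.stack([arg // tensor.shape[1], arg % tensor.shape[1]], axis=-1)
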
eval.py (project: tf_datasets, author: tmattio)
def main(args):
    # load the dataset
    mnist = tfd.get_dataset('mnist', FLAGS.data_dir)
    dataset = mnist.load('validation')

    # load batch
    images, labels = load_batch(
        dataset,
        FLAGS.batch_size)

    # get the model prediction
    predictions = lenet(images)

    # convert prediction values for each class into single class prediction
    predictions = tf.to_int64(tf.argmax(predictions, 1))

    # streaming metrics to evaluate
    metrics_to_values, metrics_to_updates = metrics.aggregate_metric_map({
        'mse': metrics.streaming_mean_squared_error(predictions, labels),
        'accuracy': metrics.streaming_accuracy(predictions, labels),
    })

    # write the metrics as summaries
    for metric_name, metric_value in metrics_to_values.items():
        tf.summary.scalar(metric_name, metric_value)

    # evaluate on the model saved at the checkpoint directory
    # evaluate every eval_interval_secs
    slim.evaluation.evaluation_loop(
        '',
        FLAGS.checkpoint_dir,
        FLAGS.log_dir,
        num_evals=FLAGS.num_evals,
        eval_op=metrics_to_updates.values(),
        eval_interval_secs=FLAGS.eval_interval_secs)
tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def ctc_label_dense_to_sparse(labels, label_lengths):
    # undocumented feature soon to be made public
    from tensorflow.python.ops import functional_ops
    label_shape = tf.shape(labels)
    num_batches_tns = stack([label_shape[0]])
    max_num_labels_tns = stack([label_shape[1]])

    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < tf.fill(max_num_labels_tns, current_input)

    init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)
    dense_mask = functional_ops.scan(range_less_than, label_lengths,
                                     initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
                             label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]),
                                                  max_num_labels_tns), reverse(label_shape, 0)))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)
    indices = tf.transpose(tf.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))

    vals_sparse = tf.gather_nd(labels, indices)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))
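
The returned SparseTensor is in the form tf.nn.ctc_loss expects for its labels argument. A minimal usage sketch, assuming the Keras backend helpers (stack, reverse, concatenate) that the function relies on are in scope (values are illustrative):

import tensorflow as tf

dense_labels = tf.constant([[1, 2, 0], [3, 0, 0]], dtype=tf.int32)  # zero-padded
lengths = tf.constant([2, 1], dtype=tf.int32)
sparse_labels = ctc_label_dense_to_sparse(dense_labels, lengths)
# sparse_labels can now feed tf.nn.ctc_loss(labels=sparse_labels, ...)
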
model.py (project: tensorflow_mnist_cloudml, author: mainyaa)
def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].
  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='xentropy')
  return tf.reduce_mean(cross_entropy, name='xentropy_mean')
library.py (project: text2text, author: google)
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
    lengths:   A tensor of dimension batch_size, containing lengths for each
               sequence in the batch. If "None" is specified, simply reverses
               the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  input_shape = tensor_shape.matrix(None, None)
  for input_ in input_seq:
    input_shape.merge_with(input_.get_shape())
    input_.set_shape(input_shape)

  # Join into (time, batch_size, depth)
  s_joined = tf.stack(input_seq)

  if lengths is not None:
    lengths = tf.to_int64(lengths)

  # Reverse along dimension 0
  s_reversed = tf.reverse_sequence(s_joined, lengths, 0, 1)
  # Split again into list
  result = tf.unstack(s_reversed)
  for r in result:
    r.set_shape(input_shape)
  return result
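
The int64 cast reflects that early TensorFlow releases required int64 seq_lengths for tf.reverse_sequence (later releases also accept int32). A standalone sketch of the underlying op (values are assumptions):

import tensorflow as tf

x = tf.constant([[1, 2, 3, 0],
                 [4, 5, 0, 0]])              # [batch, time], zero-padded
lengths = tf.to_int64(tf.constant([3, 2]))
rev = tf.reverse_sequence(x, lengths, 1, 0)  # reverse along time, per batch row
# rows become [3, 2, 1, 0] and [5, 4, 0, 0]; padding stays in place
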
mnist_eval.py (project: tf-slim-mnist, author: mnuke)
def main(args):
    # load the dataset
    dataset = mnist.get_split('test', FLAGS.data_dir)

    # load batch
    images, labels = load_batch(
        dataset,
        FLAGS.batch_size,
        is_training=False)

    # get the model prediction
    predictions = lenet(images)

    # convert prediction values for each class into single class prediction
    predictions = tf.to_int64(tf.argmax(predictions, 1))

    # streaming metrics to evaluate
    metrics_to_values, metrics_to_updates = metrics.aggregate_metric_map({
        'mse': metrics.streaming_mean_squared_error(predictions, labels),
        'accuracy': metrics.streaming_accuracy(predictions, labels),
    })

    # write the metrics as summaries
    for metric_name, metric_value in metrics_to_values.items():
        tf.summary.scalar(metric_name, metric_value)

    # evaluate on the model saved at the checkpoint directory
    # evaluate every eval_interval_secs
    slim.evaluation.evaluation_loop(
        '',
        FLAGS.checkpoint_dir,
        FLAGS.log_dir,
        num_evals=FLAGS.num_evals,
        eval_op=metrics_to_updates.values(),
        eval_interval_secs=FLAGS.eval_interval_secs)
mnist.py (project: TensorflowFramework, author: vahidk)
def parse(mode, image, label):
    """Parse input record to features and labels."""
    image = tf.to_float(image)
    label = tf.to_int64(label)
    image = tf.image.per_image_standardization(image)
    return {"image": image}, {"label": label}
mnist.py (project: notmnist, author: aidiary)
def loss(logits, labels):
    """Calculates the loss from the logits and the labels."""
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss
train.py (project: tensorflow_kaggle_mnist, author: Cuongvn08)
def get_loss(logit, label):
    label = tf.to_int64(label)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                                                        logits=logit,
                                                        labels=label,
                                                        name='cross_entropy')

    loss = tf.reduce_mean(cross_entropy)

    return loss

## get optimizer
# @param learning_rate:
# @param optimizer: optimizer method
mnist.py (project: DeepLearningAndTensorflow, author: azheng333)
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.

    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].

    Returns:
      loss: Loss tensor of type float.
    """
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='xentropy')
    return tf.reduce_mean(cross_entropy, name='xentropy_mean')
topn.py (project: lsdc, author: febert)
def get_best(self, n):
    """Return the indices and values of the n highest scores in the TopN."""

    def refresh_shortlist():
      """Update the shortlist with the highest scores in id_to_score."""
      new_scores, new_ids = tf.nn.top_k(self.id_to_score, self.shortlist_size)
      smallest_new_score = tf.reduce_min(new_scores)
      new_length = tf.reduce_sum(
          tf.to_int32(tf.greater(new_scores, tf.float32.min)))
      u1 = self.sl_ids.assign(
          tf.to_int64(tf.concat([[new_length], new_ids], 0)))
      u2 = self.sl_scores.assign(
          tf.concat([[smallest_new_score], new_scores], 0))
      self.last_ops = [u1, u2]
      return tf.group(u1, u2)

    # We only need to refresh the shortlist if n is greater than the
    # current shortlist size (which is stored in sl_ids[0]).
    with tf.control_dependencies(self.last_ops):
      cond_op = tf.cond(n > self.sl_ids[0], refresh_shortlist, tf.no_op)
      with tf.control_dependencies([cond_op]):
        topk_values, topk_indices = tf.nn.top_k(
            self.sl_scores, tf.minimum(n, tf.to_int32(self.sl_ids[0])))
        # topk_indices are the indices into the shortlist, we want to return
        # the indices into id_to_score
        gathered_indices = tf.gather(self.sl_ids, topk_indices)
        return gathered_indices, topk_values
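
Note the dtype juggling: tf.nn.top_k returns int32 indices while sl_ids holds int64 state, hence the tf.to_int64 before the assign (and tf.to_int32 in the opposite direction when sizing the final top_k). A minimal illustration (names and values are hypothetical):

import tensorflow as tf

scores = tf.constant([0.1, 0.9, 0.5])
values, ids = tf.nn.top_k(scores, 2)  # ids has dtype int32
ids64 = tf.to_int64(ids)              # cast before storing alongside int64 state
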
gmm_ops.py (project: lsdc, author: febert)
def _define_distance_to_clusters(self, data):
    """Defines the Mahalanobis distance to the assigned Gaussian."""
    # TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input -
    # mean) from log probability function.
    self._all_scores = []
    for shard in data:
      all_scores = []
      shard = tf.expand_dims(shard, 0)
      for c in range(self._num_classes):
        if self._covariance_type == FULL_COVARIANCE:
          cov = self._covs[c, :, :]
        elif self._covariance_type == DIAG_COVARIANCE:
          cov = tf.diag(self._covs[c, :])
        inverse = tf.matrix_inverse(cov + self._min_var)
        inv_cov = tf.tile(
            tf.expand_dims(inverse, 0),
            tf.stack([self._num_examples, 1, 1]))
        diff = tf.transpose(shard - self._means[c, :, :], perm=[1, 0, 2])
        m_left = tf.matmul(diff, inv_cov)
        all_scores.append(tf.sqrt(tf.matmul(
            m_left, tf.transpose(diff, perm=[0, 2, 1])
        )))
      self._all_scores.append(tf.reshape(
          tf.concat(all_scores, 1),
          tf.stack([self._num_examples, self._num_classes])))

    # Distance to the associated class.
    self._all_scores = tf.concat(self._all_scores, 0)
    assignments = tf.concat(self.assignments(), 0)
    rows = tf.to_int64(tf.range(0, self._num_examples))
    indices = tf.concat([tf.expand_dims(rows, 1),
                         tf.expand_dims(assignments, 1)], 1)
    self._scores = tf.gather_nd(self._all_scores, indices)
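
The rows/assignments concatenation builds one [row, class] pair per example so that tf.gather_nd picks a single score for each. A standalone illustration of the indexing pattern (values are assumptions):

import tensorflow as tf

scores = tf.constant([[0.1, 0.9],
                      [0.8, 0.2]])                 # [num_examples, num_classes]
assignments = tf.constant([1, 0], dtype=tf.int64)  # assigned class per example
rows = tf.to_int64(tf.range(0, 2))
indices = tf.concat([tf.expand_dims(rows, 1),
                     tf.expand_dims(assignments, 1)], 1)
picked = tf.gather_nd(scores, indices)             # -> [0.9, 0.8]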

