Python one_hot() usage examples (source code)
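All of the snippets below revolve around tf.one_hot(indices, depth, on_value=None, off_value=None, axis=None, dtype=None), where on_value and off_value default to 1 and 0 and axis defaults to the innermost dimension. As a quick reference, here is a minimal, self-contained sketch of the basic behavior (TF 1.x graph mode, matching the style of the snippets that follow):

import tensorflow as tf

# three integer class labels over a vocabulary of depth 4
labels = tf.constant([0, 2, 3])
onehot = tf.one_hot(labels, depth=4)  # float32 by default: on_value=1.0, off_value=0.0

with tf.Session() as sess:
    print(sess.run(onehot))
    # [[1. 0. 0. 0.]
    #  [0. 0. 1. 0.]
    #  [0. 0. 0. 1.]]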

losses.py (project: deepmodels, author: learningsociety)
def clf_loss_oneclass(pred_logits, gt_labels, cls_num):
  """Compute classification loss for oneclass problem.

  Args:
    pred_logits: logits prediction from a model.
    gt_labels: ground truth class labels.
    cls_num: number of classes.
  Returns:
    computed loss.
  """
  with tf.variable_scope("clf_loss"):
    # labels are 0-indexed, so every label must be strictly less than cls_num;
    # the assert only fires if the graph depends on it, hence control_dependencies
    assert_op = tf.assert_less(tf.reduce_max(gt_labels), tf.convert_to_tensor(cls_num))
    with tf.control_dependencies([assert_op]):
      onehot_labels = tf.one_hot(gt_labels, cls_num)
    clf_loss_elem = tf.losses.softmax_cross_entropy(onehot_labels, pred_logits)
    mean_loss = tf.reduce_mean(clf_loss_elem, 0)
  return mean_loss
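A hypothetical call to the function above (the shapes and names here are illustrative, not from the deepmodels project):

logits = tf.random_normal([32, 10])  # batch of 32, 10 classes
labels = tf.random_uniform([32], maxval=10, dtype=tf.int64)
loss = clf_loss_oneclass(logits, labels, cls_num=10)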
test_seq2seq.py (project: deep_learning, author: wecliqued)
def test_create_cell(self):
        seq2seq = self.seq2seq

        # the input batch is one-hot encoded; index 0 is reserved for padding,
        # so the effective vocabulary size grows by one
        vocab_len = len(seq2seq.vocab)
        depth = vocab_len + 1
        no_stacked_cells = self.no_stacked_cells
        hidden_size = self.hidden_size

        seq = tf.placeholder(dtype=tf.int32, shape=[None, None])
        one_hot_seq = tf.one_hot(seq, depth=depth)
        self.assertHasShape(one_hot_seq, [None, None, depth])

        # creates cell using seq as input batch placeholder
        cell, in_state = seq2seq._create_cell(one_hot_seq, no_stacked_cells)
        self.assertIsInstance(cell, tf.contrib.rnn.MultiRNNCell)
        self.assertEqual(len(in_state), no_stacked_cells)
        for state in in_state:
            self.assertHasShape(state, [None, hidden_size])

        # before calling __call__ on cell, internal variables are not created
        # not much we can test right now
        self.assertListEqual(tf.trainable_variables(), [])
likelihoods.py (project: GPflow, author: GPflow)
def prob_is_largest(self, Y, mu, var, gh_x, gh_w):
        Y = tf.cast(Y, tf.int64)
        # work out what the mean and variance is of the indicated latent function.
        oh_on = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 1., 0.), settings.float_type)
        mu_selected = tf.reduce_sum(oh_on * mu, 1)
        var_selected = tf.reduce_sum(oh_on * var, 1)

        # generate Gauss Hermite grid
        X = tf.reshape(mu_selected, (-1, 1)) + gh_x * tf.reshape(
            tf.sqrt(tf.clip_by_value(2. * var_selected, 1e-10, np.inf)), (-1, 1))

        # compute the CDF of the Gaussian between the latent functions and the grid (including the selected function)
        dist = (tf.expand_dims(X, 1) - tf.expand_dims(mu, 2)) / tf.expand_dims(
            tf.sqrt(tf.clip_by_value(var, 1e-10, np.inf)), 2)
        cdfs = 0.5 * (1.0 + tf.erf(dist / np.sqrt(2.0)))

        # squash the CDFs slightly away from exactly 0 and 1 for numerical stability
        cdfs = cdfs * (1 - 2e-4) + 1e-4

        # blank out all the distances on the selected latent function
        oh_off = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 0., 1.), settings.float_type)
        cdfs = cdfs * tf.expand_dims(oh_off, 2) + tf.expand_dims(oh_on, 2)

        # take the product over the latent functions, and the sum over the GH grid.
        return tf.matmul(tf.reduce_prod(cdfs, reduction_indices=[1]), tf.reshape(gh_w / np.sqrt(np.pi), (-1, 1)))
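For reference, the quadrature evaluated above is the standard Gauss-Hermite identity, using the substitution $f = \mu_y + \sqrt{2\sigma_y^2}\,x$:

$$P\big(y = \arg\max_c f_c\big) = \int \mathcal{N}(f \mid \mu_y, \sigma_y^2) \prod_{c \neq y} \Phi\!\left(\frac{f - \mu_c}{\sigma_c}\right) df \approx \frac{1}{\sqrt{\pi}} \sum_k w_k \prod_{c \neq y} \Phi\!\left(\frac{\mu_y + \sqrt{2\sigma_y^2}\,x_k - \mu_c}{\sigma_c}\right)$$

where $(x_k, w_k)$ are the Gauss-Hermite nodes and weights (gh_x, gh_w) and $\Phi$ is the standard normal CDF, computed above via tf.erf.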
tf_core.py (project: sparks, author: ImpactHorizon)
def loss(logits, labels):
    labels = tf.cast(labels, tf.int64)  
    batch_size = logits.get_shape()[0].value  
    weights = tf.constant(batch_size*[H_FACTOR, T_FACTOR], tf.float32, 
                            shape=logits.get_shape())
    softmax = tf.nn.softmax(logits)
    softmax = tf.clip_by_value(softmax, 1e-10, 0.999999)

    with tf.device('/cpu:0'):
        targets = tf.one_hot(labels, depth=2)

    cross_entropy = -tf.reduce_mean(weights*targets*tf.log(softmax) + 
                                        weights*(1-targets)*tf.log(1-softmax), 
                                        reduction_indices=[1])    
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    return tf.add_n(tf.get_collection('losses'), name='total_loss')
dqn.py (project: dqn, author: elix-tech)
def build_training_op(self, q_network_weights):
        a = tf.placeholder(tf.int64, [None])
        y = tf.placeholder(tf.float32, [None])

        # Convert action to one hot vector
        a_one_hot = tf.one_hot(a, self.num_actions, 1.0, 0.0)
        q_value = tf.reduce_sum(tf.multiply(self.q_values, a_one_hot), reduction_indices=1)  # tf.mul was removed in TF 1.0

        # Clip the error: the loss is quadratic for errors in (-1, 1) and linear outside that region (Huber loss)
        error = tf.abs(y - q_value)
        quadratic_part = tf.clip_by_value(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = tf.reduce_mean(0.5 * tf.square(quadratic_part) + linear_part)

        optimizer = tf.train.RMSPropOptimizer(LEARNING_RATE, momentum=MOMENTUM, epsilon=MIN_GRAD)
        grad_update = optimizer.minimize(loss, var_list=q_network_weights)

        return a, y, loss, grad_update
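The clipped-error construction above is exactly the Huber loss with delta = 1. On TF 1.x releases that ship tf.losses.huber_loss, an equivalent formulation would be the following sketch (not the author's original code; y and q_value as defined in build_training_op):

loss = tf.losses.huber_loss(labels=y, predictions=q_value, delta=1.0)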
metrics.py (project: tefla, author: openAGI)
def metric(self, predictions, targets, num_classes=None, batch_size=None, **kwargs):
        """
        Computes Kappa metric

        Args:
            predictions: 2D tensor/array, predictions of the network
            targets: 2D tensor/array, ground truth labels of the network
            num_classes: int, num_classes of the network
            batch_size: batch_size for predictions of the network

        Returns:
            Kappa score
        """
        if num_classes is None:
            num_classes = self.num_classes
        if batch_size is None:
            batch_size = self.batch_size
        targets = tf.convert_to_tensor(targets)
        predictions = tf.convert_to_tensor(predictions)
        if targets.get_shape().ndims == 1:
            targets = tf.one_hot(targets, num_classes, on_value=1, off_value=0)
        if predictions.get_shape().ndims == 1:
            predictions = tf.one_hot(
                predictions, num_classes, on_value=1, off_value=0)
        return self._kappa_loss(predictions, targets, batch_size=batch_size, num_ratings=num_classes, **kwargs)
metrics.py (project: tefla, author: openAGI)
def metric(self, predictions, targets, num_classes=5):
        """
        Computes auroc metric

        Args:
            predictions: 2D tensor/array, predictions of the network
            targets: 2D tensor/array, ground truth labels of the network
            num_classes: int, num_classes of the network

        Returns:
            auroc score
        """
        if targets.ndim == 2:
            targets = np.argmax(targets, axis=1)
        if predictions.ndim == 1:
            predictions = one_hot(predictions, m=num_classes)
        return self._auroc(predictions, targets)
metrics.py (project: tefla, author: openAGI)
def accuracy_op(predictions, targets, num_classes=5):
    """
    Computes accuracy metric

    Args:
        predictions: 2D tensor/array, predictions of the network
        targets: 2D tensor/array, ground truth labels of the network
        num_classes: int, num_classes of the network

    Returns:
        accuracy
    """
    with tf.name_scope('Accuracy'):
        if targets.ndim == 2:
            targets = np.argmax(targets, axis=1)
        if predictions.ndim == 1:
            predictions = one_hot(predictions, m=num_classes)
        acc = accuracy_score(targets, np.argmax(predictions, axis=1))
    return acc
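Note that the two tefla metrics above call a NumPy-side one_hot(vec, m) helper that is not included in these excerpts. A minimal sketch of such a helper (the name and keyword follow the calls above; the body is an assumption, not the project's actual implementation):

import numpy as np

def one_hot(vec, m=None):
    """One-hot encode a 1-D integer array into an (n, m) float32 array."""
    # NOTE: assumed implementation; tefla's actual helper is not shown in this excerpt
    vec = np.asarray(vec, dtype=np.int64)
    if m is None:
        m = int(vec.max()) + 1
    encoded = np.zeros((len(vec), m), dtype=np.float32)
    encoded[np.arange(len(vec)), vec] = 1.0
    return encoded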
base.py (project: tefla, author: openAGI)
def _sparse_loss_softmax(self, logits, labels, is_training, weighted=False):
        log.info('Using sparse softmax loss')
        labels = tf.cast(labels, tf.int64)
        if weighted:
            # tf.rank() returns a tensor, so comparing it with Python != is always
            # truthy; check the statically known rank instead
            if labels.get_shape().ndims != 2:
                labels = tf.one_hot(labels, self.num_classes)
            weights = self._compute_weights(labels)
            weights = tf.reduce_max(tf.multiply(weights, labels), axis=1)
            ce_loss = tf.losses.sparse_softmax_cross_entropy(
                tf.argmax(labels, axis=1), logits=logits, weights=weights, scope='cross_entropy_loss')
        else:
            ce_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=labels, logits=logits, name='cross_entropy_loss')
        ce_loss_mean = tf.reduce_mean(ce_loss, name='cross_entropy')
        if is_training:
            tf.add_to_collection('losses', ce_loss_mean)

            l2_loss = tf.add_n(tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES))
            l2_loss = l2_loss * self.cnf.get('l2_reg', 0.0)
            tf.add_to_collection('losses', l2_loss)

            return tf.add_n(tf.get_collection('losses'), name='total_loss')
        else:
            return ce_loss_mean
base.py (project: tefla, author: openAGI)
def _loss_softmax(self, logits, labels, is_training, weighted=False):
        log.info('Using softmax loss')
        labels = tf.cast(labels, tf.int64)
        # use the static rank; comparing the tf.rank() tensor with Python != is always truthy
        if labels.get_shape().ndims != 2:
            labels = tf.one_hot(labels, self.num_classes)
        if weighted:
            weights = self._compute_weights(labels)
            weights = tf.reduce_max(tf.multiply(weights, labels), axis=1)
            ce_loss = tf.losses.softmax_cross_entropy(
                labels, logits=logits, weights=weights, label_smoothing=self.label_smoothing, scope='cross_entropy_loss')
        else:
            ce_loss = tf.nn.softmax_cross_entropy_with_logits(
                labels=labels, logits=logits, name='cross_entropy_loss')
        ce_loss_mean = tf.reduce_mean(ce_loss, name='cross_entropy')
        if is_training:
            tf.add_to_collection('losses', ce_loss_mean)

            l2_loss = tf.add_n(tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES))
            l2_loss = l2_loss * self.cnf.get('l2_reg', 0.0)
            tf.add_to_collection('losses', l2_loss)

            return tf.add_n(tf.get_collection('losses'), name='total_loss')
        else:
            return ce_loss_mean
base.py (project: tefla, author: openAGI)
def _loss_sigmoid(self, logits, labels, is_training, weighted=False):
        log.info('Using sigmoid loss')
        labels = tf.cast(labels, tf.int64)
        # static-rank check, as in the softmax losses above
        if labels.get_shape().ndims != 2:
            labels = tf.one_hot(labels, self.num_classes)
        if weighted:
            weights = self._compute_weights(labels)
            ce_loss = tf.losses.sigmoid_cross_entropy(
                labels, logits=logits, weights=weights, label_smoothing=self.label_smoothing, scope='sigmoid_cross_entropy_loss')
        else:
            ce_loss = tf.nn.sigmoid_cross_entropy_with_logits(
                labels=labels, logits=logits, name='sigmoid_cross_entropy_loss')
        ce_loss_mean = tf.reduce_mean(ce_loss, name='sigmoid_cross_entropy')
        if is_training:
            tf.add_to_collection('losses', ce_loss_mean)

            l2_loss = tf.add_n(tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES))
            l2_loss = l2_loss * self.cnf.get('l2_reg', 0.0)
            tf.add_to_collection('losses', l2_loss)

            return tf.add_n(tf.get_collection('losses'), name='total_loss')
        else:
            return ce_loss_mean
a3C.py (project: A3C, author: go2sea)
def actor_loss(self):
        if self.config.mode == 'discrete':
            log_prob = tf.reduce_sum(tf.log(self.a_prob) * tf.one_hot(self.action_input, self.action_dim, dtype=tf.float32),
                                     axis=1, keep_dims=True)
            # use entropy to encourage exploration
            exp_v = log_prob * self.TD_loss
            entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob), axis=1, keep_dims=True)  # encourage exploration
            exp_v = self.config.ENTROPY_BETA * entropy + exp_v
            return tf.reduce_mean(-exp_v)  # minimize -exp_v: raise log_prob for actions whose TD_loss is positive
        elif self.config.mode == 'continuous':
            log_prob = self.action_normal_dist.log_prob(self.action_input)
            exp_v = log_prob * self.TD_loss
            # use entropy to encourage exploration
            exp_v = self.config.ENTROPY_BETA * self.action_normal_dist.entropy() + exp_v
            return tf.reduce_mean(-exp_v)
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss_mix(self, predictions, predictions_class, labels, **unused_params):
    with tf.name_scope("loss_mix"):
      float_labels = tf.cast(labels, tf.float32)
      if FLAGS.support_type=="class":
        seq = np.loadtxt(FLAGS.class_file)
        tf_seq = tf.one_hot(tf.constant(seq,dtype=tf.int32),FLAGS.encoder_size)
        float_classes_org = tf.matmul(float_labels,tf_seq)
        class_true = tf.ones(tf.shape(float_classes_org))
        class_false = tf.zeros(tf.shape(float_classes_org))
        float_classes = tf.where(tf.greater(float_classes_org, class_false), class_true, class_false)
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      elif FLAGS.support_type=="frequent":
        float_classes = float_labels[:,0:FLAGS.encoder_size]
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      elif FLAGS.support_type=="encoder":
        float_classes = float_labels
        for i in range(FLAGS.encoder_layers):
          var_i = np.loadtxt(FLAGS.autoencoder_dir+'autoencoder_layer%d.model' % i)
          weight_i = tf.constant(var_i[:-1,:],dtype=tf.float32)
          bias_i = tf.reshape(tf.constant(var_i[-1,:],dtype=tf.float32),[-1])
          float_classes = tf.nn.xw_plus_b(float_classes,weight_i,bias_i)
          if i<FLAGS.encoder_layers-1:
            float_classes = tf.nn.relu(float_classes)
          else:
            float_classes = tf.nn.sigmoid(float_classes)
            #float_classes = tf.nn.relu(tf.sign(float_classes - 0.5))
        cross_entropy_class = self.calculate_mseloss(predictions_class,float_classes)
      else:
        float_classes = float_labels
        for i in range(FLAGS.moe_layers-1):
          float_classes = tf.concat((float_classes,float_labels),axis=1)
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      cross_entropy_loss = self.calculate_loss(predictions,labels)
      return cross_entropy_loss + 0.1*cross_entropy_class
labels_embedding.py (project: youtube-8m, author: wangheda)
def create_model(self, model_input, vocab_size, l2_penalty=1e-8, **unused_params):
        """Creates a logistic model.

        Args:
          model_input: 'batch' x 'num_features' matrix of input features.
          vocab_size: The number of classes in the dataset.

        Returns:
          A dictionary with a tensor containing the probability predictions of the
          model in the 'predictions' key. The dimensions of the tensor are
          batch_size x num_classes."""
        model_input = tf.cast(model_input,dtype=tf.float32)
        hidden_size = FLAGS.hidden_size

        model_mask, indices_input = tf.nn.top_k(model_input, k=FLAGS.top_k)
        indices_input = tf.reshape(indices_input, [-1])
        models_mask = tf.reshape(model_mask, [-1,FLAGS.top_k,1])
        with tf.name_scope("embedding"):
            embeddings = tf.Variable(
                tf.random_uniform([vocab_size, hidden_size], -1.0, 1.0))
            embed = tf.nn.embedding_lookup(embeddings, indices_input)
            output = slim.fully_connected(
                embed,
                vocab_size,
                activation_fn=tf.nn.sigmoid,
                weights_regularizer=slim.l2_regularizer(l2_penalty),
                scope="output")
        indices_one_hot = tf.one_hot(indices_input, vocab_size)
        output = output * (1 - indices_one_hot) + indices_one_hot
        output_val = tf.reshape(output,[-1,FLAGS.top_k,vocab_size])
        predictions_val = tf.reduce_sum(output_val*models_mask, axis=1)/tf.reduce_sum(models_mask, axis=1)
        return {"predictions": output, "predictions_val": predictions_val}
model.py (project: human-rl, author: gsastry)
def categorical_sample(logits, d):
    # subtract the row max for numerical stability; tf.multinomial accepts unnormalized log-probs
    value = tf.squeeze(tf.multinomial(logits - tf.reduce_max(logits, [1], keep_dims=True), 1), [1])
    return tf.one_hot(value, d)
model.py (project: human-rl, author: gsastry)
def categorical_max(logits, d):
    # subtracting the row max does not change the argmax; kept for symmetry with categorical_sample
    value = tf.argmax(logits - tf.reduce_max(logits, [1], keep_dims=True), axis=1)
    return tf.one_hot(value, d)
Gan.py (project: ICGan-tensorflow, author: zhangqianhui)
def encode_y(self, x, weights, biases):

        c1 = tf.nn.relu(batch_normal(conv2d(x, weights['e1'], biases['eb1']), scope='eny_bn1'))

        c2 = tf.nn.relu(batch_normal(conv2d(c1, weights['e2'], biases['eb2']), scope='eny_bn2'))

        c2 = tf.reshape(c2, [self.batch_size, 128 * 7 * 7])

        result_y = tf.nn.sigmoid(fully_connect(c2, weights['e3'], biases['eb3']))

        #y_vec = tf.one_hot(tf.arg_max(result_y, 1), 10)

        return result_y
beam_search.py (project: seq2seq, author: google)
def mask_probs(probs, eos_token, finished):
  """Masks log probabilities such that finished beams
  allocate all probability mass to eos. Unfinished beams remain unchanged.

  Args:
    probs: Log probabilities of shape `[beam_width, vocab_size]`
    eos_token: An int32 id corresponding to the EOS token to allocate
      probability to
    finished: A boolean tensor of shape `[beam_width]` that specifies which
      elements in the beam are finished already.

  Returns:
    A tensor of shape `[beam_width, vocab_size]`, where unfinished beams
    stay unchanged and finished beams are replaced with a tensor that has all
    probability on the EOS token.
  """
  vocab_size = tf.shape(probs)[1]
  finished_mask = tf.expand_dims(tf.to_float(1. - tf.to_float(finished)), 1)
  # These examples are not finished and we leave them
  non_finished_examples = finished_mask * probs
  # All finished examples are replaced with a vector that has all
  # probability on EOS
  finished_row = tf.one_hot(
      eos_token,
      vocab_size,
      dtype=tf.float32,
      on_value=0.,
      off_value=tf.float32.min)
  finished_examples = (1. - finished_mask) * finished_row
  return finished_examples + non_finished_examples
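As a quick illustration of mask_probs (a hedged usage sketch, not part of the original file): with a beam of two where only the second hypothesis is finished, the finished row collapses all probability onto the EOS token:

import tensorflow as tf

probs = tf.log(tf.constant([[0.5, 0.3, 0.2],
                            [0.6, 0.3, 0.1]]))
finished = tf.constant([False, True])
masked = mask_probs(probs, eos_token=0, finished=finished)

with tf.Session() as sess:
    print(sess.run(masked))
    # row 0 is unchanged; row 1 becomes [0, -3.4e38, -3.4e38] (all mass on EOS)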

