Python one_hot() usage examples (source code)
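
Every snippet on this page calls tf.one_hot(indices, depth, on_value, off_value, axis), which expands integer class indices into dense one-hot vectors. As a quick reference, here is a minimal sketch of what the op produces (TensorFlow 1.x graph-and-session style, matching the snippets below):

import tensorflow as tf

indices = tf.constant([0, 2, 1])
# Defaults: on_value=1.0, off_value=0.0, axis=-1 -> shape [3, 4]
dense = tf.one_hot(indices, depth=4)

with tf.Session() as sess:
    print(sess.run(dense))
    # [[1. 0. 0. 0.]
    #  [0. 0. 1. 0.]
    #  [0. 1. 0. 0.]]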

Source file: train_catastrophe_model_human.py (project: human-rl, author: gsastry)
def model(self, features, labels):
        x = features["observation"]
        x = tf.contrib.layers.convolution2d(x, 2, kernel_size=[3, 3], stride=[2, 2], activation_fn=tf.nn.elu)
        x = tf.contrib.layers.convolution2d(x, 2, kernel_size=[3, 3], stride=[2, 2], activation_fn=tf.nn.elu)
        actions = tf.one_hot(tf.reshape(features["action"],[-1]), depth=6, on_value=1.0, off_value=0.0, axis=1)
        x = tf.concat([tf.contrib.layers.flatten(x), actions], axis=1)
        x = tf.contrib.layers.fully_connected(x, 100, activation_fn=tf.nn.elu)
        x = tf.contrib.layers.fully_connected(x, 100, activation_fn=tf.nn.elu)
        logits = tf.contrib.layers.fully_connected(x, 1, activation_fn=None)
        prediction = tf.sigmoid(logits, name="prediction")
        loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=tf.expand_dims(labels, axis=1)), name="loss")
        train_op = tf.contrib.layers.optimize_loss(
          loss, tf.contrib.framework.get_global_step(), optimizer='Adam',
          learning_rate=self.learning_rate)
        tf.add_to_collection('prediction', prediction)
        tf.add_to_collection('loss', loss)
        return prediction, loss, train_op
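
The pattern worth noting above: a batch of integer actions is flattened with tf.reshape(..., [-1]), one-hot encoded, and concatenated with the flattened conv features so the dense layers see both. A shape-only sketch of that step (toy sizes, TF 1.x; the names are illustrative, not from the project):

import tensorflow as tf

batch = 4
conv_features = tf.zeros([batch, 32])            # stand-in for flatten(x)
action = tf.constant([[0], [3], [5], [2]])       # shape [4, 1]
actions = tf.one_hot(tf.reshape(action, [-1]), depth=6,
                     on_value=1.0, off_value=0.0, axis=1)  # [4, 6]
joined = tf.concat([conv_features, actions], axis=1)       # [4, 38]
print(joined.get_shape())  # (4, 38)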
Source file: Gan.py (project: ICGan-tensorflow, author: zhangqianhui)
def build_model4(self):

        self.weights3, self.biases3 = self.get_en_z_variables()
        self.weights4, self.biases4 = self.get_en_y_variables()

        self.e_z = self.encode_z(self.images, weights=self.weights3, biases=self.biases3)
        self.e_y = self.encode_y(self.images, weights=self.weights4, biases=self.biases4)

        # Change y: shift the predicted label by extend_value (+1, +2 or +3)
        self.e_y = tf.one_hot(tf.argmax(self.e_y, 1) + self.extend_value, 10)

        self.fake_images = self.generate(self.e_z, self.e_y, weights=self.weights1, biases=self.biases1)

        t_vars = tf.trainable_variables()

        self.g_vars = [var for var in t_vars if 'gen' in var.name]
        self.enz_vars = [var for var in t_vars if 'enz' in var.name]
        self.eny_vars = [var for var in t_vars if 'eny' in var.name]

        self.saver = tf.train.Saver(self.g_vars)

        self.saver_z = tf.train.Saver(self.g_vars + self.enz_vars)
        self.saver_y = tf.train.Saver(self.eny_vars)

    #do train
Source file: Gan.py (project: ICGan-tensorflow, author: zhangqianhui)
def encode_z(self, x, weights, biases):

        c1 = tf.nn.relu(batch_normal(conv2d(x, weights['e1'], biases['eb1']), scope='enz_bn1'))

        c2 = tf.nn.relu(batch_normal(conv2d(c1, weights['e2'], biases['eb2']), scope='enz_bn2'))

        c2 = tf.reshape(c2, [self.batch_size, 128*7*7])

        #using tanh instead of tf.nn.relu.
        result_z = batch_normal(fully_connect(c2, weights['e3'], biases['eb3']), scope='enz_bn3')

        #result_c = tf.nn.sigmoid(fully_connect(c2, weights['e4'], biases['eb4']))

        #Transforming one-hot form
        #sparse_label = tf.arg_max(result_c, 1)

        #y_vec = tf.one_hot(sparse_label, 10)

        return result_z
Source file: character_rnn.py (project: deep-learning, author: ljanyst)
def get_training_tensors(self, learning_rate = 0.001, grad_clip = 5):
        #-----------------------------------------------------------------------
        # Build a loss function
        #-----------------------------------------------------------------------
        with tf.name_scope('targets-encode'):
            y_one_hot  = tf.one_hot(self.targets, self.n_classes)
            y_reshaped = tf.reshape(y_one_hot, self.logits.get_shape())

        with tf.name_scope('loss'):
            loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits,
                                                           labels=y_reshaped)
            loss = tf.reduce_mean(loss)
            tf.summary.scalar('loss', loss)

        #-----------------------------------------------------------------------
        # Build the optimizer
        #-----------------------------------------------------------------------
        with tf.name_scope('optimizer'):
            tvars     = tf.trainable_variables()
            grads, _  = tf.clip_by_global_norm(tf.gradients(loss, tvars),
                                               grad_clip)
            optimizer = tf.train.AdamOptimizer(learning_rate)
            train_op  = optimizer.apply_gradients(zip(grads, tvars))

        return loss, train_op
Source file: a3_entity_network.py (project: text_classification, author: brightmart)
def smoothing_cross_entropy(self, logits, labels, vocab_size, confidence=0.9):  # confidence = 1.0 - label_smoothing, where label_smoothing = 0.1; from http://github.com/tensorflow/tensor2tensor
        """Cross entropy with label smoothing to limit over-confidence."""
        with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
            # Low confidence is given to all non-true labels, uniformly.
            low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
            # Normalizing constant is the best cross-entropy value with soft targets.
            # We subtract it just for readability, makes no difference on learning.
            normalizing = -(confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) * low_confidence * tf.log(low_confidence + 1e-20))
            # Soft targets.
            soft_targets = tf.one_hot(
                tf.cast(labels, tf.int32),
                depth=vocab_size,
                on_value=confidence,
                off_value=low_confidence)
            xentropy = tf.nn.softmax_cross_entropy_with_logits(
                logits=logits, labels=soft_targets)
        return xentropy - normalizing
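
The soft-target arithmetic above is easy to sanity-check outside TensorFlow. A small numpy sketch (values assumed for illustration: vocab_size=5, confidence=0.9, true label 2) shows that the smoothed target is still a valid distribution and reproduces the `normalizing` constant subtracted above:

import numpy as np

confidence, vocab_size = 0.9, 5
low_confidence = (1.0 - confidence) / (vocab_size - 1)   # 0.025

# Soft target for true label 2: off_value everywhere, on_value at index 2.
soft = np.full(vocab_size, low_confidence)
soft[2] = confidence
print(soft)        # [0.025 0.025 0.9   0.025 0.025]
print(soft.sum())  # 1.0 -- still a valid probability distribution

# Best achievable cross-entropy against this target (the `normalizing` term):
normalizing = -(confidence * np.log(confidence)
                + (vocab_size - 1) * low_confidence * np.log(low_confidence + 1e-20))
print(normalizing)  # ~0.464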
Source file: a8_dynamic_memory_network.py (project: text_classification, author: brightmart)
def smoothing_cross_entropy(self, logits, labels, vocab_size, confidence=0.9):  # confidence = 1.0 - label_smoothing, where label_smoothing = 0.1; from http://github.com/tensorflow/tensor2tensor
        """Cross entropy with label smoothing to limit over-confidence."""
        with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
            # Low confidence is given to all non-true labels, uniformly.
            low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
            # Normalizing constant is the best cross-entropy value with soft targets.
            # We subtract it just for readability, makes no difference on learning.
            normalizing = -(confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) * low_confidence * tf.log(low_confidence + 1e-20))
            # Soft targets.
            soft_targets = tf.one_hot(
                tf.cast(labels, tf.int32),
                depth=vocab_size,
                on_value=confidence,
                off_value=low_confidence)
            xentropy = tf.nn.softmax_cross_entropy_with_logits(
                logits=logits, labels=soft_targets)
        return xentropy - normalizing
Source file: rpn_data.py (project: tf-image-interpreter, author: ThoughtWorksInc)
def _generate_labels(self, overlaps):
    labels = tf.Variable(tf.ones(shape=(tf.shape(overlaps)[0],), dtype=tf.float32) * -1, trainable=False,
                         validate_shape=False)
    gt_max_overlaps = tf.argmax(overlaps, axis=0)
    anchor_max_overlaps = tf.argmax(overlaps, axis=1)
    mask = tf.one_hot(anchor_max_overlaps, tf.shape(overlaps)[1], on_value=True, off_value=False)
    max_overlaps = tf.boolean_mask(overlaps, mask)
    if self._debug:
      max_overlaps = tf.Print(max_overlaps, [max_overlaps])
    labels = tf.scatter_update(labels, gt_max_overlaps, tf.ones((tf.shape(gt_max_overlaps)[0],)))
    # TODO: extract config object
    over_threshold_mask = tf.reshape(tf.where(max_overlaps > 0.5), (-1,))
    if self._debug:
      over_threshold_mask = tf.Print(over_threshold_mask, [over_threshold_mask], message='over threshold index : ')
    labels = tf.scatter_update(labels, over_threshold_mask, tf.ones((tf.shape(over_threshold_mask)[0],)))
    # TODO: support clobber positive in the origin implement
    below_threshold_mask = tf.reshape(tf.where(max_overlaps < 0.3), (-1,))
    if self._debug:
      below_threshold_mask = tf.Print(below_threshold_mask, [below_threshold_mask], message='below threshold index : ')
    labels = tf.scatter_update(labels, below_threshold_mask, tf.zeros((tf.shape(below_threshold_mask)[0],)))
    return labels
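
Note how `_generate_labels` calls tf.one_hot with on_value=True / off_value=False: the result is a boolean matrix, which tf.boolean_mask then uses to pull out each anchor's best overlap. A minimal sketch of that pattern (toy 3x2 overlap matrix, TF 1.x; names are illustrative only):

import tensorflow as tf

overlaps = tf.constant([[0.1, 0.7],
                        [0.4, 0.2],
                        [0.6, 0.9]])
best_gt = tf.argmax(overlaps, axis=1)                           # [1, 0, 1]
mask = tf.one_hot(best_gt, 2, on_value=True, off_value=False)   # tf.bool mask
max_overlaps = tf.boolean_mask(overlaps, mask)

with tf.Session() as sess:
    print(sess.run(max_overlaps))  # [0.7 0.4 0.9] -- one value per row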
Source file: model.py (project: deeplab_v1_tf1.0, author: automan000)
def prepare_label(self, input_batch, new_size):
        """Resize masks and perform one-hot encoding.

        Args:
          input_batch: input tensor of shape [batch_size H W 1].
          new_size: a tensor with new height and width.

        Returns:
          Outputs a tensor of shape [batch_size h w 21]
          with last dimension comprised of 0's and 1's only.
        """
        with tf.name_scope('label_encode'):
            input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # As labels are integer numbers, need to use NN interp.
            input_batch = tf.squeeze(input_batch, axis=[3]) # Reducing the channel dimension.
            input_batch = tf.one_hot(input_batch, depth=21)
        return input_batch
Source file: min_examp.py (project: TFExperiments, author: gnperdue)
def parse_mnist_tfrec(tfrecord, features_shape):
    tfrecord_features = tf.parse_single_example(
        tfrecord,
        features={
            'features': tf.FixedLenFeature([], tf.string),
            'targets': tf.FixedLenFeature([], tf.string)
        }
    )
    features = tf.decode_raw(tfrecord_features['features'], tf.uint8)
    features = tf.reshape(features, features_shape)
    features = tf.cast(features, tf.float32)
    targets = tf.decode_raw(tfrecord_features['targets'], tf.uint8)
    targets = tf.reshape(targets, [])
    targets = tf.one_hot(indices=targets, depth=10, on_value=1, off_value=0)
    targets = tf.cast(targets, tf.float32)
    return features, targets
Source file: DataReaders.py (project: TFExperiments, author: gnperdue)
def parse_mnist_tfrec(tfrecord, name, features_shape, scalar_targs=False):
    tfrecord_features = tf.parse_single_example(
        tfrecord,
        features={
            'features': tf.FixedLenFeature([], tf.string),
            'targets': tf.FixedLenFeature([], tf.string)
        },
        name=name+'_data'
    )
    with tf.variable_scope('features'):
        features = tf.decode_raw(
            tfrecord_features['features'], tf.uint8
        )
        features = tf.reshape(features, features_shape)
        features = tf.cast(features, tf.float32)
    with tf.variable_scope('targets'):
        targets = tf.decode_raw(tfrecord_features['targets'], tf.uint8)
        if scalar_targs:
            targets = tf.reshape(targets, [])
        targets = tf.one_hot(
            indices=targets, depth=10, on_value=1, off_value=0
        )
        targets = tf.cast(targets, tf.float32)
    return features, targets
Source file: common.py (project: HyperGAN, author: 255BITS)
def get_lookup_table(self):
        if self.lookup is None:
            vocabulary = self.get_vocabulary()
            values = np.arange(len(vocabulary))
            lookup = {}

            if self.one_hot:
                for i, key in enumerate(vocabulary):
                    lookup[key]=self.np_one_hot(values[i], len(values))
            else:
                for i, key in enumerate(vocabulary):
                    lookup[key]=values[i]

            #reverse the hash
            lookup = {i[1]:i[0] for i in lookup.items()}
            self.lookup = lookup
        return self.lookup
Source file: common.py (project: HyperGAN, author: 255BITS)
def sample_output(self, val):
        vocabulary = self.get_vocabulary()
        if self.one_hot:
            vals = [ np.argmax(r) for r in val ]
            ox_val = [vocabulary[obj] for obj in list(vals)]
            string = "".join(ox_val)
            return string
        else:
            val = np.reshape(val, [-1])
            val *= len(vocabulary)/2.0
            val += len(vocabulary)/2.0
            val = np.round(val)

            val = np.maximum(0, val)
            val = np.minimum(len(vocabulary)-1, val)

            ox_val = [self.get_character(obj) for obj in list(val)]
            string = "".join(ox_val)
            return string
Source file: multivariate.py (project: zhusuan, author: thu-ml)
def _sample(self, n_samples):
        if self.logits.get_shape().ndims == 2:
            logits_flat = self.logits
        else:
            logits_flat = tf.reshape(self.logits, [-1, self.n_categories])
        samples_flat = tf.transpose(
            tf.multinomial(logits_flat, n_samples * self.n_experiments))
        shape = tf.concat([[n_samples, self.n_experiments],
                           self.batch_shape], 0)
        samples = tf.reshape(samples_flat, shape)
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        static_n_exps = self.n_experiments if isinstance(self.n_experiments,
                                                         int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples, static_n_exps]).
            concatenate(self.get_batch_shape()))
        samples = tf.reduce_sum(
            tf.one_hot(samples, self.n_categories, dtype=self.dtype), axis=1)
        return samples
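
The trick in `_sample` above is that summing one-hot vectors over the trial axis converts per-trial category indices into multinomial count vectors. A stripped-down sketch of just that step (toy values, TF 1.x):

import tensorflow as tf

# Three per-trial category draws for a single sample, n_categories = 4.
draws = tf.constant([2, 0, 2])
counts = tf.reduce_sum(tf.one_hot(draws, 4), axis=0)

with tf.Session() as sess:
    print(sess.run(counts))  # [1. 0. 2. 0.] -- one draw of 0, two draws of 2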
Source file: multivariate.py (project: zhusuan, author: thu-ml)
def _sample(self, n_samples):
        if self.logits.get_shape().ndims == 2:
            logits_flat = self.logits
        else:
            logits_flat = tf.reshape(self.logits, [-1, self.n_categories])
        samples_flat = tf.transpose(tf.multinomial(logits_flat, n_samples))
        if self.logits.get_shape().ndims == 2:
            samples = samples_flat
        else:
            shape = tf.concat([[n_samples], self.batch_shape], 0)
            samples = tf.reshape(samples_flat, shape)
            static_n_samples = n_samples if isinstance(n_samples,
                                                       int) else None
            samples.set_shape(
                tf.TensorShape([static_n_samples]).
                concatenate(self.get_batch_shape()))
        samples = tf.one_hot(samples, self.n_categories, dtype=self.dtype)
        return samples
Source file: doc2vec_train_doc_prediction.py (project: kaggle_redefining_cancer_treatment, author: jorgemf)
def doc2vec_prediction_model(input_vectors, input_gene, input_variation, output_label, batch_size,
                             is_training, embedding_size, output_classes):
    # inputs/outputs
    input_vectors = tf.reshape(input_vectors, [batch_size, embedding_size])
    input_gene = tf.reshape(input_gene, [batch_size, embedding_size])
    input_variation = tf.reshape(input_variation, [batch_size, embedding_size])
    targets = None
    if output_label is not None:
        output_label = tf.reshape(output_label, [batch_size, 1])
        targets = tf.one_hot(output_label, axis=-1, depth=output_classes, on_value=1.0,
                             off_value=0.0)
        targets = tf.squeeze(targets, axis=1)

    net = tf.concat([input_vectors, input_gene, input_variation], axis=1)
    net = layers.fully_connected(net, embedding_size * 2, activation_fn=tf.nn.relu)
    net = layers.dropout(net, keep_prob=0.85, is_training=is_training)
    net = layers.fully_connected(net, embedding_size, activation_fn=tf.nn.relu)
    net = layers.dropout(net, keep_prob=0.85, is_training=is_training)
    net = layers.fully_connected(net, embedding_size // 4, activation_fn=tf.nn.relu)
    logits = layers.fully_connected(net, output_classes, activation_fn=None)

    return logits, targets
Source file: network.py (project: PixelDCN, author: HongyangGao)
def cal_loss(self):
        one_hot_labels = tf.one_hot(
            self.labels, depth=self.conf.class_num,
            axis=self.channel_axis, name='labels/one_hot')
        losses = tf.losses.softmax_cross_entropy(
            one_hot_labels, self.predictions, scope='loss/losses')
        self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
        self.decoded_preds = tf.argmax(
            self.predictions, self.channel_axis, name='accuracy/decode_pred')
        correct_prediction = tf.equal(
            self.labels, self.decoded_preds,
            name='accuracy/correct_pred')
        self.accuracy_op = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
            name='accuracy/accuracy_op')
        # weights = tf.cast(
        #     tf.greater(self.decoded_preds, 0, name='m_iou/greater'),
        #     tf.int32, name='m_iou/weights')
        weights = tf.cast(
            tf.less(self.labels, self.conf.channel, name='m_iou/less'),
            tf.int64, name='m_iou/weights')
        labels = tf.multiply(self.labels, weights, name='m_iou/mul')
        self.m_iou, self.miou_op = tf.metrics.mean_iou(
            labels, self.decoded_preds, self.conf.class_num,
            weights, name='m_iou/m_ious')
Source file: lossFunction.py (project: dwt, author: min2209)
def depthCELoss2(pred, gt, weight, ss, outputChannels=16):
    with tf.name_scope("depth_CE_loss"):
        pred = tf.reshape(pred, (-1, outputChannels))
        epsilon = tf.constant(value=1e-25)
        predSoftmax = tf.to_float(tf.nn.softmax(pred))

        gt = tf.one_hot(indices=tf.to_int32(tf.squeeze(tf.reshape(gt, (-1, 1)))), depth=outputChannels, dtype=tf.float32)
        ss = tf.to_float(tf.reshape(ss, (-1, 1)))
        weight = tf.to_float(tf.reshape(weight, (-1, 1)))

        crossEntropyScaling = tf.to_float([3.0, 3.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])

        crossEntropy = -tf.reduce_sum(((1-gt)*tf.log(tf.maximum(1-predSoftmax, epsilon))
                                       + gt*tf.log(tf.maximum(predSoftmax, epsilon)))*ss*crossEntropyScaling*weight,
                                      reduction_indices=[1])

        crossEntropySum = tf.reduce_sum(crossEntropy, name="cross_entropy_sum")
        return crossEntropySum
Source file: loss_function.py (project: dwt, author: min2209)
def depthCELoss2(pred, gt, weight, ss, outputChannels=16):
    with tf.name_scope("depth_CE_loss"):
        pred = tf.reshape(pred, (-1, outputChannels))
        epsilon = tf.constant(value=1e-25)
        predSoftmax = tf.to_float(tf.nn.softmax(pred))

        gt = tf.one_hot(indices=tf.to_int32(tf.squeeze(tf.reshape(gt, (-1, 1)))), depth=outputChannels, dtype=tf.float32)
        ss = tf.to_float(tf.reshape(ss, (-1, 1)))
        weight = tf.to_float(tf.reshape(weight, (-1, 1)))

        crossEntropyScaling = tf.to_float([3.0, 3.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
        crossEntropy = -tf.reduce_sum(((1-gt)*tf.log(tf.maximum(1-predSoftmax, epsilon))
                                       + gt*tf.log(tf.maximum(predSoftmax, epsilon)))*ss*crossEntropyScaling*weight,
                                      reduction_indices=[1])

        crossEntropySum = tf.reduce_sum(crossEntropy, name="cross_entropy_sum")

        return crossEntropySum
Source file: model.py (project: tensorflow-deeplab-lfov, author: DrSleep)
def prepare_label(self, input_batch, new_size):
        """Resize masks and perform one-hot encoding.

        Args:
          input_batch: input tensor of shape [batch_size H W 1].
          new_size: a tensor with new height and width.

        Returns:
          Outputs a tensor of shape [batch_size h w 21]
          with last dimension comprised of 0's and 1's only.
        """
        with tf.name_scope('label_encode'):
            input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # As labels are integer numbers, need to use NN interp.
        input_batch = tf.squeeze(input_batch, axis=[3]) # Reducing the channel dimension.
            input_batch = tf.one_hot(input_batch, depth=21)
        return input_batch
Source file: network.py (project: 3D_Dense_Transformer_Networks, author: JohnYC1995)
def cal_loss(self):
        expand_annotations = tf.expand_dims(
            self.annotations, -1, name='annotations/expand_dims')
        one_hot_annotations = tf.squeeze(
            expand_annotations, axis=[self.channel_axis],
            name='annotations/squeeze')
        one_hot_annotations = tf.one_hot(
            one_hot_annotations, depth=self.conf.class_num,
            axis=self.channel_axis, name='annotations/one_hot')
        losses = tf.losses.softmax_cross_entropy(
            one_hot_annotations, self.predictions, scope='loss/losses')
        self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
        self.decoded_predictions = tf.argmax(
            self.predictions, self.channel_axis, name='accuracy/decode_pred')
        self.dice_accuracy_op, self.sub_dice_list = ops.dice_accuracy(self.decoded_predictions,\
                                self.annotations,self.conf.class_num)
        correct_prediction = tf.equal(
            self.annotations, self.decoded_predictions,
            name='accuracy/correct_pred')
        self.accuracy_op = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
            name='accuracy/accuracy_op')
Source file: utils.py (project: opinatt, author: epochx)
def _extract_argmax_and_one_hot(one_hot_size,
                                output_projection=None):
  """Get a loop_function that extracts the previous symbol and build a one-hot vector for it.

  Args:
    one_hot_size: total size of one-hot vector.
    output_projection: None or a pair (W, B). If provided, each fed previous
      output will first be multiplied by W and added B.

  Returns:
    A loop function.
  """
  def loop_function(prev, _):
    if output_projection is not None:
      prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])
    prev_symbol = math_ops.argmax(prev, 1)
    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.
    emb_prev = tf.one_hot(prev_symbol, one_hot_size)
    return emb_prev

  return loop_function
Source file: loss.py (project: relaax, author: deeplearninc)
def build_graph(self, actor, critic, cfg):
        self.ph_action = graph.Placeholder(np.int32, shape=(None,), name="ph_action")
        self.ph_advantage = graph.Placeholder(np.float32, shape=(None,), name="ph_adv")
        self.ph_discounted_reward = graph.Placeholder(np.float32, shape=(None,), name="ph_edr")

        action_one_hot = tf.one_hot(self.ph_action.node, actor.action_size)

        # avoid NaN
        log_pi = tf.log(tf.maximum(actor.node, 1e-20))

        # policy entropy
        self.entropy = -tf.reduce_sum(actor.node * log_pi)

        # policy loss
        self.policy_loss = -(tf.reduce_sum(tf.reduce_sum(log_pi * action_one_hot, axis=1) * self.ph_advantage.node)
                             + self.entropy * cfg.entropy_beta)

        # value loss
        self.value_loss = tf.reduce_sum(tf.square(self.ph_discounted_reward.node - critic.node))

        # gradient of policy and value are summed up
        # (Learning rate for the Critic is sized by critic_scale parameter)
        return self.policy_loss + cfg.critic_scale * self.value_loss
Source file: loss.py (project: relaax, author: deeplearninc)
def build_graph(self, q_network, config):
        self.ph_reward = tf.placeholder(tf.float32, [None])
        self.ph_action = tf.placeholder(tf.int32, [None])
        self.ph_terminal = tf.placeholder(tf.int32, [None])
        self.ph_q_next_target = tf.placeholder(tf.float32, [None, config.output.action_size])
        self.ph_q_next = tf.placeholder(tf.float32, [None, config.output.action_size])

        action_one_hot = tf.one_hot(self.ph_action, config.output.action_size)
        q_action = tf.reduce_sum(tf.multiply(q_network.node, action_one_hot), axis=1)

        if config.double_dqn:
            q_max = tf.reduce_sum(self.ph_q_next_target * tf.one_hot(tf.argmax(self.ph_q_next, axis=1),
                                                                     config.output.action_size), axis=1)
        else:
            q_max = tf.reduce_max(self.ph_q_next_target, axis=1)

        y = self.ph_reward + tf.cast(1 - self.ph_terminal, tf.float32) * tf.scalar_mul(config.rewards_gamma, q_max)

        return tf.losses.absolute_difference(q_action, y)
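
Both relaax losses above rely on the same indexing idiom: multiplying the Q-value (or log-probability) matrix by the action's one-hot vector and reducing over the action axis selects the entry for the action actually taken. A minimal sketch (toy values, TF 1.x):

import tensorflow as tf

q_values = tf.constant([[1.0, 5.0, 3.0],
                        [2.0, 0.5, 4.0]])
actions = tf.constant([1, 2])  # action taken in each of the two states
q_taken = tf.reduce_sum(q_values * tf.one_hot(actions, 3), axis=1)

with tf.Session() as sess:
    print(sess.run(q_taken))  # [5. 4.]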
Source file: rnn_test.py (project: tensorflow-deep-qa, author: shuishen112)
def char_rnn_model(features, target):
  """Character level recurrent neural network model to predict classes."""
  target = tf.one_hot(target, 15, 1, 0)
  byte_list = tf.one_hot(features, 256, 1, 0)
  byte_list = tf.unstack(byte_list, axis=1)

  cell = tf.contrib.rnn.GRUCell(HIDDEN_SIZE)
  _, encoding = tf.contrib.rnn.static_rnn(cell, byte_list, dtype=tf.float32)

  logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)

  train_op = tf.contrib.layers.optimize_loss(
      loss,
      tf.contrib.framework.get_global_step(),
      optimizer='Adam',
      learning_rate=0.01)

  return ({
      'class': tf.argmax(logits, 1),
      'prob': tf.nn.softmax(logits)
  }, loss, train_op)
Source file: train.py (project: YOLO2TensorFlow, author: PaulChongPeng)
def inference_sequential(image_batch):
    network_fn = nets_factory.get_network_fn(
        name=FLAGS.model_name,
        num_classes=FLAGS.num_classes,
        is_training=True,
        weight_decay=FLAGS.weight_decay,
        num_anchors=5)
    net, end_points = network_fn(image_batch)

    box_coordinate, box_confidence, box_class_probs = yolo_v2.yolo_v2_head(net, FLAGS.num_classes,
                                                                           [[1, 2], [1, 3], [2, 1], [3, 1], [1, 1]],
                                                                           True)

    # preds = tf.reduce_max(box_class_probs, 4)
    # preds = tf.one_hot(tf.cast(preds, tf.int32), FLAGS.num_classes)

    # return preds

    return box_coordinate, box_confidence, box_class_probs


# =========================================================================== #
# Main training routine.
# =========================================================================== #
Source file: train.py (project: ssd_tensorflow, author: railsnoob)
def debug_train_setup(self):
        """ Use this SOLELY to figure out the size of the feature maps. """

        x = tf.placeholder(tf.float32,shape=(None,\
                                            self.cfg.g("image_height"),\
                                            self.cfg.g("image_width"),\
                                            self.cfg.g("n_channels")),\
                                            name='x')

        y = tf.placeholder(tf.int32,shape=(None,self.cfg.g("num_preds")),name='y')
        one_hot_y = tf.one_hot(y,10)

        loc, conf = self._net.graph(x)

        # This is just a placeholder cost
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=conf,labels=y))

        optimizer = tf.train.AdamOptimizer(learning_rate=self.cfg.g("adam_learning_rate")).minimize(cost)
Source file: network.py (project: Unet_3D, author: zhengyang-wang)
def cal_loss(self):
        one_hot_annotations = tf.one_hot(
            self.annotations, depth=self.conf.class_num,
            axis=self.channel_axis, name='annotations/one_hot')
        losses = tf.losses.softmax_cross_entropy(
            one_hot_annotations, self.predictions, scope='loss/losses')
        self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
        self.decoded_predictions = tf.argmax(
            self.predictions, self.channel_axis, name='accuracy/decode_pred')
        correct_prediction = tf.equal(
            self.annotations, self.decoded_predictions,
            name='accuracy/correct_pred')
        self.accuracy_op = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
            name='accuracy/accuracy_op')
        self.softmax_predictions = tf.nn.softmax(self.predictions)
Source file: utils.py (project: tensorflow-deeplab-resnet, author: DrSleep)
def prepare_label(input_batch, new_size, num_classes, one_hot=True):
    """Resize masks and perform one-hot encoding.

    Args:
      input_batch: input tensor of shape [batch_size H W 1].
      new_size: a tensor with new height and width.
      num_classes: number of classes to predict (including background).
      one_hot: whether perform one-hot encoding.

    Returns:
      Outputs a tensor of shape [batch_size h w num_classes]
      with last dimension comprised of 0's and 1's only.
    """
    with tf.name_scope('label_encode'):
        input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.
        input_batch = tf.squeeze(input_batch, axis=[3]) # reducing the channel dimension.
        if one_hot:
            input_batch = tf.one_hot(input_batch, depth=num_classes)
    return input_batch
Source file: tf_models.py (project: dataset, author: analysiscenter)
def _build(self, inputs, *args, **kwargs):
        #images_shape = self.get_from_config('images_shape', (12, 12, 1))
        #num_classes = self.get_from_config('num_classes', 3)

        #x = tf.placeholder("float", [None] + list(images_shape), name='x')
        #y = tf.placeholder("int32",[None], name='y')
        #y_oe = tf.one_hot(y, num_classes, name='targets')

        c = conv2d_block(inputs['x'], 3, 3, conv=dict(kernel_initializer=tf.contrib.layers.xavier_initializer()), max_pooling=dict(strides=4))
        f = tf.reduce_mean(c, [1,2])
        y_ = tf.identity(f, name='predictions')

        # Define a cost function
        #tf.losses.add_loss(tf.losses.softmax_cross_entropy(y_oe, y_))
        #loss = tf.losses.softmax_cross_entropy(y_oe, y_)
        #self.train_step = tf.train.AdamOptimizer().minimize(loss)
        #print(c.shape)

        print("___________________ MyModel initialized")
Source file: mnist_model.py (project: dataset, author: analysiscenter)
def static_nn():
        input_images = tf.placeholder("uint8", [None, 28, 28, 1])
        input_labels = tf.placeholder("uint8", [None])

        input_vectors = tf.cast(tf.reshape(input_images, [-1, 28 * 28]), 'float')
        layer1 = tf.layers.dense(input_vectors, units=512, activation=tf.nn.relu)
        layer2 = tf.layers.dense(layer1, units=256, activation=tf.nn.relu)
        model_output = tf.layers.dense(layer2, units=10)
        encoded_labels = tf.one_hot(input_labels, depth=10)

        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=encoded_labels, logits=model_output))
        optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

        prediction = tf.argmax(model_output, 1)
        correct_prediction = tf.equal(prediction, tf.argmax(encoded_labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

        return [[input_images, input_labels], [optimizer, cost, accuracy]]

