Example source code for Python's placeholder_with_default()
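tf.placeholder_with_default creates a placeholder that falls back to a default tensor whenever no value is fed, so the same graph works with and without a feed_dict entry. A minimal sketch, assuming TensorFlow 1.x (the API all snippets below target):

import tensorflow as tf

x = tf.placeholder_with_default(tf.constant([1.0, 2.0]), shape=[None])
y = x * 2.0

with tf.Session() as sess:
    print(sess.run(y))               # no feed: uses the default, prints [2. 4.]
    print(sess.run(y, {x: [10.0]}))  # explicit feed overrides it, prints [20.]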

seq2seq_solution.py (project: deep_learning, author: wecliqued)
def _create_cell(self, seq, no_stacked_cells):
        """
        Creates GRU cell
        :param seq: placeholder of the input batch
        :return: cell and placeholder for its internal state
        """
        batch_size = tf.shape(seq)[0]
        # Since around May 2017, there is a new way of constructing MultiRNNCell: one cell object per layer
        cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.GRUCell(self.hidden_size) for _ in range(no_stacked_cells)])
        multi_cell_zero_state = cell.zero_state(batch_size, tf.float32)
        in_state_shape = tuple([None, self.hidden_size] for _ in range(no_stacked_cells))
        in_state = tuple(
            tf.placeholder_with_default(cell_zero_state, [None, self.hidden_size], name='in_state')
            for cell_zero_state in multi_cell_zero_state)
        return cell, in_state
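A hedged usage sketch of the in_state pattern above, assuming TF 1.x with tf.contrib (the input sizes are made up): the first run lets the placeholders fall back to the zero state, later runs feed the previous output state back in to continue a sequence.

import numpy as np
import tensorflow as tf

hidden_size, no_stacked_cells = 8, 2
seq = tf.placeholder(tf.float32, [None, None, 4])  # [batch, time, features]
cell = tf.contrib.rnn.MultiRNNCell(
    [tf.contrib.rnn.GRUCell(hidden_size) for _ in range(no_stacked_cells)])
in_state = tuple(
    tf.placeholder_with_default(s, [None, hidden_size])
    for s in cell.zero_state(tf.shape(seq)[0], tf.float32))
outputs, out_state = tf.nn.dynamic_rnn(cell, seq, initial_state=in_state)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.zeros([3, 5, 4], np.float32)
    state = sess.run(out_state, {seq: batch})  # in_state defaults to the zero state
    state = sess.run(out_state, {seq: batch,   # continue from the previous state
                                 in_state[0]: state[0],
                                 in_state[1]: state[1]})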
seq2seq.py (project: deep_learning, author: wecliqued)
def _create_cell(self, seq, no_stacked_cells):
        """
        Creates GRU cell
        :param seq: placeholder of the input batch
        :return: cell and placeholder for its internal state
        """
        batch_size = tf.shape(seq)[0]

        ##########################################################################################################
        #
        # TODO: Create a stacked MultiRNNCell from GRU cells
        #       First, use tf.contrib.rnn.GRUCell() to construct the cells.
        #       Since around May 2017, there is a new way of constructing MultiRNNCell: you need to
        #       create one cell object per layer. Old code snippets found online that used
        #       [cell] * no_stacked_cells might not work with recent versions of TensorFlow.
        #
        #       After constructing the GRUCell objects, use them to construct tf.contrib.rnn.MultiRNNCell().
        #
        # YOUR CODE BEGIN
        #
        ##########################################################################################################

        cell = None  # your code here

        ##########################################################################################################
        #
        # YOUR CODE END
        #
        ##########################################################################################################

        multi_cell_zero_state = cell.zero_state(batch_size, tf.float32)
        in_state_shape = tuple([None, self.hidden_size] for _ in range(no_stacked_cells))
        in_state = tuple(
            tf.placeholder_with_default(cell_zero_state, [None, self.hidden_size], name='in_state')
            for cell_zero_state in multi_cell_zero_state)

        return cell, in_state
supervised_train.py (project: GraphSAGE, author: williamleif)
def construct_placeholders(num_classes):
    # Define placeholders
    placeholders = {
        'labels' : tf.placeholder(tf.float32, shape=(None, num_classes), name='labels'),
        'batch' : tf.placeholder(tf.int32, shape=(None,), name='batch1'),  # (None,) is rank-1; a bare (None) means unspecified shape
        'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
        'batch_size' : tf.placeholder(tf.int32, name='batch_size'),
    }
    return placeholders
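The dropout entry is the interesting one here: evaluation runs need no feed because the rate defaults to 0, while training feeds a nonzero rate (the real model wires the value into its own layers). A minimal sketch of that pattern, assuming TF 1.x:

import tensorflow as tf

dropout = tf.placeholder_with_default(0., shape=(), name='dropout')
h = tf.nn.dropout(tf.ones([4, 3]), keep_prob=1 - dropout)

with tf.Session() as sess:
    sess.run(h)                  # evaluation: default rate 0.0, nothing to feed
    sess.run(h, {dropout: 0.5})  # training: feed the dropout rate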
unsupervised_train.py (project: GraphSAGE, author: williamleif)
def construct_placeholders():
    # Define placeholders
    placeholders = {
        'batch1' : tf.placeholder(tf.int32, shape=(None,), name='batch1'),
        'batch2' : tf.placeholder(tf.int32, shape=(None,), name='batch2'),
        # negative samples for all nodes in the batch
        'neg_samples': tf.placeholder(tf.int32, shape=(None,),
            name='neg_sample_size'),
        'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
        'batch_size' : tf.placeholder(tf.int32, name='batch_size'),
    }
    return placeholders
learningv2.py (project: tefla, author: openAGI)
def _process_towers_grads(self, dataset, opt, model, is_training=True, reuse=None, loss_type='cross_entropy', is_classification=True):
        tower_grads = []
        tower_loss = []
        self.target_probs = tf.placeholder_with_default(tf.convert_to_tensor([1 / float(self.num_classes) for _ in range(0, self.num_classes)]),
                                                        shape=[self.num_classes, ], name="target_probs")
        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(self.cnf.get('num_gpus', 1)):  # range, not xrange: keeps this Python 3 compatible
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('%s_%d' % (self.cnf.get('TOWER_NAME', 'tower'), i)) as scope:
                        images, labels = distorted_inputs(
                            dataset, self.cnf['tfrecords_im_size'], self.cnf.get('crop_size'),
                            batch_size=self.cnf['batch_size_train'],
                            num_preprocess_threads=32, num_readers=8,
                            target_probs=self.target_probs,
                            init_probs=tf.convert_to_tensor(self.cnf['init_probs']),
                            image_preprocessing=self.preprocessor.preprocess_image,
                            data_balancing=self.data_balancing)
                        labels = self._adjust_ground_truth(labels)
                        loss = self._tower_loss(scope, model, images, labels, is_training=is_training,
                                                reuse=i > 0, is_classification=is_classification, gpu_id=i, loss_type=loss_type)

                        tf.get_variable_scope().reuse_variables()
                        if self.clip_by_global_norm:
                            grads_and_vars = self._clip_grad_global_norms(
                                tf.trainable_variables(), loss, opt,
                                global_norm=self.norm_threshold, gradient_noise_scale=0.0)
                        else:
                            grads_and_vars = opt.compute_gradients(loss)
                        tower_grads.append(grads_and_vars)
                        tower_loss.append(loss)

        grads_and_vars = self._average_gradients(tower_grads)

        return grads_and_vars, sum(tower_loss)
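target_probs above defaults to a uniform class distribution, so sampling stays balanced unless a different target distribution is fed during training. A small sketch of just that placeholder (the class count is made up):

import tensorflow as tf

num_classes = 5  # hypothetical
target_probs = tf.placeholder_with_default(
    tf.convert_to_tensor([1 / float(num_classes) for _ in range(num_classes)]),
    shape=[num_classes], name='target_probs')

with tf.Session() as sess:
    print(sess.run(target_probs))  # default: uniform, 0.2 per class
    print(sess.run(target_probs, {target_probs: [0.4, 0.3, 0.1, 0.1, 0.1]}))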
transferLearningV2.py (project: PlantImageRecognition, author: HeavenMin)
def finalTrainingLayer(classCount, finalTensorName, bottleneckTensor):
    with tf.name_scope('input'):
        bottleneckInput = tf.placeholder_with_default(
            bottleneckTensor, shape = [None, BOTTLENECK_TENSOR_SIZE],
            name = 'BottleneckInputPlaceholder')

    groundTruthInput = tf.placeholder(tf.float32,
                                      [None, classCount],
                                      name = 'GroundTruthInput')
    layerName = 'finalLayer'
    with tf.name_scope(layerName):
        with tf.name_scope('weights'):
            initialValue = tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, classCount],
                                               stddev=0.001)
            layerWeights = tf.Variable(initialValue, name = 'finalWeights')
            tensorBoardUsage(layerWeights)
        with tf.name_scope('biases'):
            layerBiases = tf.Variable(tf.zeros([classCount]), name='finalBiases')
            tensorBoardUsage(layerBiases)
        with tf.name_scope('WxPlusB'):
            logits = tf.matmul(bottleneckInput, layerWeights) + layerBiases
            tf.summary.histogram('pre_activations', logits)

    finalTensor = tf.nn.softmax(logits, name=finalTensorName)
    tf.summary.histogram('activations', finalTensor)

    with tf.name_scope('crossEntropy'):
        crossEntropy = tf.nn.softmax_cross_entropy_with_logits(
                       labels=groundTruthInput, logits=logits)
        with tf.name_scope('total'):
            crossEntropyMean = tf.reduce_mean(crossEntropy)
    tf.summary.scalar('cross_entropy', crossEntropyMean)

    with tf.name_scope('train'):
        optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
        trainStep = optimizer.minimize(crossEntropyMean)

    return (trainStep, crossEntropyMean, bottleneckInput, groundTruthInput,
            finalTensor)
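bottleneckInput is the trick used by TensorFlow's image-retraining example: by default the value flows in from the pretrained network's bottleneck tensor, but a whole mini-batch of cached bottleneck vectors can be fed instead, skipping the expensive forward pass. A self-contained sketch (the zeros tensor stands in for the real bottleneck output; the size 2048 is an assumption matching Inception v3):

import numpy as np
import tensorflow as tf

BOTTLENECK_TENSOR_SIZE = 2048
bottleneckTensor = tf.zeros([1, BOTTLENECK_TENSOR_SIZE])  # stand-in for the pretrained net
bottleneckInput = tf.placeholder_with_default(
    bottleneckTensor, shape=[None, BOTTLENECK_TENSOR_SIZE],
    name='BottleneckInputPlaceholder')

with tf.Session() as sess:
    sess.run(bottleneckInput)  # default path: value comes from the graph
    cached = np.random.rand(32, BOTTLENECK_TENSOR_SIZE).astype(np.float32)
    sess.run(bottleneckInput, {bottleneckInput: cached})  # fed path: cached vectors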
The same finalTrainingLayer function appears verbatim in four more files of this project: transferLearningV3.py, Second_Purification.py, First_Purification.py, and Train_Test.py (project: PlantImageRecognition, author: HeavenMin).
train_val.py (project: lsdc, author: febert)
def __init__(self, conf, images=None, scores=None, goal_pos=None, desig_pos=None):
        batchsize = int(conf['batch_size'])
        if goal_pos is None:
            self.goal_pos = goal_pos = tf.placeholder(tf.float32, name='goalpos', shape=(batchsize, 2))
        if desig_pos is None:
            self.desig_pos = desig_pos = tf.placeholder(tf.float32, name='desig_pos_pl', shape=(batchsize, 2))
        if scores is None:
            self.scores = scores = tf.placeholder(tf.float32, name='score_pl', shape=(batchsize, 1))
        if images is None:
            self.images = images = tf.placeholder(tf.float32, name='images_pl', shape=(batchsize, 1, 64, 64, 3))

        self.prefix = prefix = tf.placeholder(tf.string, [])

        from value_model import construct_model

        summaries = []
        inf_scores = construct_model(conf, images, goal_pos, desig_pos)
        self.inf_scores = inf_scores
        self.loss = loss = mean_squared_error(inf_scores, scores)

        summaries.append(tf.scalar_summary(prefix + '_loss', loss))  # tf.scalar_summary / tf.merge_summary are pre-TF-1.0 APIs

        self.lr = tf.placeholder_with_default(conf['learning_rate'], ())

        self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)
        self.summ_op = tf.merge_summary(summaries)
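self.lr gives the Adam optimizer a default learning rate from the config while leaving room to feed a decayed value later without rebuilding the graph. A minimal sketch of that override (the config dict is hypothetical):

import tensorflow as tf

conf = {'learning_rate': 1e-3}  # hypothetical config
lr = tf.placeholder_with_default(conf['learning_rate'], ())

with tf.Session() as sess:
    print(sess.run(lr))              # default rate from the config
    print(sess.run(lr, {lr: 1e-4}))  # e.g. a decayed rate for later iterations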
inputs.py (project: atari-rl, author: brendanator)
def auto_placeholder(dtype, shape, name, feed_data, preprocess_offset=None):
  placeholder_shape = [None, None] + list(shape)[1:] if shape else shape
  placeholder = tf.placeholder(dtype, placeholder_shape, name)
  placeholder.required_feeds = RequiredFeeds(placeholder)
  placeholder.feed_data = feed_data

  tensor = preprocess_offset(placeholder) if preprocess_offset else placeholder

  def offset_data(t, name):
    input_len = shape[0]
    if not hasattr(placeholder, 'zero_offset'):
      placeholder.zero_offset = tf.placeholder_with_default(
          input_len - 1,  # If no zero_offset is given assume that t = 0
          (),
          name + '/zero_offset')

    end = t + 1
    start = end - input_len
    zero_offset = placeholder.zero_offset
    offset_tensor = tensor[:, start + zero_offset:end + zero_offset]

    input_range = np.arange(start, end)
    offset_tensor.required_feeds = RequiredFeeds(placeholder, input_range)

    return tf.reshape(offset_tensor, [-1] + shape, name)

  placeholder.offset_data = offset_data
  return placeholder
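Worth spelling out why input_len - 1 is the right default: start + zero_offset = (t + 1 - input_len) + (input_len - 1) = t, so when no offset is fed the slice reduces to tensor[:, t:t + input_len], and at t = 0 it simply reads the first input_len steps, which is what the "assume that t = 0" comment means.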
model.py (project: tensorsandbox, author: kaizouman)
def __init__(self, wd=WEIGHT_DECAY, dropout=0.0):

        self.wd = wd
        self.dropout = dropout
        self.sizes = []
        self.flops = []
        self.training = tf.placeholder_with_default(False, shape=[], name="training")
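This boolean training flag is a variant of the dropout-rate convention seen in the GraphSAGE snippets above: layers branch on the tensor, inference needs no feed, and a training step feeds True. A small sketch, assuming TF 1.x tf.layers:

import tensorflow as tf

training = tf.placeholder_with_default(False, shape=[], name='training')
y = tf.layers.dropout(tf.ones([2, 3]), rate=0.5, training=training)

with tf.Session() as sess:
    sess.run(y)                    # inference by default: dropout is a no-op
    sess.run(y, {training: True})  # training step: feed True to enable dropout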
data.py (project: tensorsandbox, author: kaizouman)
def train_inputs(data_dir):
    """Construct input for CIFAR training.

    Note that batch_size is a placeholder whose default value is the one
    specified during training. It can however be specified differently at
    inference time by passing it explicitly in the feed dict when sess.run is
    called.

    Args:
        data_dir: Path to the CIFAR-10 data directory.

    Returns:
        images: Images. 4D tensor [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3].
        labels: Labels. 1D tensor [batch_size].
    """

    # Transpose dimensions
    raw_image, label = get_raw_input_data(False, data_dir)

    # If needed, perform data augmentation
    if tf.app.flags.FLAGS.data_aug:
        image = distort_image(raw_image)
    else:
        image = raw_image

    # Normalize image (subtract mean and divide by variance)
    float_image = tf.image.per_image_standardization(image)

    # Create a queue to extract batch of samples
    batch_size_tensor = tf.placeholder_with_default(FLAGS.batch_size, shape=[])
    images, labels = tf.train.shuffle_batch([float_image,label],
                                     batch_size = batch_size_tensor,
                                     num_threads = NUM_THREADS,
                                     capacity = 20000 + 3 * FLAGS.batch_size,
                                     min_after_dequeue = 20000)

    # Display the training images in the visualizer
    tf.summary.image('images', images)

    return images, tf.reshape(labels, [-1])
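The docstring's point is worth seeing in isolation: the batch size baked in at graph-construction time is only a default, and inference can feed a different value. A minimal sketch (the 128 default is made up):

import tensorflow as tf

batch_size_tensor = tf.placeholder_with_default(128, shape=[])
batch = tf.range(batch_size_tensor)  # stands in for the shuffle_batch output

with tf.Session() as sess:
    print(sess.run(batch).shape)                          # (128,): training default
    print(sess.run(batch, {batch_size_tensor: 1}).shape)  # (1,): inference override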
beam_search_decoder.py (project: neuralmonkey, author: ufal)
def get_initial_loop_state(self) -> BeamSearchLoopState:
        # TODO make these feedable
        output_ta = SearchStepOutputTA(
            scores=tf.TensorArray(dtype=tf.float32, dynamic_size=True,
                                  size=0, name="beam_scores"),
            parent_ids=tf.TensorArray(dtype=tf.int32, dynamic_size=True,
                                      size=0, name="beam_parents"),
            token_ids=tf.TensorArray(dtype=tf.int32, dynamic_size=True,
                                     size=0, name="beam_tokens"))

        # We run the decoder once to get logits for ensembling
        dec_ls = self.parent_decoder.get_initial_loop_state()
        decoder_body = self.parent_decoder.get_body(False)
        dec_ls = decoder_body(*dec_ls)

        # We want to feed these values in ensembles
        self._search_state = SearchState(
            logprob_sum=tf.placeholder_with_default([0.0], [None]),
            prev_logprobs=tf.nn.log_softmax(dec_ls.feedables.prev_logits),
            lengths=tf.placeholder_with_default([1], [None]),
            finished=tf.placeholder_with_default([False], [None]))

        self._decoder_state = dec_ls.feedables

        # TODO make TensorArrays also feedable
        return BeamSearchLoopState(
            bs_state=self._search_state,
            bs_output=output_ta,
            decoder_loop_state=dec_ls)
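The search-state placeholders default to a fresh search (zero log-probability sum, length 1, not finished); ensembling works by feeding states computed by another model instead. A stripped-down sketch of just those defaults:

import tensorflow as tf

logprob_sum = tf.placeholder_with_default([0.0], [None])
lengths = tf.placeholder_with_default([1], [None])
finished = tf.placeholder_with_default([False], [None])

with tf.Session() as sess:
    print(sess.run([logprob_sum, lengths, finished]))          # fresh-search defaults
    print(sess.run(logprob_sum, {logprob_sum: [-0.7, -1.2]}))  # state fed from another run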
Two further beam_search_decoder.py listings (project: neuralmonkey, author: ufal) contain the same get_initial_loop_state method verbatim.
model.py (project: supervised-embedding-model, author: sld)
def _init_summaries(self):
        self.accuracy = tf.placeholder_with_default(0.0, shape=(), name='Accuracy')
        self.accuracy_summary = tf.scalar_summary('Accuracy summary', self.accuracy)  # tf.*_summary calls here are pre-TF-1.0 names (tf.summary.* since 1.0)

        self.f_pos_summary = tf.histogram_summary('f_pos', self.f_pos)
        self.f_neg_summary = tf.histogram_summary('f_neg', self.f_neg)

        self.loss_summary = tf.scalar_summary('Mini-batch loss', self.loss)
        self.summary_op = tf.merge_summary(
            [
                self.f_pos_summary,
                self.f_neg_summary,
                self.loss_summary
            ]
        )
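accuracy here is computed in Python outside the graph and pushed into TensorBoard through a placeholder defaulting to 0.0. A sketch of the same idea using the post-1.0 summary names (the snippet above uses the older tf.scalar_summary spelling):

import tensorflow as tf

accuracy = tf.placeholder_with_default(0.0, shape=(), name='Accuracy')
accuracy_summary = tf.summary.scalar('Accuracy_summary', accuracy)

with tf.Session() as sess:
    summ = sess.run(accuracy_summary, {accuracy: 0.93})  # feed the value the eval loop computed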
expert.py (project: examples, author: guildai)
def init_model():
    global x, y

    # Input layer
    x = tf.placeholder(tf.float32, [None, 784])

    # First convolutional layer
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    # Second convolutional layer
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    # First fully connected layer
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # Dropout
    keep_prob = tf.placeholder_with_default(1.0, [])
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Output layer
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
tfqueue.py 文件源码 项目:TensorArtist 作者: vacancy 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def setup(self, graph):
        self._placeholders = graph.get_collection(TArtGraphKeys.PLACEHOLDERS)
        placeholders_dtypes = [x.dtype for x in self._placeholders]
        self._input_queue = tf.FIFOQueue(self._env.flags.input_queue_size, placeholders_dtypes, name=self._name)
        self._input_queue_cond = tf.placeholder_with_default(True, shape=[], name=self._name + '_cond')

        self.enqueue_op = self._input_queue.enqueue(self._placeholders)
        self.dequeue_op = self._input_queue.dequeue()
        self.close_op = self._input_queue.close(cancel_pending_enqueues=True)
        self.qsize_op = self._input_queue.size()

        for a, b in zip(self._placeholders, self.dequeue_op):
            as_tftensor(b).set_shape(as_tftensor(a).get_shape())

        self.edit_graph(graph)

