Python code examples for merge_summary()
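
All of the snippets below target the pre-1.0 TensorFlow API, where the summary ops lived at the top level of the `tf` namespace. In TensorFlow 1.0 these ops were renamed; the standard mapping (an editorial reference, not part of the quoted projects) is:

# TensorFlow <= 0.12              TensorFlow >= 1.0
# tf.merge_summary(...)        -> tf.summary.merge(...)
# tf.merge_all_summaries()     -> tf.summary.merge_all()
# tf.scalar_summary(t, v)      -> tf.summary.scalar(t, v)
# tf.histogram_summary(t, v)   -> tf.summary.histogram(t, v)
# tf.image_summary(t, v)       -> tf.summary.image(t, v)
# tf.train.SummaryWriter(dir)  -> tf.summary.FileWriter(dir)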

evaluator.py (project: easy-tensorflow, author: khanhptnk)
def run(self):
    """Run evaluation."""
    # Create logging directory if not exists.
    if not os.path.isdir(self._eval_log_dir):
      os.makedirs(self._eval_log_dir)

    # Compute loss function and other evaluating metrics.
    self._initialize()

    # Visualize input images in TensorBoard.
    self._summary_ops.append(tf.image_summary("Eval_Image", self._observations, max_images=5))

    # Use `slim.evaluation.evaluation_loop` to evaluate the model periodically.
    slim.evaluation.evaluation_loop(
        master='',
        checkpoint_dir=self._train_log_dir,
        logdir=self._eval_log_dir,
        num_evals=self._config.num_batches,
        eval_op=list(self._metrics_to_updates.values()),
        summary_op=tf.merge_summary(self._summary_ops),
        eval_interval_secs=self._config.eval_interval_secs)
trainer.py (project: easy-tensorflow, author: khanhptnk)
def run(self):
    """Run training."""
    # Create logging directory if not exists.
    if not os.path.isdir(self._train_log_dir):
      os.makedirs(self._train_log_dir)

    # Load data and compute loss function
    self._initialize()

    # Visualize input images in TensorBoard.
    self._summary_ops.append(tf.image_summary("Image_Train", self._observations, max_images=5))

    # Initialize optimizer.
    optimizer = tf.train.AdadeltaOptimizer(self._config.learning_rate)
    train_op = slim.learning.create_train_op(self._loss, optimizer)

    # Use `slim.learning.train` to manage training.
    slim.learning.train(train_op=train_op,
                        logdir=self._train_log_dir,
                        graph=self._graph,
                        number_of_steps=self._config.train_steps,
                        summary_op=tf.merge_summary(self._summary_ops),
                        save_summaries_secs=self._config.save_summaries_secs,
                        save_interval_secs=self._config.save_interval_secs)
trainer.py (project: how_to_convert_text_to_images, author: llSourcell)
def define_summaries(self):
        '''Helper function for init_opt'''
        all_sum = {'g': [], 'd': [], 'hr_g': [], 'hr_d': [], 'hist': []}
        for k, v in self.log_vars:
            if k.startswith('g'):
                all_sum['g'].append(tf.scalar_summary(k, v))
            elif k.startswith('d'):
                all_sum['d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hr_g'):
                all_sum['hr_g'].append(tf.scalar_summary(k, v))
            elif k.startswith('hr_d'):
                all_sum['hr_d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hist'):
                all_sum['hist'].append(tf.histogram_summary(k, v))

        self.g_sum = tf.merge_summary(all_sum['g'])
        self.d_sum = tf.merge_summary(all_sum['d'])
        self.hr_g_sum = tf.merge_summary(all_sum['hr_g'])
        self.hr_d_sum = tf.merge_summary(all_sum['hr_d'])
        self.hist_sum = tf.merge_summary(all_sum['hist'])
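
A minimal, self-contained sketch of how such grouped summary ops are typically consumed in a GAN training loop (the losses, log directory, and step count here are illustrative, and TensorFlow <= 0.12 is assumed):

import tensorflow as tf

g_loss = tf.placeholder(tf.float32, [], name="g_loss")
d_loss = tf.placeholder(tf.float32, [], name="d_loss")
# group generator and discriminator summaries so each can be written independently
g_sum = tf.merge_summary([tf.scalar_summary("g_loss", g_loss)])
d_sum = tf.merge_summary([tf.scalar_summary("d_loss", d_loss)])

writer = tf.train.SummaryWriter("/tmp/gan_logs")
with tf.Session() as sess:
    for step in range(3):
        writer.add_summary(sess.run(d_sum, {d_loss: 0.5}), step)
        writer.add_summary(sess.run(g_sum, {g_loss: 1.0 / (step + 1)}), step)
writer.close()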
trainer.py (project: how_to_convert_text_to_images, author: llSourcell)
def visualization(self, n):
        fake_sum_train, superimage_train =\
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test =\
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])

        hr_fake_sum_train, hr_superimage_train =\
            self.visualize_one_superimage(self.hr_fake_images[:n * n],
                                          self.hr_images[:n * n, :, :, :],
                                          n, "hr_train")
        hr_fake_sum_test, hr_superimage_test =\
            self.visualize_one_superimage(self.hr_fake_images[n * n:2 * n * n],
                                          self.hr_images[n * n:2 * n * n],
                                          n, "hr_test")
        self.hr_superimages =\
            tf.concat(0, [hr_superimage_train, hr_superimage_test])
        self.hr_image_summary =\
            tf.merge_summary([hr_fake_sum_train, hr_fake_sum_test])
summarizer.py (project: tflearn, author: tflearn)
def summarize_variables(train_vars=None, summary_collection="tflearn_summ"):
    """ summarize_variables.

    Arguments:
        train_vars: list of `Variable`. The variable weights to monitor.
        summary_collection: A collection to add this summary to and
            also used for returning a merged summary over all its elements.
            Default: 'tflearn_summ'.

    Returns:
        `Tensor`. Merge of all summaries in 'summary_collection'.

    """
    if not train_vars: train_vars = tf.trainable_variables()
    summaries.add_trainable_vars_summary(train_vars, "", "", summary_collection)
    return merge_summary(tf.get_collection(summary_collection))
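
A hedged usage sketch (the import path is assumed from the file shown above, and the log directory is illustrative; TensorFlow <= 0.12 assumed):

import tensorflow as tf
from tflearn.helpers.summarizer import summarize_variables  # assumed path

w = tf.Variable(tf.random_normal([5, 5]), name="w")
merged = summarize_variables()  # no argument: monitors all trainable variables
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    writer = tf.train.SummaryWriter("/tmp/tflearn_logs")
    writer.add_summary(sess.run(merged), 0)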
summarizer.py (project: tflearn, author: tflearn)
def summarize(value, type, name, summary_collection="tflearn_summ"):
    """ summarize.

    A custom summarization op.

    Arguments:
        value: `Tensor`. The tensor value to monitor.
        type: `str` among 'histogram', 'scalar'. The data monitoring type.
        name: `str`. A name for this summary.
        summary_collection: A collection to add this summary to and
            also used for returning a merged summary over all its elements.
            Default: 'tflearn_summ'.

    Returns:
        `Tensor`. Merge of all summaries in 'summary_collection'.

    """
    if tf012:
        name = name.replace(':', '_')
    summaries.get_summary(type, name, value, summary_collection)
    return merge_summary(tf.get_collection(summary_collection))
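
`summarize` can mix monitoring types; each call re-merges the whole 'tflearn_summ' collection, so only the last returned op needs to be kept. A short sketch using the function above (the variable names are illustrative):

w = tf.Variable(tf.random_normal([5, 5]), name="w")
loss = tf.reduce_mean(tf.square(w))
summarize(loss, 'scalar', 'loss')              # adds a scalar summary to 'tflearn_summ'
merged = summarize(w, 'histogram', 'weights')  # the returned merge now covers both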
trainer.py (project: tflearn, author: tflearn)
def create_summaries(self, verbose=2):
        """ Create summaries with `verbose` level """

        summ_collection = self.name + "_training_summaries"

        if verbose in [3]:
            # Summarize activations
            activations = tf.get_collection(tf.GraphKeys.ACTIVATIONS)
            summarize_activations(activations, summ_collection)
        if verbose in [2, 3]:
            # Summarize variable weights
            summarize_variables(self.train_vars, summ_collection)
        if verbose in [1, 2, 3]:
            # Summarize gradients
            summarize_gradients(self.grad, summ_collection)

        self.summ_op = merge_summary(tf.get_collection(summ_collection))
trainer.py (project: StackGAN, author: hanzhanggit)
def define_summaries(self):
        '''Helper function for init_opt'''
        all_sum = {'g': [], 'd': [], 'hr_g': [], 'hr_d': [], 'hist': []}
        for k, v in self.log_vars:
            if k.startswith('g'):
                all_sum['g'].append(tf.scalar_summary(k, v))
            elif k.startswith('d'):
                all_sum['d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hr_g'):
                all_sum['hr_g'].append(tf.scalar_summary(k, v))
            elif k.startswith('hr_d'):
                all_sum['hr_d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hist'):
                all_sum['hist'].append(tf.histogram_summary(k, v))

        self.g_sum = tf.merge_summary(all_sum['g'])
        self.d_sum = tf.merge_summary(all_sum['d'])
        self.hr_g_sum = tf.merge_summary(all_sum['hr_g'])
        self.hr_d_sum = tf.merge_summary(all_sum['hr_d'])
        self.hist_sum = tf.merge_summary(all_sum['hist'])
trainer.py (project: StackGAN, author: hanzhanggit)
def visualization(self, n):
        fake_sum_train, superimage_train =\
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test =\
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])

        hr_fake_sum_train, hr_superimage_train =\
            self.visualize_one_superimage(self.hr_fake_images[:n * n],
                                          self.hr_images[:n * n, :, :, :],
                                          n, "hr_train")
        hr_fake_sum_test, hr_superimage_test =\
            self.visualize_one_superimage(self.hr_fake_images[n * n:2 * n * n],
                                          self.hr_images[n * n:2 * n * n],
                                          n, "hr_test")
        self.hr_superimages =\
            tf.concat(0, [hr_superimage_train, hr_superimage_test])
        self.hr_image_summary =\
            tf.merge_summary([hr_fake_sum_train, hr_fake_sum_test])
deepSpeech_train.py (project: deepSpeech, author: fordDeepDSP)
def add_summaries(summaries, learning_rate, grads):
    """ Add summary ops"""

    # Track quantities for TensorBoard display
    summaries.append(tf.scalar_summary('learning_rate', learning_rate))
    # Add histograms for gradients.
    for grad, var in grads:
        if grad is not None:
            summaries.append(
                tf.histogram_summary(var.op.name +
                                     '/gradients', grad))
    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        summaries.append(tf.histogram_summary(var.op.name, var))

    # Build the summary operation from the last tower summaries.
    summary_op = tf.merge_summary(summaries)
    return summary_op
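
A sketch of the caller's side, showing where `grads` typically comes from (the loss and optimizer here are illustrative; TensorFlow <= 0.12 assumed):

import tensorflow as tf

w = tf.Variable(tf.random_normal([4]), name="w")
loss = tf.reduce_sum(tf.square(w))
opt = tf.train.GradientDescentOptimizer(0.1)
grads = opt.compute_gradients(loss)          # list of (gradient, variable) pairs
summary_op = add_summaries([], 0.1, grads)   # add_summaries as defined above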
region_proposal.py (project: lstm-rcnn-pedestrian-detection, author: buffer51)
def create_train_summaries(learning_rate, clas_loss, reg_loss, rpn_loss, clas_accuracy, clas_positive_percentage, clas_positive_accuracy, VGG16D_activations, clas_activations):
    with tf.name_scope('train'):
        learning_rate_summary = tf.scalar_summary('learning_rate', learning_rate)

        loss_clas_summary = tf.scalar_summary('loss/clas', clas_loss)
        loss_reg_summary = tf.scalar_summary('loss/reg', reg_loss)
        loss_rpn_summary = tf.scalar_summary('loss/rpn', rpn_loss)

        stat_accuracy_summary = tf.scalar_summary('stat/accuracy', clas_accuracy)
        stat_positive_percentage_summary = tf.scalar_summary('stat/positive_percentage', clas_positive_percentage)
        stat_positive_accuracy_summary = tf.scalar_summary('stat/positive_accuracy', clas_positive_accuracy)

        VGG16D_histogram = tf.histogram_summary('activations/VGG16D', VGG16D_activations)
        clas_histogram = tf.histogram_summary('activations/clas', clas_activations)

        return tf.merge_summary([learning_rate_summary, loss_clas_summary, loss_reg_summary, loss_rpn_summary, stat_accuracy_summary, stat_positive_percentage_summary, stat_positive_accuracy_summary, VGG16D_histogram, clas_histogram])
actor.py (project: needle, author: roosephu)
def build_train(self):

        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=tf.get_variable_scope().name)
        regularization = tf.contrib.layers.apply_regularization(
            tf.contrib.layers.l2_regularizer(0.01),
            self.variables,
        )

        self.op_grad_actions = tf.placeholder(tf.float32, [None, self.action_dim])
        self.op_loss = tf.reduce_sum(-self.op_grad_actions * self.op_actions) # + regularization
        self.op_summary = tf.merge_summary([
            tf.scalar_summary("actor loss", self.op_loss),
            tf.histogram_summary("actor", self.op_actions),
        ])

        self.op_train = tf.train.AdamOptimizer(self.learning_rate).minimize(self.op_loss)

    # def get_op_train(self):
    #     self.op_grads = tf.gradients(self.op_actions, self.variables, -self.op_grad_actions)
    #     self.op_grads2 = tf.gradients(self.op_loss, self.variables)
    #     return tf.train.AdamOptimizer(1e-4).apply_gradients(zip(self.op_grads2, self.variables))
critic.py (project: needle, author: roosephu)
def build_train(self):
        self.op_rewards = tf.placeholder(tf.float32, [None])

        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=tf.get_variable_scope().name)
        regularization = tf.contrib.layers.apply_regularization(
            tf.contrib.layers.l2_regularizer(0.01),
            self.variables,
        )
        self.op_loss = tf.reduce_mean((self.op_rewards - self.op_critic)**2) + regularization
        self.op_summary = tf.merge_summary([
            tf.scalar_summary("critic loss", self.op_loss),
            tf.histogram_summary("critic", self.op_critic),
        ])

        self.op_grad_actions = tf.gradients(self.op_critic, self.op_actions)[0]
        self.op_train = tf.train.AdamOptimizer(self.learning_rate).minimize(self.op_loss)
trainer.py (project: how_to_convert_text_to_images, author: llSourcell)
def define_summaries(self):
        '''Helper function for init_opt'''
        all_sum = {'g': [], 'd': [], 'hist': []}
        for k, v in self.log_vars:
            if k.startswith('g'):
                all_sum['g'].append(tf.scalar_summary(k, v))
            elif k.startswith('d'):
                all_sum['d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hist'):
                all_sum['hist'].append(tf.histogram_summary(k, v))

        self.g_sum = tf.merge_summary(all_sum['g'])
        self.d_sum = tf.merge_summary(all_sum['d'])
        self.hist_sum = tf.merge_summary(all_sum['hist'])
trainer.py (project: how_to_convert_text_to_images, author: llSourcell)
def visualization(self, n):
        fake_sum_train, superimage_train = \
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test = \
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])
summarizer.py (project: tflearn, author: tflearn)
def summarize_activations(activations, summary_collection="tflearn_summ"):
    """ summarize_activations.

    Arguments:
        activations: list of `Tensor`. The activations to monitor.
        summary_collection: A collection to add this summary to and
            also used for returning a merged summary over all its elements.
            Default: 'tflearn_summ'.

    Returns:
        `Tensor`. Merge of all summaries in 'summary_collection'.

    """
    summaries.add_activations_summary(activations, "", "", summary_collection)
    return merge_summary(tf.get_collection(summary_collection))
summarizer.py (project: tflearn, author: tflearn)
def summarize_gradients(grads, summary_collection="tflearn_summ"):
    """ summarize_gradients.

    Arguments:
        grads: list of `Tensor`. The gradients to monitor.
        summary_collection: A collection to add this summary to and
            also used for returning a merged summary over all its elements.
            Default: 'tflearn_summ'.

    Returns:
        `Tensor`. Merge of all summaries in 'summary_collection'.

    """
    summaries.add_gradients_summary(grads, "", "", summary_collection)
    return merge_summary(tf.get_collection(summary_collection))
trainer.py (project: StackGAN, author: hanzhanggit)
def define_summaries(self):
        '''Helper function for init_opt'''
        all_sum = {'g': [], 'd': [], 'hist': []}
        for k, v in self.log_vars:
            if k.startswith('g'):
                all_sum['g'].append(tf.scalar_summary(k, v))
            elif k.startswith('d'):
                all_sum['d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hist'):
                all_sum['hist'].append(tf.histogram_summary(k, v))

        self.g_sum = tf.merge_summary(all_sum['g'])
        self.d_sum = tf.merge_summary(all_sum['d'])
        self.hist_sum = tf.merge_summary(all_sum['hist'])
trainer.py (project: StackGAN, author: hanzhanggit)
def visualization(self, n):
        fake_sum_train, superimage_train = \
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test = \
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])
model.py (project: bi-att-flow, author: allenai)
def __init__(self, config, scope):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, None], name='x')
        self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')
        self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
        self.y = tf.placeholder('bool', [N, M, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
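
Note that the second assignment overwrites the first: `merge_all_summaries` merges every summary in the default graph, while the scoped `get_collection` call keeps only summaries whose names fall under `self.scope`, which is what a graph holding several model towers needs. The difference in a two-line sketch (the scope name is illustrative):

all_merged = tf.merge_all_summaries()  # every summary op in the graph
scoped = tf.merge_summary(tf.get_collection("summaries", scope="my_model"))  # this scope only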
model.py (project: Chinese-QA, author: distantJing)
def __init__(self, config, scope):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, None], name='x')
        self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')
        self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
        self.y = tf.placeholder('bool', [N, M, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
train_val.py (project: lsdc, author: febert)
def __init__(self, conf, images=None, scores=None, goal_pos=None, desig_pos=None):
        batchsize = int(conf['batch_size'])
        if goal_pos is None:
            self.goal_pos = goal_pos= tf.placeholder(tf.float32, name='goalpos', shape=(batchsize, 2))
        if desig_pos is None:
            self.desig_pos = desig_pos =  tf.placeholder(tf.float32, name='desig_pos_pl', shape=(batchsize, 2))
        if scores is None:
            self.scores = scores = tf.placeholder(tf.float32, name='score_pl', shape=(batchsize, 1))
        if images is None:
            self.images = images = tf.placeholder(tf.float32, name='images_pl', shape=(batchsize, 1, 64,64,3))

        self.prefix = prefix = tf.placeholder(tf.string, [])

        from value_model import construct_model

        summaries = []
        inf_scores = construct_model(conf, images, goal_pos, desig_pos)
        self.inf_scores = inf_scores
        self.loss = loss = mean_squared_error(inf_scores, scores)

        summaries.append(tf.scalar_summary(prefix + '_loss', loss))

        self.lr = tf.placeholder_with_default(conf['learning_rate'], ())

        self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)
        self.summ_op = tf.merge_summary(summaries)
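
The `tf.string` placeholder `prefix` lets one merged summary op serve both training and validation tags, chosen at run time. A self-contained sketch of the same pattern (log directory and values illustrative; TensorFlow <= 0.12 assumed):

import tensorflow as tf

prefix = tf.placeholder(tf.string, [])
loss = tf.placeholder(tf.float32, [])
# the tag is a string tensor, resolved from the feed when the op runs
summ_op = tf.merge_summary([tf.scalar_summary(prefix + '_loss', loss)])

writer = tf.train.SummaryWriter("/tmp/lsdc_logs")
with tf.Session() as sess:
    writer.add_summary(sess.run(summ_op, {prefix: 'train', loss: 0.3}), 0)
    writer.add_summary(sess.run(summ_op, {prefix: 'val', loss: 0.4}), 0)
writer.close()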
model.py (project: adversarial-squad, author: robinjia)
def __init__(self, config, scope):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, None], name='x')
        self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')
        self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
        self.y = tf.placeholder('bool', [N, M, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
region_proposal.py (project: lstm-rcnn-pedestrian-detection, author: buffer51)
def create_test_summaries(test_placeholders):
    with tf.name_scope('test'):
        accuracy_summary = tf.scalar_summary('accuracy', test_placeholders[0])

        positive_recall_summary = tf.scalar_summary('recall/positive', test_placeholders[1])
        negative_recall_summary = tf.scalar_summary('recall/negative', test_placeholders[2])
        recall_summary = tf.scalar_summary('recall/global', test_placeholders[3])

        positive_precision_summary = tf.scalar_summary('precision/positive', test_placeholders[4])
        negative_precision_summary = tf.scalar_summary('precision/negative', test_placeholders[5])
        precision_summary = tf.scalar_summary('precision/global', test_placeholders[6])

        F_score_summary = tf.scalar_summary('F-score', test_placeholders[7])

        return tf.merge_summary([accuracy_summary, positive_recall_summary, negative_recall_summary, recall_summary, positive_precision_summary, negative_precision_summary,precision_summary, F_score_summary])
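
Here the test metrics are computed in Python, outside the graph, and fed into placeholders only when a summary is written. A hedged fragment of the caller side (the placeholder construction and `values` are illustrative):

import tensorflow as tf

# eight scalar placeholders, one per metric expected by create_test_summaries
test_placeholders = [tf.placeholder(tf.float32, []) for _ in range(8)]
test_summary_op = create_test_summaries(test_placeholders)
# later, with the eight metrics computed Python-side in `values`:
#   summary = sess.run(test_summary_op, feed_dict=dict(zip(test_placeholders, values)))
#   writer.add_summary(summary, step)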
common.py (project: AS_6Dof_Arm, author: yao62995)
def grad_histograms(grads_and_vars):
        s = []
        for grad, var in grads_and_vars:
            s.append(tf.histogram_summary(var.op.name, var))
            # gradients can be None for variables not on the loss path;
            # histogram_summary would fail on them, so guard the append
            if grad is not None:
                s.append(tf.histogram_summary(var.op.name + '/gradients', grad))
        return tf.merge_summary(s)
model.py (project: supervised-embedding-model, author: sld)
def _init_summaries(self):
        self.accuracy = tf.placeholder_with_default(0.0, shape=(), name='Accuracy')
        self.accuracy_summary = tf.scalar_summary('Accuracy summary', self.accuracy)

        self.f_pos_summary = tf.histogram_summary('f_pos', self.f_pos)
        self.f_neg_summary = tf.histogram_summary('f_neg', self.f_neg)

        self.loss_summary = tf.scalar_summary('Mini-batch loss', self.loss)
        self.summary_op = tf.merge_summary(
            [
                self.f_pos_summary,
                self.f_neg_summary,
                self.loss_summary
            ]
        )
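
Because `accuracy` is a `placeholder_with_default(0.0)`, `summary_op` can run during training without feeding it, and the externally computed accuracy can be fed on its own at evaluation time. A hedged fragment (the session, feeds, `step`, and `eval_accuracy` are illustrative names):

# training step: `accuracy` silently falls back to its default of 0.0
summary = sess.run(model.summary_op, feed_dict=train_feed)
writer.add_summary(summary, step)
# evaluation: feed the accuracy computed outside the graph
acc_summary = sess.run(model.accuracy_summary, feed_dict={model.accuracy: eval_accuracy})
writer.add_summary(acc_summary, step)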
1-mnist.py (project: npfl114, author: ufal)
def construct(self, hidden_layer_size):
        with self.session.graph.as_default():
            with tf.name_scope("inputs"):
                self.images = tf.placeholder(tf.float32, [None, self.WIDTH, self.HEIGHT, 1], name="images")
                self.labels = tf.placeholder(tf.int64, [None], name="labels")

            flattened_images = tf_layers.flatten(self.images, scope="preprocessing")
            hidden_layer = tf_layers.fully_connected(flattened_images, num_outputs=hidden_layer_size, activation_fn=tf.nn.relu, scope="hidden_layer")
            output_layer = tf_layers.fully_connected(hidden_layer, num_outputs=self.LABELS, activation_fn=None, scope="output_layer")
            self.predictions = tf.argmax(output_layer, 1)

            loss = tf_losses.sparse_softmax_cross_entropy(output_layer, self.labels, scope="loss")
            self.global_step = tf.Variable(0, dtype=tf.int64, trainable=False, name="global_step")
            self.training = tf.train.AdamOptimizer().minimize(loss, global_step=self.global_step)
            self.accuracy = tf_metrics.accuracy(self.predictions, self.labels)

            # Summaries
            self.summaries = {"training": tf.merge_summary([tf.scalar_summary("train/loss", loss),
                                                            tf.scalar_summary("train/accuracy", self.accuracy)])}
            for dataset in ["dev", "test"]:
                self.summaries[dataset] = tf.scalar_summary(dataset+"/accuracy", self.accuracy)

            # Initialize variables
            self.session.run(tf.initialize_all_variables())

        # Finalize graph and log it if requested
        self.session.graph.finalize()
        if self.summary_writer:
            self.summary_writer.add_graph(self.session.graph)
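
A hedged fragment showing how the per-dataset summary dict would be consumed (`network`, `step`, `dev_images`, and `dev_labels` are illustrative names; the attributes match the class above):

summary = network.session.run(network.summaries["dev"],
                              {network.images: dev_images, network.labels: dev_labels})
network.summary_writer.add_summary(summary, step)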
tagger-skeleton.py (project: npfl114, author: ufal)
def __init__(self, rnn_cell, rnn_cell_dim, method, words, logdir, expname, threads=1, seed=42):
        # Create an empty graph and a session
        graph = tf.Graph()
        graph.seed = seed
        self.session = tf.Session(graph = graph, config=tf.ConfigProto(inter_op_parallelism_threads=threads,
                                                                       intra_op_parallelism_threads=threads))

        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
        self.summary_writer = tf.train.SummaryWriter("{}/{}-{}".format(logdir, timestamp, expname), flush_secs=10)

        # Construct the graph
        with self.session.graph.as_default():
            if rnn_cell == "LSTM":
                rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_cell_dim)
            elif rnn_cell == "GRU":
                rnn_cell = tf.nn.rnn_cell.GRUCell(rnn_cell_dim)
            else:
                raise ValueError("Unknown rnn_cell {}".format(rnn_cell))

            self.global_step = tf.Variable(0, dtype=tf.int64, trainable=False, name="global_step")
            self.sentence_lens = tf.placeholder(tf.int32, [None])
            self.forms = tf.placeholder(tf.int32, [None, None])
            self.tags = tf.placeholder(tf.int32, [None, None])

            # TODO
            # loss = ...
            # self.training = ...
            # self.predictions = ...
            # self.accuracy = ...

            self.dataset_name = tf.placeholder(tf.string, [])
            self.summary = tf.merge_summary([tf.scalar_summary(self.dataset_name+"/loss", loss),
                                             tf.scalar_summary(self.dataset_name+"/accuracy", self.accuracy)])

            # Initialize variables
            self.session.run(tf.initialize_all_variables())
4-mnist-using-contrib.py (project: npfl114, author: ufal)
def __init__(self, logdir, experiment, threads):
        # Construct the graph
        with tf.name_scope("inputs"):
            self.images = tf.placeholder(tf.float32, [None, WIDTH, HEIGHT, 1], name="images")
            self.labels = tf.placeholder(tf.int64, [None], name="labels")
            flattened_images = layers.flatten(self.images)

        hidden_layer = layers.fully_connected(flattened_images, num_outputs=HIDDEN, activation_fn=tf.nn.relu, scope="hidden_layer")
        output_layer = layers.fully_connected(hidden_layer, num_outputs=LABELS, activation_fn=None, scope="output_layer")

        loss = losses.sparse_softmax_cross_entropy(output_layer, self.labels, scope="loss")
        self.training = layers.optimize_loss(loss, None, None, tf.train.AdamOptimizer(), summaries=['loss', 'gradients', 'gradient_norm'], name='training')

        with tf.name_scope("accuracy"):
            predictions = tf.argmax(output_layer, 1, name="predictions")
            accuracy = metrics.accuracy(predictions, self.labels)
            tf.scalar_summary("training/accuracy", accuracy)

        with tf.name_scope("confusion_matrix"):
            confusion_matrix = metrics.confusion_matrix(predictions, self.labels, weights=tf.not_equal(predictions, self.labels), dtype=tf.float32)
            confusion_image = tf.reshape(confusion_matrix, [1, LABELS, LABELS, 1])

        # Summaries
        self.summaries = {'training': tf.merge_all_summaries() }
        for dataset in ["dev", "test"]:
            self.summaries[dataset] = tf.merge_summary([tf.scalar_summary(dataset + "/accuracy", accuracy),
                                                        tf.image_summary(dataset + "/confusion_matrix", confusion_image)])

        # Create the session
        self.session = tf.Session(config=tf.ConfigProto(inter_op_parallelism_threads=threads,
                                                        intra_op_parallelism_threads=threads))

        self.session.run(tf.initialize_all_variables())
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
        self.summary_writer = tf.train.SummaryWriter("{}/{}-{}".format(logdir, timestamp, experiment), graph=self.session.graph, flush_secs=10)
        self.steps = 0
lemmatizer-skeleton.py (project: npfl114, author: ufal)
def __init__(self, rnn_cell, rnn_cell_dim, num_chars, bow_char, eow_char, logdir, expname, threads=1, seed=42):
        # Create an empty graph and a session
        graph = tf.Graph()
        graph.seed = seed
        self.session = tf.Session(graph = graph, config=tf.ConfigProto(inter_op_parallelism_threads=threads,
                                                                       intra_op_parallelism_threads=threads))

        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
        self.summary_writer = tf.train.SummaryWriter("{}/{}-{}".format(logdir, timestamp, expname), flush_secs=10)

        # Construct the graph
        with self.session.graph.as_default():
            if rnn_cell == "LSTM":
                rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_cell_dim)
            elif rnn_cell == "GRU":
                rnn_cell = tf.nn.rnn_cell.GRUCell(rnn_cell_dim)
            else:
                raise ValueError("Unknown rnn_cell {}".format(rnn_cell))

            self.global_step = tf.Variable(0, dtype=tf.int64, trainable=False, name="global_step")
            self.sentence_lens = tf.placeholder(tf.int32, [None])
            self.form_ids = tf.placeholder(tf.int32, [None, None])
            self.forms = tf.placeholder(tf.int32, [None, None])
            self.form_lens = tf.placeholder(tf.int32, [None])
            self.lemma_ids = tf.placeholder(tf.int32, [None, None])
            self.lemmas = tf.placeholder(tf.int32, [None, None])
            self.lemma_lens = tf.placeholder(tf.int32, [None])

            # TODO
            # loss = ...
            # self.training = ...
            # self.predictions = ...
            # self.accuracy = ...

            self.dataset_name = tf.placeholder(tf.string, [])
            self.summary = tf.merge_summary([tf.scalar_summary(self.dataset_name+"/loss", loss),
                                             tf.scalar_summary(self.dataset_name+"/accuracy", self.accuracy)])

            # Initialize variables
            self.session.run(tf.initialize_all_variables())

