Python usage examples of merge_all_summaries(): source code from open-source projects

Source file: eval.py | Project: web_page_classification | Author: yuhui-lin
def evaluate():
    """Eval CNN for a number of steps."""
    with tf.Graph().as_default() as g, tf.device("/cpu:0"):
        # Get sequences and labels
        sequences, labels = model.inputs_eval()

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = model.inference(sequences)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        # # Restore the moving average version of the learned variables for eval.
        # variable_averages = tf.train.ExponentialMovingAverage(
        #     model.MOVING_AVERAGE_DECAY)
        # variables_to_restore = variable_averages.variables_to_restore()
        # saver = tf.train.Saver(variables_to_restore)
        saver = tf.train.Saver(tf.all_variables())

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        summary_writer = tf.train.SummaryWriter(EVAL_DIR, g)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                print("eval only once, stope eval")
                break
            print("sleep for {} seconds".format(FLAGS.eval_interval_secs))
            time.sleep(FLAGS.eval_interval_secs)
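
Note: every snippet on this page targets the pre-1.0 TensorFlow summary API. TensorFlow 1.0 renamed these symbols, so a minimal sketch of the same setup under the 1.x names would look like this (the dummy op, variable, and log directory are illustrative, not taken from the project):

import tensorflow as tf

EVAL_DIR = "/tmp/eval_demo"  # hypothetical log directory

with tf.Graph().as_default() as g:
    # Renames: tf.merge_all_summaries() -> tf.summary.merge_all(),
    #          tf.train.SummaryWriter   -> tf.summary.FileWriter,
    #          tf.all_variables()       -> tf.global_variables()
    step = tf.Variable(0, name="global_step", trainable=False)
    tf.summary.scalar("dummy_loss", tf.constant(0.0))
    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(EVAL_DIR, g)
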
Source file: model.py | Project: bi-att-flow | Author: allenai
def __init__(self, config):
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W, H = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size, config.max_tree_height
        self.x = tf.placeholder('int32', [None, M, JX], name='x')
        self.cx = tf.placeholder('int32', [None, M, JX, W], name='cx')
        self.q = tf.placeholder('int32', [None, JQ], name='q')
        self.cq = tf.placeholder('int32', [None, JQ, W], name='cq')
        self.tx = tf.placeholder('int32', [None, M, H, JX], name='tx')
        self.tx_edge_mask = tf.placeholder('bool', [None, M, H, JX, JX], name='tx_edge_mask')
        self.y = tf.placeholder('bool', [None, M, H, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')

        # Define misc

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()

        self.ema_op = self._get_ema_op()
        self.summary = tf.merge_all_summaries()
Source file: model.py | Project: bi-att-flow | Author: allenai
def __init__(self, config, scope):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, None], name='x')
        self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')
        self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
        self.y = tf.placeholder('bool', [N, M, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
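        # Note: this merged result is immediately overwritten by the scoped merge below.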
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
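
The second assignment above is the one that survives: it merges only the summaries registered under this model's variable scope, which matters when several model replicas share one graph. A minimal self-contained sketch of the scoped pattern, using the same pre-1.0 API (the tower name and loss variable are illustrative):

import tensorflow as tf

with tf.variable_scope("tower_0") as scope:
    loss = tf.Variable(1.0, name="loss")
    tf.scalar_summary("tower_0/loss", loss)
    # "summaries" is the default collection key (tf.GraphKeys.SUMMARIES);
    # restricting by scope excludes summaries created by other towers.
    tower_summary = tf.merge_summary(
        tf.get_collection("summaries", scope=scope.name))
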
Source file: train_lite.py | Project: chn_handwriting | Author: zhangchunsheng
def train_hand_write_cnn():
    output = chinese_hand_write_cnn()

    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(output, Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(output, 1), tf.argmax(Y, 1)), tf.float32))

    # TensorBoard
    tf.scalar_summary("loss", loss)
    tf.scalar_summary("accuracy", accuracy)
    merged_summary_op = tf.merge_all_summaries()

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Launch TensorBoard with: tensorboard --logdir=./log  then open http://0.0.0.0:6006 in a browser
        summary_writer = tf.train.SummaryWriter('./log', graph=tf.get_default_graph())

        for e in range(50):
            for i in range(num_batch):
                batch_x = train_data_x[i*batch_size : (i+1)*batch_size]
                batch_y = train_data_y[i*batch_size : (i+1)*batch_size]
                _, loss_, summary = sess.run([optimizer, loss, merged_summary_op], feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.5})
                # Write this step's summary to the TensorBoard log
                summary_writer.add_summary(summary, e*num_batch+i)
                print(e*num_batch+i, loss_)

                if (e*num_batch+i) % 100 == 0:
                    # Evaluate accuracy every 100 steps
                    acc = accuracy.eval({X: text_data_x[:500], Y: text_data_y[:500], keep_prob: 1.})
                    #acc = sess.run(accuracy, feed_dict={X: text_data_x[:500], Y: text_data_y[:500], keep_prob: 1.})
                    print(e*num_batch+i, acc)
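
For reference, the deprecated summary calls in this snippet map one-to-one onto the TF 1.x API; a minimal sketch with stand-in ops (not the project's network):

import tensorflow as tf

loss = tf.Variable(0.5, name="loss")
accuracy = tf.Variable(0.9, name="accuracy")
tf.summary.scalar("loss", loss)             # was tf.scalar_summary("loss", loss)
tf.summary.scalar("accuracy", accuracy)     # was tf.scalar_summary("accuracy", accuracy)
merged_summary_op = tf.summary.merge_all()  # was tf.merge_all_summaries()
writer = tf.summary.FileWriter('./log', graph=tf.get_default_graph())
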
Source file: eval.py | Project: text-classification2 | Author: yuhui-lin
def evaluate():
    """Eval CNN for a number of steps."""
    with tf.Graph().as_default() as g, tf.device("/cpu:0"):
        # Get sequences and labels
        sequences, labels = model.inputs_eval()

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = model.inference(sequences)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        # # Restore the moving average version of the learned variables for eval.
        # variable_averages = tf.train.ExponentialMovingAverage(
        #     model.MOVING_AVERAGE_DECAY)
        # variables_to_restore = variable_averages.variables_to_restore()
        # saver = tf.train.Saver(variables_to_restore)
        saver = tf.train.Saver(tf.all_variables())

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        summary_writer = tf.train.SummaryWriter(EVAL_DIR, g)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                print("eval only once, stope eval")
                break
            print("sleep for {} seconds".format(FLAGS.eval_interval_secs))
            time.sleep(FLAGS.eval_interval_secs)
Source file: model.py | Project: Chinese-QA | Author: distantJing
def __init__(self, config):
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W, H = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size, config.max_tree_height
        self.x = tf.placeholder('int32', [None, M, JX], name='x')
        self.cx = tf.placeholder('int32', [None, M, JX, W], name='cx')
        self.q = tf.placeholder('int32', [None, JQ], name='q')
        self.cq = tf.placeholder('int32', [None, JQ, W], name='cq')
        self.tx = tf.placeholder('int32', [None, M, H, JX], name='tx')
        self.tx_edge_mask = tf.placeholder('bool', [None, M, H, JX, JX], name='tx_edge_mask')
        self.y = tf.placeholder('bool', [None, M, H, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')

        # Define misc

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()

        self.ema_op = self._get_ema_op()
        self.summary = tf.merge_all_summaries()
Source file: model.py | Project: Chinese-QA | Author: distantJing
def __init__(self, config, scope):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, None], name='x')
        self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')
        self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
        self.y = tf.placeholder('bool', [N, M, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
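        # Note: this merged result is immediately overwritten by the scoped merge below.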
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
Source file: evaluate.py | Project: JetsonTX1_im2txt | Author: Netzeband
def run():
  """Runs evaluation in a loop, and logs summaries to TensorBoard."""
  # Create the evaluation directory if it doesn't exist.
  eval_dir = FLAGS.eval_dir
  if not tf.gfile.IsDirectory(eval_dir):
    tf.logging.info("Creating eval directory: %s", eval_dir)
    tf.gfile.MakeDirs(eval_dir)

  g = tf.Graph()
  with g.as_default():
    # Build the model for evaluation.
    model_config = configuration.ModelConfig()
    model_config.input_file_pattern = FLAGS.input_file_pattern
    model = show_and_tell_model.ShowAndTellModel(model_config, mode="eval")
    model.build()

    # Create the Saver to restore model Variables.
    saver = tf.train.Saver()

    # Create the summary operation and the summary writer.
    summary_op = tf.merge_all_summaries()
    summary_writer = tf.train.SummaryWriter(eval_dir)

    g.finalize()

    # Run a new evaluation run every eval_interval_secs.
    while True:
      start = time.time()
      tf.logging.info("Starting evaluation at " + time.strftime(
          "%Y-%m-%d-%H:%M:%S", time.localtime()))
      run_once(model, saver, summary_writer, summary_op)
      time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()
      if time_to_next_eval > 0:
        time.sleep(time_to_next_eval)
Source file: word2vec.py | Project: w2vec-similarity | Author: jayantj
def train(self):
    """Train the model."""
    opts = self._options
    initial_epoch, initial_words = self._session.run([self._epoch, self._words])
    summary_op = tf.merge_all_summaries()
    summary_writer = tf.train.SummaryWriter(opts.save_path,
                                            graph_def=self._session.graph_def)
    workers = []
    for _ in xrange(opts.concurrent_steps):
      t = threading.Thread(target=self._train_thread_body)
      t.start()
      workers.append(t)
    last_words, last_time, last_summary_time = initial_words, time.time(), 0
    last_checkpoint_time = 0
    while True:
      time.sleep(opts.statistics_interval)  # Reports our progress once a while.
      (epoch, step, loss, words, lr) = self._session.run(
          [self._epoch, self.global_step, self._loss, self._words, self._lr])
      now = time.time()
      last_words, last_time, rate = words, now, (words - last_words) / (
          now - last_time)
      print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
            (epoch, step, lr, loss, rate), end="")
      sys.stdout.flush()
      if now - last_summary_time > opts.summary_interval:
        summary_str = self._session.run(summary_op)
        summary_writer.add_summary(summary_str, step)
        last_summary_time = now
      if now - last_checkpoint_time > opts.checkpoint_interval:
        self.saver.save(self._session,
                        opts.save_path + "model",
                        global_step=step.astype(int))
        last_checkpoint_time = now
      if epoch != initial_epoch:
        break
    for t in workers:
      t.join()
    return epoch
Source file: dqn.py | Project: dqn | Author: elix-tech
def setup_summary(self):
        episode_total_reward = tf.Variable(0.)
        tf.scalar_summary(ENV_NAME + '/Total Reward/Episode', episode_total_reward)
        episode_avg_max_q = tf.Variable(0.)
        tf.scalar_summary(ENV_NAME + '/Average Max Q/Episode', episode_avg_max_q)
        episode_duration = tf.Variable(0.)
        tf.scalar_summary(ENV_NAME + '/Duration/Episode', episode_duration)
        episode_avg_loss = tf.Variable(0.)
        tf.scalar_summary(ENV_NAME + '/Average Loss/Episode', episode_avg_loss)
        summary_vars = [episode_total_reward, episode_avg_max_q, episode_duration, episode_avg_loss]
        summary_placeholders = [tf.placeholder(tf.float32) for _ in xrange(len(summary_vars))]
        update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in xrange(len(summary_vars))]
        summary_op = tf.merge_all_summaries()
        return summary_placeholders, update_ops, summary_op
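
The tuple returned above is meant to be consumed once per episode: feed each Python-side statistic through its placeholder, run the matching assign op, then emit one merged summary. A self-contained sketch of that pattern with dummy values, using the same pre-1.0 API (the tag and log directory are illustrative):

import tensorflow as tf

reward_var = tf.Variable(0.)
tf.scalar_summary("demo/Total Reward/Episode", reward_var)
reward_ph = tf.placeholder(tf.float32)
update_reward = reward_var.assign(reward_ph)
summary_op = tf.merge_all_summaries()

with tf.Session() as sess:
    writer = tf.train.SummaryWriter("/tmp/dqn_demo")
    sess.run(tf.initialize_all_variables())
    for episode, reward in enumerate([1.0, 3.0, 2.5]):  # dummy episode rewards
        sess.run(update_reward, feed_dict={reward_ph: reward})
        writer.add_summary(sess.run(summary_op), episode)
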
Source file: cifar10_eval.py | Project: SLAM | Author: sanjeevkumar42
def evaluate():
  """Eval CIFAR-10 for a number of steps."""
  with tf.Graph().as_default() as g:
    # Get images and labels for CIFAR-10.
    eval_data = FLAGS.eval_data == 'test'
    images, labels = cifar10.inputs(eval_data=eval_data)

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = cifar10.inference(images)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)

    while True:
      eval_once(saver, summary_writer, top_k_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
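
Unlike the eval.py snippets earlier on this page, which leave the equivalent lines commented out, this version restores the exponential-moving-average shadow values instead of the raw weights. A minimal sketch of that mechanism (the variable and decay rate are illustrative):

import tensorflow as tf

w = tf.Variable(0., name="w")
ema = tf.train.ExponentialMovingAverage(0.9999)
maintain_op = ema.apply([w])  # training side: track a shadow copy of w
# Eval side: map shadow names (e.g. "w/ExponentialMovingAverage") back to w,
# so the Saver restores averaged values into the live variable.
saver = tf.train.Saver(ema.variables_to_restore())
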
Source file: deepSpeech_test.py | Project: deepSpeech | Author: fordDeepDSP
def evaluate():
    """ Evaluate deepSpeech modelfor a number of steps."""

    with tf.Graph().as_default() as graph:

        # Get feats and labels for deepSpeech.
        feats, labels, seq_lens = deepSpeech.inputs(ARGS.eval_data,
                                                    data_dir=ARGS.data_dir,
                                                    batch_size=ARGS.batch_size,
                                                    use_fp16=ARGS.use_fp16,
                                                    shuffle=True)

        # Build ops that computes the logits predictions from the
        # inference model.
        ARGS.keep_prob = 1.0  # Disable dropout during testing.
        logits = deepSpeech.inference(feats, seq_lens, ARGS)

        # Calculate predictions.
        output_log_prob = tf.nn.log_softmax(logits)
        decoder = tf.nn.ctc_greedy_decoder
        strided_seq_lens = tf.div(seq_lens, ARGS.temporal_stride)
        predictions = decoder(output_log_prob, strided_seq_lens)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            ARGS.moving_avg_decay)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(ARGS.eval_dir, graph)

        while True:
            eval_once(saver, summary_writer, predictions, summary_op, labels)

            if ARGS.run_once:
                break
            time.sleep(ARGS.eval_interval_secs)
Source file: dnn.py | Project: tfdnn-kaldi | Author: dreaming-dog
def buildSummaryGraph(self):
        """
            TODO: Have to fix summary update in case of discriminative pre-training
        """
        self.summaryWriter = tf.train.SummaryWriter('logdir5', tf.get_default_graph())
        tf.scalar_summary(self.loss.op.name, self.loss)
        tf.scalar_summary(self.learning_rate.op.name, self.learning_rate)
        self.summary = tf.merge_all_summaries()
Source file: evaluation_test.py | Project: lsdc | Author: febert
def testSummariesAreFlushedToDisk(self):
    output_dir = os.path.join(self.get_temp_dir(), 'flush_test')
    if tf.gfile.Exists(output_dir):  # For running on jenkins.
      tf.gfile.DeleteRecursively(output_dir)

    names_to_metrics, names_to_updates = self._create_names_to_metrics(
        self._predictions, self._labels)

    for k in names_to_metrics:
      v = names_to_metrics[k]
      tf.scalar_summary(k, v)

    summary_writer = tf.train.SummaryWriter(output_dir)

    initial_op = tf.group(tf.initialize_all_variables(),
                          tf.initialize_local_variables())
    eval_op = tf.group(*names_to_updates.values())

    with self.test_session() as sess:
      slim.evaluation.evaluation(
          sess,
          initial_op=initial_op,
          eval_op=eval_op,
          summary_op=tf.merge_all_summaries(),
          summary_writer=summary_writer,
          global_step=self._global_step)

      names_to_values = {name: names_to_metrics[name].eval()
                         for name in names_to_metrics}
    self._verify_summaries(output_dir, names_to_values)
Source file: evaluation_test.py | Project: lsdc | Author: febert
def testSummariesAreFlushedToDiskWithoutGlobalStep(self):
    output_dir = os.path.join(self.get_temp_dir(), 'flush_test_no_global_step')
    if tf.gfile.Exists(output_dir):  # For running on jenkins.
      tf.gfile.DeleteRecursively(output_dir)

    names_to_metrics, names_to_updates = self._create_names_to_metrics(
        self._predictions, self._labels)

    for k in names_to_metrics:
      v = names_to_metrics[k]
      tf.scalar_summary(k, v)

    summary_writer = tf.train.SummaryWriter(output_dir)

    initial_op = tf.group(tf.initialize_all_variables(),
                          tf.initialize_local_variables())
    eval_op = tf.group(*names_to_updates.values())

    with self.test_session() as sess:
      slim.evaluation.evaluation(
          sess,
          initial_op=initial_op,
          eval_op=eval_op,
          summary_op=tf.merge_all_summaries(),
          summary_writer=summary_writer)

      names_to_values = {name: names_to_metrics[name].eval()
                         for name in names_to_metrics}
    self._verify_summaries(output_dir, names_to_values)
Source file: lynch.py | Project: rl-ofc | Author: DexGroves
def setup_summaries():
    episode_reward = tf.Variable(0.)
    tf.scalar_summary("Episode Reward", episode_reward)
    r_summary_placeholder = tf.placeholder("float")
    update_ep_reward = episode_reward.assign(r_summary_placeholder)
    ep_avg_v = tf.Variable(0.)
    tf.scalar_summary("Episode Value", ep_avg_v)
    val_summary_placeholder = tf.placeholder("float")
    update_ep_val = ep_avg_v.assign(val_summary_placeholder)
    summary_op = tf.merge_all_summaries()
    return r_summary_placeholder, update_ep_reward, val_summary_placeholder, update_ep_val, summary_op
Source file: ofc_a3c.py | Project: rl-ofc | Author: DexGroves
def build_summary_ops():
    """Tensorflow magic episode summary operations.
    I have no idea what this does or how this works."""
    episode_reward = tf.Variable(0.)
    tf.scalar_summary("Episode Reward", episode_reward)
    r_summary_placeholder = tf.placeholder("float")
    update_ep_reward = episode_reward.assign(r_summary_placeholder)
    ep_avg_v = tf.Variable(0.)
    tf.scalar_summary("Episode Value", ep_avg_v)
    val_summary_placeholder = tf.placeholder("float")
    update_ep_val = ep_avg_v.assign(val_summary_placeholder)
    summary_op = tf.merge_all_summaries()
    return (r_summary_placeholder, update_ep_reward, val_summary_placeholder,
            update_ep_val, summary_op)
Source file: model.py | Project: adversarial-squad | Author: robinjia
def __init__(self, config):
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W, H = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size, config.max_tree_height
        self.x = tf.placeholder('int32', [None, M, JX], name='x')
        self.cx = tf.placeholder('int32', [None, M, JX, W], name='cx')
        self.q = tf.placeholder('int32', [None, JQ], name='q')
        self.cq = tf.placeholder('int32', [None, JQ, W], name='cq')
        self.tx = tf.placeholder('int32', [None, M, H, JX], name='tx')
        self.tx_edge_mask = tf.placeholder('bool', [None, M, H, JX, JX], name='tx_edge_mask')
        self.y = tf.placeholder('bool', [None, M, H, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')

        # Define misc

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()

        self.ema_op = self._get_ema_op()
        self.summary = tf.merge_all_summaries()

