Python get_collection() usage examples (source code)
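
In TF 1.x, graph collections are a simple name-to-list registry: tf.add_to_collection(name, value) appends a value under a string key, and tf.get_collection(key) returns the accumulated list. A minimal sketch (the collection name and tensors here are illustrative, not from any project below):

import tensorflow as tf  # TF 1.x API

a = tf.constant(1.0, name='a')
b = tf.constant(2.0, name='b')

# Register values under a string key in the default graph.
tf.add_to_collection('my_losses', a)
tf.add_to_collection('my_losses', b)

# Retrieve everything stored under that key (a plain Python list).
total = tf.add_n(tf.get_collection('my_losses'))

with tf.Session() as sess:
    print(sess.run(total))  # 3.0

The examples below show the same pattern at scale: losses, update ops, and variables are registered during graph construction and collected later for training, summaries, or checkpointing.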

ocr.py (project: tf-cnn-lstm-ocr-captcha, author: Luonic)
def inference(images, batch_size, train):
  """Build the ocr model.

  Args:
    images: Images returned from distorted_inputs() or inputs().
    batch_size: Number of examples per batch.
    train: Whether the graph is being built for training.

  Returns:
    logits: Logits from the LSTM layers.
    timesteps: Number of timesteps produced by the convolutional layers.
  """
  features, timesteps = convolutional_layers(images, batch_size, train)
  logits = get_lstm_layers(features, timesteps, batch_size)
  return logits, timesteps


  # Fragment of the (truncated) loss() function: the total loss is defined as
  # the cross entropy loss plus all of the weight decay terms (L2 loss).
  # return tf.add_n(tf.get_collection('losses'), name='total_loss')
ocr.py (project: tf-cnn-lstm-ocr-captcha, author: Luonic)
def _add_loss_summaries(total_loss):
  """Add summaries for losses in ocr model.

  Generates moving average for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().
  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  losses = tf.get_collection('losses')
  loss_averages_op = loss_averages.apply(losses + [total_loss])

  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Name each loss as '(raw)' and name the moving average version of the loss
    # as the original loss name.
    tf.summary.scalar(l.op.name + ' (raw)', l)
    tf.summary.scalar(l.op.name, loss_averages.average(l))

  return loss_averages_op
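
Entries reach the 'losses' collection during graph construction. A hedged sketch of the usual cifar10-style helper that does this; the helper name and initializer are assumptions, not taken from this project:

def _variable_with_weight_decay(name, shape, stddev, wd):
  # Hypothetical helper: create a variable and, when wd is set, register its
  # L2 penalty under the 'losses' key so _add_loss_summaries() (and a
  # total-loss tf.add_n) can find it.
  var = tf.get_variable(
      name, shape,
      initializer=tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var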
ocr.py (project: tf-cnn-lstm-ocr-captcha, author: Luonic)
def train_simple(total_loss, global_step):
  with tf.variable_scope('train_op'):
    # Variables that affect learning rate.
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
    decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                    global_step,
                                    decay_steps,
                                    LEARNING_RATE_DECAY_FACTOR,
                                    staircase=True)
    tf.summary.scalar('learning_rate', lr)

    # update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # with tf.control_dependencies(update_ops):
    #   opt = tf.train.MomentumOptimizer(lr, 0.9).minimize(total_loss, global_step=global_step)
    opt = tf.train.AdamOptimizer(lr).minimize(total_loss, global_step=global_step)

    tf.summary.scalar(total_loss.op.name + ' (raw)', total_loss)
  return opt, lr
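
The commented-out block above is the standard batch-norm pattern: moving-average updates are registered in the UPDATE_OPS collection and must be made explicit dependencies of the train op, otherwise they never run. A sketch of that variant, assuming the same lr and total_loss:

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # Moving means/variances update before each optimizer step.
    opt = tf.train.AdamOptimizer(lr).minimize(total_loss, global_step=global_step)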
eval_segmentation.py (project: taskcv-2017-public, author: VisionLearningGroup)
def collect_vars(scope, start=None, end=None, prepend_scope=None):
    vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
    var_dict = OrderedDict()
    if isinstance(start, str):
        for i, var in enumerate(vars):
            var_name = remove_first_scope(var.op.name)
            if var_name.startswith(start):
                start = i
                break
    if isinstance(end, str):
        for i, var in enumerate(vars):
            var_name = remove_first_scope(var.op.name)
            if var_name.startswith(end):
                end = i
                break
    for var in vars[start:end]:
        var_name = remove_first_scope(var.op.name)
        if prepend_scope is not None:
            var_name = os.path.join(prepend_scope, var_name)
        var_dict[var_name] = var
    return var_dict
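
The returned OrderedDict maps stripped names to variables, which is exactly the form tf.train.Saver accepts, so one network's scope can be restored from a checkpoint written under another name. A usage sketch; the 'target' scope and the checkpoint path are illustrative, and an active tf.Session sess is assumed:

# Keys have the leading scope removed, so they line up with checkpoint names.
target_vars = collect_vars('target')
restorer = tf.train.Saver(var_list=target_vars)
restorer.restore(sess, '/path/to/source_model.ckpt')  # hypothetical path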
util.py (project: taskcv-2017-public, author: VisionLearningGroup)

(Verbatim copy of the collect_vars implementation above; omitted.)
model_deploy_test.py (project: isbi2017-part3, author: learningtitans)
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = LogisticClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 2)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'LogisticClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, '')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(update_ops, [])
model_deploy_test.py (project: isbi2017-part3, author: learningtitans)
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, '')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
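      # BatchNormClassifier contributes two update ops: the moving-mean and
      # moving-variance updates of its batch-norm layer.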
      self.assertEqual(len(update_ops), 2)
model_deploy_test.py (project: isbi2017-part3, author: learningtitans)
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = LogisticClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 2)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(update_ops, [])

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, '')
        self.assertDeviceEqual(v.device, 'CPU:0')
model_deploy_test.py (project: isbi2017-part3, author: learningtitans)
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 5)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, '')
        self.assertDeviceEqual(v.device, 'CPU:0')
train_image_classifier.py (project: isbi2017-part3, author: learningtitans)
def _get_variables_to_train():
  """Returns a list of variables to train.

  Returns:
    A list of variables to train by the optimizer.
  """
  if FLAGS.trainable_scopes is None:
    return tf.trainable_variables()
  else:
    scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]

  variables_to_train = []
  for scope in scopes:
    variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
    variables_to_train.extend(variables)
  return variables_to_train
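
FLAGS.trainable_scopes is a comma-separated list of variable-scope prefixes; each one is passed as the scope filter of tf.get_collection. For example (the flag value and scope names are illustrative):

# --trainable_scopes=InceptionV3/Logits,InceptionV3/AuxLogits
# fine-tunes only the two classification heads, i.e. the union of:
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'InceptionV3/Logits')
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'InceptionV3/AuxLogits')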
models.py (project: mlc2017-online, author: machine-learning-challenge)
def get_collection(self, global_step):
    W1 = tf.get_collection("W1")[0]
    W2 = tf.get_collection("W2")[0]
    observations = tf.get_collection("observations")[0]
    probability = tf.get_collection("probability")[0]
    advantages = tf.get_collection("advantages")[0]
    W1Grad = tf.get_collection("W1Grad")[0]
    updateGrads = tf.get_collection("updateGrads")[0]
    W2Grad = tf.get_collection("W2Grad")[0]
    newGrads1 = tf.get_collection("newGrads1")[0]
    newGrads2 = tf.get_collection("newGrads2")[0]
    newGrads = [newGrads1, newGrads2]

    self.global_step = global_step
    self.W1 = W1
    self.W2 = W2
    self.observations = observations
    self.probability = probability
    self.advantages = advantages
    self.W1Grad = W1Grad
    self.updateGrads = updateGrads
    self.W2Grad = W2Grad
    self.newGrads = newGrads

  # Before training, any initialization code
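
These restore-side reads only work if the graph-building (or exporting) code registered each tensor under the same key. A hypothetical build-side counterpart, mirroring the keys read above:

# Hypothetical registration code, run once when the graph is built:
tf.add_to_collection("W1", W1)
tf.add_to_collection("W2", W2)
tf.add_to_collection("observations", observations)
tf.add_to_collection("probability", probability)
# ...one add_to_collection() per tensor the restore side expects.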
value_functions.py (project: baselines, author: openai)
def __init__(self, ob_dim, ac_dim): #pylint: disable=W0613
        X = tf.placeholder(tf.float32, shape=[None, ob_dim*2+ac_dim*2+2]) # batch of observations
        vtarg_n = tf.placeholder(tf.float32, shape=[None], name='vtarg')
        wd_dict = {}
        h1 = tf.nn.elu(dense(X, 64, "h1", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict))
        h2 = tf.nn.elu(dense(h1, 64, "h2", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict))
        vpred_n = dense(h2, 1, "hfinal", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict)[:,0]
        sample_vpred_n = vpred_n + tf.random_normal(tf.shape(vpred_n))
        wd_loss = tf.get_collection("vf_losses", None)
        loss = U.mean(tf.square(vpred_n - vtarg_n)) + tf.add_n(wd_loss)
        loss_sampled = U.mean(tf.square(vpred_n - tf.stop_gradient(sample_vpred_n)))
        self._predict = U.function([X], vpred_n)
        # NOTE: `async` became a reserved keyword in Python 3.7; this call
        # targets the Python 2-era kfac API.
        optim = kfac.KfacOptimizer(learning_rate=0.001, cold_lr=0.001*(1-0.9),
                                   momentum=0.9, clip_kl=0.3, epsilon=0.1,
                                   stats_decay=0.95, async=1, kfac_update=2,
                                   cold_iter=50, weight_decay_dict=wd_dict,
                                   max_grad_norm=None)
        vf_var_list = []
        for var in tf.trainable_variables():
            if "vf" in var.name:
                vf_var_list.append(var)

        update_op, self.q_runner = optim.minimize(loss, loss_sampled, var_list=vf_var_list)
        self.do_update = U.function([X, vtarg_n], update_op) #pylint: disable=E1101
        U.initialize() # Initialize uninitialized TF variables
model.py (project: GAN-Sentence, author: huseinzol05)
def __init__(self, num_layers, size_layer, dimension_input, len_noise, sequence_size, learning_rate):
        self.noise = tf.placeholder(tf.float32, [None, None, len_noise])
        self.fake_input = tf.placeholder(tf.float32, [None, None, dimension_input])
        self.true_sentence = tf.placeholder(tf.float32, [None, None, dimension_input])
        self.initial_layer = generator_encode(self.noise, num_layers, size_layer, len_noise)
        self.final_outputs = generator_sentence(self.fake_input, self.initial_layer, num_layers, size_layer, dimension_input)
        fake_logits = discriminator(self.final_outputs, num_layers, size_layer, dimension_input)
        true_logits = discriminator(self.true_sentence, num_layers, size_layer, dimension_input, reuse = True)
        d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = true_logits, labels = tf.ones_like(true_logits)))
        d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = fake_logits, labels = tf.zeros_like(fake_logits)))
        self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = fake_logits, labels = tf.ones_like(fake_logits)))

        self.d_loss = d_loss_real + d_loss_fake
        d_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'discriminator')
        g_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'generator_encode') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'generator_sentence')
        self.d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1 = 0.5).minimize(self.d_loss, var_list = d_vars)
        self.g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1 = 0.5).minimize(self.g_loss, var_list = g_vars)
base_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def build(self):
        self.add_placeholders()

        xavier = tf.contrib.layers.xavier_initializer(seed=1234)
        inputs, output_embed_matrix = self.add_input_op(xavier)

        # the encoder
        with tf.variable_scope('RNNEnc', initializer=xavier):
            enc_hidden_states, enc_final_state = self.add_encoder_op(inputs=inputs)
        self.final_encoder_state = enc_final_state

        # the training decoder
        with tf.variable_scope('RNNDec', initializer=xavier):
            train_preds = self.add_decoder_op(enc_final_state=enc_final_state, enc_hidden_states=enc_hidden_states, output_embed_matrix=output_embed_matrix, training=True)
        self.loss = self.add_loss_op(train_preds) + self.add_regularization_loss()
        self.train_op = self.add_training_op(self.loss)

        # the inference decoder
        with tf.variable_scope('RNNDec', initializer=xavier, reuse=True):
            eval_preds = self.add_decoder_op(enc_final_state=enc_final_state, enc_hidden_states=enc_hidden_states, output_embed_matrix=output_embed_matrix, training=False)
        self.pred = self.finalize_predictions(eval_preds)
        self.eval_loss = self.add_loss_op(eval_preds)

        weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        size = 0
        def get_size(w):
            shape = w.get_shape()
            if shape.ndims == 2:
                return int(shape[0])*int(shape[1])
            else:
                assert shape.ndims == 1
                return int(shape[0])
        for w in weights:
            sz = get_size(w)
            print('weight', w, sz)
            size += sz
        print('total model size', size)
base_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def add_regularization_loss(self):
        if self.config.l2_regularization == 0.0:
            return 0

        # 'kernel:0' matches tf.layers/Dense weights; 'weights:0' matches
        # slim/contrib-style layers. Biases are deliberately excluded.
        weights = [w for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
                   if w.name.split('/')[-1] in ('kernel:0', 'weights:0')]
        return tf.contrib.layers.apply_regularization(
            tf.contrib.layers.l2_regularizer(self.config.l2_regularization), weights)
nn_skeleton.py (project: squeezeDet-hand, author: fyhtea)
def _add_loss_summaries(total_loss):
  """Add summaries for losses
  Generates loss summaries for visualizing the performance of the network.
  Args:
    total_loss: Total loss from loss().
  """
  losses = tf.get_collection('losses')

  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    tf.summary.scalar(l.op.name, l)
check_video_id.py (project: youtube-8m, author: wangheda)
def check_video_id():
  tf.set_random_seed(0)  # for reproducibility
  with tf.Graph().as_default():
    # convert feature_names and feature_sizes to lists of values
    feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
        FLAGS.feature_names, FLAGS.feature_sizes)

    # prepare a reader for each single model prediction result
    all_readers = []

    all_patterns = FLAGS.eval_data_patterns
    all_patterns = map(lambda x: x.strip(), all_patterns.strip().strip(",").split(","))
    for i in xrange(len(all_patterns)):
      reader = readers.EnsembleReader(
          feature_names=feature_names, feature_sizes=feature_sizes)
      all_readers.append(reader)

    input_reader = None
    input_data_pattern = None
    if FLAGS.input_data_pattern is not None:
      input_reader = readers.EnsembleReader(
          feature_names=["mean_rgb","mean_audio"], feature_sizes=[1024,128])
      input_data_pattern = FLAGS.input_data_pattern

    if FLAGS.eval_data_patterns == "":
      raise IOError("'eval_data_patterns' was not specified. " +
                    "Nothing to evaluate.")

    build_graph(
        all_readers=all_readers,
        input_reader=input_reader,
        input_data_pattern=input_data_pattern,
        all_eval_data_patterns=all_patterns,
        batch_size=FLAGS.batch_size)

    logging.info("built evaluation graph")
    video_id_equal = tf.get_collection("video_id_equal")[0]
    input_distance = tf.get_collection("input_distance")[0]

    check_loop(video_id_equal, input_distance, all_patterns)
classifier_tf.py (project: human-rl, author: gsastry)
def __init__(self, checkpoint_file):

        checkpoint_dir = os.path.dirname(checkpoint_file)
        hparams_file = os.path.join(checkpoint_dir, "hparams.txt")
        hparams_dict = {}
        if os.path.isfile(hparams_file):
            with open(hparams_file) as f:
                hparams_dict = ast.literal_eval(f.read())
        self.hparams = TensorflowClassifierHparams(**hparams_dict)
        self.graph = tf.Graph()
        with self.graph.as_default():
            print("loading from file {}".format(checkpoint_file))
            config = tf.ConfigProto(
                device_count={'GPU': 0}, )
            config.gpu_options.visible_device_list = ""
            self.session = tf.Session(config=config)
            new_saver = tf.train.import_meta_graph(checkpoint_file + ".meta", clear_devices=True)
            new_saver.restore(self.session, checkpoint_file)

            self.features = {}

            if self.hparams.use_image:
                self.features["image"] = self.graph.get_tensor_by_name("image:0")
            if self.hparams.use_observation:
                self.features["observation"] = self.graph.get_tensor_by_name("observation:0")
            if self.hparams.use_action:
                self.features["action"] = self.graph.get_tensor_by_name("action:0")
            self.prediction = tf.get_collection('prediction')[0]
            self.loss = tf.get_collection('loss')[0]
            self.threshold = tf.get_collection('threshold')[0]
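
import_meta_graph can only hand these endpoints back if the training script registered them in collections before saving. A hypothetical export-side sketch; the op names are assumptions:

# Hypothetical code from the training script, run before saving:
tf.add_to_collection('prediction', prediction_op)
tf.add_to_collection('loss', loss_op)
tf.add_to_collection('threshold', threshold_tensor)
saver = tf.train.Saver()
saver.save(session, checkpoint_file)  # also writes checkpoint_file.meta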