Example source code for the Python method group() (here, TensorFlow's tf.group)
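
All of the snippets below use tf.group(*ops), which bundles several TensorFlow ops into one op with no output value; running the grouped op runs every input op. A minimal sketch, assuming TensorFlow 1.x graph mode:

import tensorflow as tf

a = tf.Variable(0)
b = tf.Variable(0)
# One op that, when run, runs both assignments.
update = tf.group(tf.assign_add(a, 1), tf.assign_add(b, 2))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update)
    print(sess.run([a, b]))  # [1, 2]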

tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def __init__(self, inputs, outputs, updates=[]):
        if not isinstance(inputs, (list, tuple)):
            raise TypeError('`inputs` to a TensorFlow backend function '
                            'should be a list or tuple.')
        if not isinstance(outputs, (list, tuple)):
            raise TypeError('`outputs` of a TensorFlow backend function '
                            'should be a list or tuple.')
        if not isinstance(updates, (list, tuple)):
            raise TypeError('`updates` in a TensorFlow backend function '
                            'should be a list or tuple.')
        self.inputs = list(inputs)
        self.outputs = list(outputs)
        with tf.control_dependencies(self.outputs):
            updates_ops = []
            for update in updates:
                if isinstance(update, tuple):
                    p, new_p = update
                    updates_ops.append(tf.assign(p, new_p))
                else:
                    # assumed already an op
                    updates_ops.append(update)
            self.updates_op = tf.group(*updates_ops)
a3c.py (project: DeepRL, author: arnomoonens)
def __init__(self, master, thread_id, clip_gradients=True):
        super(A3CThread, self).__init__(name=thread_id)
        self.thread_id = thread_id
        self.clip_gradients = clip_gradients
        self.env = make_environment(master.env_name)
        self.master = master
        self.config = master.config
        if thread_id == 0 and self.master.monitor:
            self.env = wrappers.Monitor(self.env, master.monitor_path, force=True, video_callable=(None if self.master.video else False))

        # Only used (and overwritten) by agents that use an RNN
        self.initial_features = None

        # Build actor and critic networks
        with tf.variable_scope("t{}_net".format(self.thread_id)):
            self.action, self.value, self.actor_states, self.critic_states, self.actions_taken, self.losses, self.adv, self.r, self.n_steps = self.build_networks()
            self.sync_net = self.create_sync_net_op()
            inc_step = self.master.global_step.assign_add(self.n_steps)
            self.train_op = tf.group(self.make_trainer(), inc_step)
        # Write the summary of each thread in a different directory
        self.writer = tf.summary.FileWriter(os.path.join(self.master.monitor_path, "thread" + str(self.thread_id)), self.master.session.graph)

        self.runner = RunnerThread(self.env, self, 20, thread_id == 0 and self.master.video)
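
The train_op above groups the gradient update with a global-step increment so one session call does both. A minimal standalone sketch of the same pattern (the toy loss and learning rate here are assumptions):

import tensorflow as tf

x = tf.Variable(2.0)
loss = tf.square(x)
global_step = tf.Variable(0, trainable=False)

grad_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
inc_step = global_step.assign_add(1)
train_op = tf.group(grad_step, inc_step)  # one op: train and advance the counter

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run(global_step))  # 1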
mainPlay.py (project: deep-RL-DQN-tensorflow, author: ZidanMusk)
def main():

    dqn = DQN(ENV_NAME, DOUBLE_DQN, DUELING_DQN, PER, TRAINING, RENDER)

    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

    with tf.Session() as sess:

        sess.run(init_op)
        # Try to restore a trained model and play.
        dqn.util.restore_graph(sess, forTrain=TRAINING)

        for ep in tqdm(range(MAX_EPISODES)):  # for each episode

            print("Episode no. {} :".format(ep))

            dqn.playing(sess)

            print('Episode %d: totalEpReward = %.2f, took: %.3f mins' % (ep, dqn.totalReward, dqn.duration / 60.0))


#RUN...
cdq.py (project: aaai17-cdq, author: caoyue10)
def train_deep_networks(self, global_step):

        # Variables that affect learning rate.
        num_batches_per_epoch = self.n_train / self.batch_size
        decay_steps = int(num_batches_per_epoch * self.num_epochs_per_decay)

        # Decay the learning rate exponentially based on the number of steps.
        self.img_lr = tf.train.exponential_decay(self.initial_learning_rate_img, global_step, decay_steps,
                                    self.learning_rate_decay_factor, staircase=True)
        self.img_lr_last = tf.train.exponential_decay(self.initial_learning_rate_img*10, global_step, decay_steps,
                                    self.learning_rate_decay_factor, staircase=True)

        self.txt_lr = tf.train.exponential_decay(self.initial_learning_rate_txt, global_step, decay_steps,
                                    self.learning_rate_decay_factor, staircase=True)
        self.txt_lr_last = tf.train.exponential_decay(self.initial_learning_rate_txt*10, global_step, decay_steps,
                                    self.learning_rate_decay_factor, staircase=True)

        # Compute gradients for the deep network parameters only,
        # excluding the centers and binary codes.
        apply_gradient_op_img = tf.train.MomentumOptimizer(learning_rate=self.img_lr, momentum=0.9).minimize(self.total_loss, var_list=self.deep_parameters_img, global_step=global_step)
        apply_gradient_op_img_last = tf.train.MomentumOptimizer(learning_rate=self.img_lr*10, momentum=0.9).minimize(self.total_loss, var_list=self.deep_parameters_img_lastlayer, global_step=global_step)
        apply_gradient_op_txt = tf.train.MomentumOptimizer(learning_rate=self.txt_lr, momentum=0.9).minimize(self.total_loss, var_list=self.deep_parameters_txt+self.deep_parameters_txt_lastlayer, global_step=global_step)
        apply_gradient_op = tf.group(apply_gradient_op_img, apply_gradient_op_img_last, apply_gradient_op_txt)

        return apply_gradient_op
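
A minimal sketch of grouping several optimizer steps into one train op (toy variables; an assumption, not the project's code). Note that in the function above all three minimize calls receive global_step, so the counter advances three times per grouped step; a sketch like this passes it to only one:

import tensorflow as tf

w1 = tf.Variable(1.0)
w2 = tf.Variable(1.0)
loss = tf.square(w1) + tf.square(w2)
global_step = tf.Variable(0, trainable=False)

op1 = tf.train.MomentumOptimizer(0.01, momentum=0.9).minimize(
    loss, var_list=[w1], global_step=global_step)
op2 = tf.train.MomentumOptimizer(0.1, momentum=0.9).minimize(
    loss, var_list=[w2])  # no global_step here, so each grouped step counts once
apply_gradient_op = tf.group(op1, op2)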
benchmark-googlenet.py (project: ck-tensorflow, author: ctuning)
def time_tensorflow_run(session, target, info_string):
  num_steps_burn_in = 10
  total_duration = 0.0
  total_duration_squared = 0.0
  if not isinstance(target, list):
    target = [target]
  target_op = tf.group(*target)
  for i in range(FLAGS.num_batches + num_steps_burn_in):
    start_time = time.time()
    _ = session.run(target_op)
    duration = time.time() - start_time
    if i >= num_steps_burn_in:  # accumulate exactly FLAGS.num_batches timings after burn-in
      if not i % 10:
        print('%s: step %d, duration = %.3f' %
              (datetime.now(), i - num_steps_burn_in, duration))
      total_duration += duration
      total_duration_squared += duration * duration
  mn = total_duration / FLAGS.num_batches
  vr = total_duration_squared / FLAGS.num_batches - mn * mn
  sd = math.sqrt(vr)
  print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
        (datetime.now(), info_string, FLAGS.num_batches, mn, sd))
  return TimingEntry(info_string, datetime.now(), FLAGS.num_batches, mn, sd)
benchmark-overfeat.py (project: ck-tensorflow, author: ctuning)
def time_tensorflow_run(session, target, info_string):
  num_steps_burn_in = 10
  total_duration = 0.0
  total_duration_squared = 0.0
  if not isinstance(target, list):
    target = [target]
  target_op = tf.group(*target)
  for i in range(FLAGS.num_batches + num_steps_burn_in):
    start_time = time.time()
    _ = session.run(target_op)
    duration = time.time() - start_time
    if i >= num_steps_burn_in:  # accumulate exactly FLAGS.num_batches timings after burn-in
      if not i % 10:
        print('%s: step %d, duration = %.3f' %
              (datetime.now(), i - num_steps_burn_in, duration))
      total_duration += duration
      total_duration_squared += duration * duration
  mn = total_duration / FLAGS.num_batches
  vr = total_duration_squared / FLAGS.num_batches - mn * mn
  sd = math.sqrt(vr)
  print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
        (datetime.now(), info_string, FLAGS.num_batches, mn, sd))
  return TimingEntry(info_string, datetime.now(), FLAGS.num_batches, mn, sd)
Read_classification_dataset.py (project: Super_TF, author: Dhruv-Mohan)
def main():
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    dummy_reader = Dataset_reader_classification(filename=_DATASET_PATH_, num_classes=_CLASSES_)
    #dummy_reader.pre_process_image(writer_pre_proc)

    with tf.Session() as sess:
        init_op.run()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        images, labels = dummy_reader.next_batch(_BATCH_SIZE_)
        meanimage = sess.run([dummy_reader.mean_image])[0]
        print(meanimage)
        print(images[0])
        if _SHOW_IMAGES_:
            for image in images:
                cv2.imshow('Image', image)
                cv2.imshow('Meanimage', meanimage)
                cv2.waitKey(0)

        coord.request_stop()
        coord.join(threads)
Dataset_reader_segmentation.py (project: Super_TF, author: Dhruv-Mohan)
def __init__(self, filename=None, epochs=100, num_classes=1):

        super().__init__()
        with tf.name_scope('Dataset_Segmentation_Reader') as scope:
            self.batch_size = tf.placeholder(tf.int32, name='Dataset_batch_size')
            self.num_classes = num_classes
            self.open_dataset(filename=filename, epochs=epochs)
            self.mean_header_proto = proto.Image_set()
            dataset_path, dataset_name = os.path.split(filename)
            common_name, _ = os.path.splitext(dataset_name)
            mean_file_path = os.path.join(dataset_path, common_name + '_header.proto')

            with open(mean_file_path, "rb") as mean_header_file:
                self.mean_header_proto.ParseFromString(mean_header_file.read())
            self.flip_prob = tf.Variable(tf.random_uniform(shape=[1], minval=0, maxval=1, dtype=tf.float32), trainable=False)
            self.crop_prob = tf.Variable(tf.random_uniform(shape=[1], minval=0, maxval=1, dtype=tf.float32), trainable=False)
            self.crop_val = tf.Variable(tf.random_uniform(shape=[1], minval=1.1, maxval=1.25, dtype=tf.float32), trainable=False)
            self.init_randoms = tf.group(self.flip_prob.initializer, self.crop_prob.initializer, self.crop_val.initializer)
            self.sess = None

            self.image_shape = [self.mean_header_proto.Image_headers.image_width, self.mean_header_proto.Image_headers.image_height, self.mean_header_proto.Image_headers.image_depth]
            self.mask_shape = [self.mean_header_proto.Image_headers.image_width, self.mean_header_proto.Image_headers.image_height, 1]
            self.images, self.masks, self.mask_weights, self.names = self.batch_inputs()
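
Grouping individual .initializer ops, as init_randoms does above, yields one handle for re-drawing just those random variables. A minimal sketch:

import tensorflow as tf

flip_prob = tf.Variable(tf.random_uniform([1]), trainable=False)
crop_prob = tf.Variable(tf.random_uniform([1]), trainable=False)
init_randoms = tf.group(flip_prob.initializer, crop_prob.initializer)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(init_randoms)  # re-draws both random values in one call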
utils.py (project: trpo, author: jjkke88)
def __init__(self, var_list):
        # list() the map: in Python 3 a bare map() is an iterator and would be
        # exhausted by the sum() below, leaving nothing for zip().
        shapes = list(map(var_shape, var_list))
        total_size = sum(np.prod(shape) for shape in shapes)
        self.theta = theta = tf.placeholder(tf.float32, [total_size])
        start = 0
        assigns = []
        for shape, v in zip(shapes, var_list):
            size = np.prod(shape)
            # Slice this variable's segment out of the flat vector and assign it.
            assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
            start += size
        self.op = tf.group(*assigns)
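
For context, a hedged sketch of the read-side counterpart (the trpo codebase has a similar flattening helper; this standalone version is an assumption): concatenating every variable into one 1-D tensor lets parameters round-trip through the flat vector consumed by the op above.

import tensorflow as tf

def get_flat_op(var_list):
    # Flatten each variable and concatenate them into a single 1-D tensor.
    return tf.concat([tf.reshape(v, [-1]) for v in var_list], axis=0)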
memory.py (project: tefla, author: openAGI)
def make_update_op(self, upd_idxs, upd_keys, upd_vals,
                       batch_size, use_recent_idx, intended_output):
        """Function that creates all the update ops."""
        mem_age_incr = self.mem_age.assign_add(tf.ones([self.memory_size],
                                                       dtype=tf.float32))
        with tf.control_dependencies([mem_age_incr]):
            mem_age_upd = tf.scatter_update(
                self.mem_age, upd_idxs, tf.zeros([batch_size], dtype=tf.float32))

        mem_key_upd = tf.scatter_update(
            self.mem_keys, upd_idxs, upd_keys)
        mem_val_upd = tf.scatter_update(
            self.mem_vals, upd_idxs, upd_vals)

        if use_recent_idx:
            recent_idx_upd = tf.scatter_update(
                self.recent_idx, intended_output, upd_idxs)
        else:
            recent_idx_upd = tf.group()

        return tf.group(mem_age_upd, mem_key_upd, mem_val_upd, recent_idx_upd)
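
Note the fallback branch above: tf.group() with no arguments is a valid op that does nothing when run (interchangeable with tf.no_op() as a placeholder), so the returned group always receives four inputs. For example:

import tensorflow as tf

noop = tf.group()  # a real op; running it has no effect

with tf.Session() as sess:
    sess.run(noop)  # succeeds, does nothing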
optimizer.py (project: tefla, author: openAGI)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        with tf.name_scope(name, self._name) as name:
            update_op = self._opt.apply_gradients(
                grads_and_vars, global_step=global_step)
            clip_update_ops = []
            with tf.control_dependencies([update_op]):
                for grad, var in grads_and_vars:
                    if grad is None or var not in self._vars_to_clip_dims:
                        continue
                    with tf.name_scope("clip_" + var.op.name):
                        if isinstance(grad, tf.Tensor):
                            clip_update_ops.append(self._clip_dense(var))
                        else:
                            clip_update_ops.append(
                                self._clip_sparse(grad, var))

            # In case no var was clipped, still need to run the update_op.
            return tf.group(*([update_op] + clip_update_ops), name=name)
optimizer.py (project: tefla, author: openAGI)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        train_op = self._optimizer.apply_gradients(
            grads_and_vars, global_step=global_step, name=name)
        var_list = [x[1] for x in grads_and_vars if x[0] is not None]
        self._variable_map = {}
        if self._sequential_update:
            with tf.control_dependencies([train_op]):
                ma_op = self._ema.apply(var_list)
        else:
            ma_op = self._ema.apply(var_list)

        for v in var_list:
            v_avg = self._ema.average(v)
            self._variable_map[v.op.name] = v_avg
            self._variable_map[v_avg.op.name] = v
        return tf.group(train_op, ma_op, name="train_with_avg")
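
The control-dependency-plus-group pattern above, in a hedged standalone sketch (toy variable and decay are assumptions): the shadow average updates only after the training step, and both are exposed as a single op.

import tensorflow as tf

w = tf.Variable(3.0)
loss = tf.square(w)
ema = tf.train.ExponentialMovingAverage(decay=0.9)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
with tf.control_dependencies([train_op]):
    ma_op = ema.apply([w])  # runs only after the gradient step
train_with_avg = tf.group(train_op, ma_op, name="train_with_avg")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_with_avg)
    print(sess.run([w, ema.average(w)]))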
nn.py (project: weightnorm, author: openai)
def adam_updates(params, cost_or_grads, lr=0.001, mom1=0.9, mom2=0.999):
    ''' Adam optimizer '''
    updates = []
    if type(cost_or_grads) is not list:
        grads = tf.gradients(cost_or_grads, params)
    else:
        grads = cost_or_grads
    # Pass `name` as a keyword: the second positional argument of tf.Variable
    # is `trainable`, so tf.Variable(1., 'adam_t') would silently mark the
    # optimizer state as trainable and leave it unnamed.
    t = tf.Variable(1., name='adam_t')
    for p, g in zip(params, grads):
        mg = tf.Variable(tf.zeros(p.get_shape()), name=p.op.name + '_adam_mg')
        if mom1 > 0:
            v = tf.Variable(tf.zeros(p.get_shape()), name=p.op.name + '_adam_v')
            v_t = mom1 * v + (1. - mom1) * g
            v_hat = v_t / (1. - tf.pow(mom1, t))
            updates.append(v.assign(v_t))
        else:
            v_hat = g
        mg_t = mom2 * mg + (1. - mom2) * tf.square(g)
        mg_hat = mg_t / (1. - tf.pow(mom2, t))
        g_t = v_hat / tf.sqrt(mg_hat + 1e-8)
        p_t = p - lr * g_t
        updates.append(mg.assign(mg_t))
        updates.append(p.assign(p_t))
    updates.append(t.assign_add(1))
    return tf.group(*updates)
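
A hedged usage sketch for the helper above (toy scalar loss; assumes adam_updates is in scope):

import tensorflow as tf

x = tf.Variable(5.0, name='x')
loss = tf.square(x)
train_op = adam_updates([x], loss, lr=0.1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(200):
        sess.run(train_op)
    print(sess.run(x))  # close to 0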
Resnet.py (project: DeepLab, author: 2prime)
def _build_train_op(self):
    """Build training specific ops for the graph."""
    self.lrn_rate = tf.constant(self.hps.lrn_rate, tf.float32)
    tf.summary.scalar('learning_rate', self.lrn_rate)

    trainable_variables = tf.trainable_variables()
    grads = tf.gradients(self.cost, trainable_variables)

    if self.hps.optimizer == 'sgd':
      optimizer = tf.train.GradientDescentOptimizer(self.lrn_rate)
    elif self.hps.optimizer == 'mom':
      optimizer = tf.train.MomentumOptimizer(self.lrn_rate, 0.9)

    apply_op = optimizer.apply_gradients(
        zip(grads, trainable_variables),
        global_step=self.global_step, name='train_step')

    train_ops = [apply_op] + self._extra_train_ops
    self.train_op = tf.group(*train_ops)

  # TODO(xpan): Consider batch_norm in contrib/layers/python/layers/layers.py
evaluation_test.py (project: lsdc, author: febert)
def testFinalOpsOnEvaluationLoop(self):
    value_op, update_op = slim.metrics.streaming_accuracy(
        self._predictions, self._labels)
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())
    # Create Checkpoint and log directories
    chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
    gfile.MakeDirs(chkpt_dir)
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
    gfile.MakeDirs(logdir)

    # Save initialized variables to checkpoint directory
    saver = tf.train.Saver()
    with self.test_session() as sess:
      init_op.run()
      saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

    # Now, run the evaluation loop:
    accuracy_value = slim.evaluation.evaluation_loop(
        '', chkpt_dir, logdir, eval_op=update_op, final_op=value_op,
        max_number_of_evaluations=1)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
evaluation_test.py (project: lsdc, author: febert)
def testRestoredModelPerformance(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
    log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')

    # First, save out the current model to a checkpoint:
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())
    saver = tf.train.Saver()
    with self.test_session() as sess:
      sess.run(init_op)
      saver.save(sess, checkpoint_path)

    # Next, determine the metric to evaluate:
    value_op, update_op = slim.metrics.streaming_accuracy(
        self._predictions, self._labels)

    # Run the evaluation and verify the results:
    accuracy_value = slim.evaluation.evaluate_once(
        '',
        checkpoint_path,
        log_dir,
        eval_op=update_op,
        final_op=value_op)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
graph_actions_test.py (project: lsdc, author: febert)
def test_train_loss(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      tf.contrib.framework.create_global_step()
      loss_var = tf.contrib.framework.local_variable(10.0)
      train_op = tf.group(
          tf.assign_add(tf.contrib.framework.get_global_step(), 1),
          tf.assign_add(loss_var, -1.0))
      self._assert_summaries(self._output_dir)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=loss_var.value(),
          steps=6)
      self.assertEqual(4.0, loss)
      self._assert_summaries(self._output_dir, expected_graphs=[g])
      self._assert_ckpt(self._output_dir, True)
topn.py (project: lsdc, author: febert)
def insert(self, ids, scores):
    """Insert the ids and scores into the TopN."""
    with tf.control_dependencies(self.last_ops):
      scatter_op = tf.scatter_update(self.id_to_score, ids, scores)
      larger_scores = tf.greater(scores, self.sl_scores[0])

      def shortlist_insert():
        larger_ids = tf.boolean_mask(tf.to_int64(ids), larger_scores)
        larger_score_values = tf.boolean_mask(scores, larger_scores)
        shortlist_ids, new_ids, new_scores = self.ops.top_n_insert(
            self.sl_ids, self.sl_scores, larger_ids, larger_score_values)
        u1 = tf.scatter_update(self.sl_ids, shortlist_ids, new_ids)
        u2 = tf.scatter_update(self.sl_scores, shortlist_ids, new_scores)
        return tf.group(u1, u2)

      # We only need to insert into the shortlist if there are any
      # scores larger than the threshold.
      cond_op = tf.cond(
          tf.reduce_any(larger_scores), shortlist_insert, tf.no_op)
      with tf.control_dependencies([cond_op]):
        self.last_ops = [scatter_op, cond_op]
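
The tf.cond + tf.no_op pairing above runs the shortlist update only when some score beats the threshold. A minimal hedged sketch of the same conditional-update idea, using tensor-returning branches:

import tensorflow as tf

counter = tf.Variable(0)
flag = tf.placeholder(tf.bool)
maybe_inc = tf.cond(flag,
                    lambda: tf.assign_add(counter, 1),
                    lambda: tf.identity(counter))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(maybe_inc, feed_dict={flag: True})
    sess.run(maybe_inc, feed_dict={flag: False})
    print(sess.run(counter))  # 1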
factorization_ops.py (project: lsdc, author: febert)
def scatter_update(cls, factor, indices, values, sharding_func):
    """Helper function for doing sharded scatter update."""
    assert isinstance(factor, list)
    if len(factor) == 1:
      with ops.colocate_with(factor[0]):
        # TODO(agarwal): assign instead of scatter update for full batch update.
        return tf.scatter_update(factor[0], indices, values).op
    else:
      num_shards = len(factor)
      assignments, new_ids = sharding_func(indices)
      assert assignments is not None
      assignments = tf.cast(assignments, tf.int32)
      sharded_ids = tf.dynamic_partition(new_ids, assignments, num_shards)
      sharded_values = tf.dynamic_partition(values, assignments, num_shards)
      updates = []
      for i in xrange(num_shards):
        updates.append(tf.scatter_update(factor[i],
                                         sharded_ids[i],
                                         sharded_values[i]))
      return tf.group(*updates)
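
For reference, the single-shard core operation in a minimal sketch; the function above generalizes it by partitioning ids and values across shards with tf.dynamic_partition and grouping the per-shard scatter updates:

import tensorflow as tf

table = tf.Variable(tf.zeros([4]))
upd = tf.scatter_update(table, [1, 3], [10.0, 30.0])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(upd)
    print(sess.run(table))  # [ 0. 10.  0. 30.]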
evaluation_test.py (project: lsdc, author: febert)
def testFinalOpsOnEvaluationLoop(self):
    value_op, update_op = slim.metrics.streaming_accuracy(
        self._predictions, self._labels)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    # Create Checkpoint and log directories
    chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
    gfile.MakeDirs(chkpt_dir)
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
    gfile.MakeDirs(logdir)

    # Save initialized variables to checkpoint directory
    saver = tf.train.Saver()
    with self.test_session() as sess:
      init_op.run()
      saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

    # Now, run the evaluation loop:
    accuracy_value = slim.evaluation.evaluation_loop(
        '', chkpt_dir, logdir, eval_op=update_op, final_op=value_op,
        max_number_of_evaluations=1)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)

