Python code examples of tf.trainable_variables()
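tf.trainable_variables() returns every Variable created with trainable=True (the default), in creation order; optimizers fall back to this collection when no explicit var_list is given. A minimal sketch (TF 1.x API):

import tensorflow as tf

w = tf.Variable(tf.zeros([3, 2]), name="w")                # trainable by default
b = tf.Variable(tf.zeros([2]), name="b", trainable=False)  # excluded from the collection

print([v.name for v in tf.trainable_variables()])  # ['w:0']

The snippets below, collected from open-source projects, show the common uses: selecting variable subsets for an optimizer, attaching summaries, and partial checkpoint save/restore.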

mnist.py (project: hyperchamber, author: 255BITS)
def create(config):
    batch_size = config["batch_size"]
    x = tf.placeholder(tf.float32, [batch_size, X_DIMS[0], X_DIMS[1], 1], name="x")
    y = tf.placeholder(tf.float32, [batch_size, Y_DIMS], name="y")

    hidden = hidden_layers(config, x)
    output = output_layer(config, hidden)

    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y), name="loss")

    output = tf.nn.softmax(output)
    correct_prediction = tf.equal(tf.argmax(output,1), tf.argmax(y,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    variables = tf.trainable_variables()

    optimizer = tf.train.GradientDescentOptimizer(config['learning_rate']).minimize(loss)


    set_tensor("x", x)
    set_tensor("y", y)
    set_tensor("loss", loss)
    set_tensor("optimizer", optimizer)
    set_tensor("accuracy", accuracy)
resnet_model.py (project: deep_learning_study, author: jowettcz)
def _build_train_op(self):
    """Build training specific ops for the graph."""
    self.lrn_rate = tf.constant(self.hps.lrn_rate, tf.float32)
    tf.summary.scalar('learning_rate', self.lrn_rate)

    trainable_variables = tf.trainable_variables()
    grads = tf.gradients(self.cost, trainable_variables)

    if self.hps.optimizer == 'sgd':
      optimizer = tf.train.GradientDescentOptimizer(self.lrn_rate)
    elif self.hps.optimizer == 'mom':
      optimizer = tf.train.MomentumOptimizer(self.lrn_rate, 0.9)

    apply_op = optimizer.apply_gradients(
        zip(grads, trainable_variables),
        global_step=self.global_step, name='train_step')

    train_ops = [apply_op] + self._extra_train_ops
    self.train_op = tf.group(*train_ops)

  # TODO(xpan): Consider batch_norm in contrib/layers/python/layers/layers.py
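Grouping apply_op with self._extra_train_ops makes side-effect ops (typically the batch-norm moving-average updates the TODO alludes to) run on every training step. The same effect via the standard UPDATE_OPS collection, as a sketch reusing the names from the method above:

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.apply_gradients(zip(grads, trainable_variables))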
summarizer.py (project: tflearn, author: tflearn)
def summarize_variables(train_vars=None, summary_collection="tflearn_summ"):
    """ summarize_variables.

    Arguments:
        train_vars: list of `Variable`. The variable weights to monitor.
        summary_collection: A collection to add this summary to and
            also used for returning a merged summary over all its elements.
            Default: 'tflearn_summ'.

    Returns:
        `Tensor`. Merged summary of all summaries in 'summary_collection'.

    """
    if not train_vars: train_vars = tf.trainable_variables()
    summaries.add_trainable_vars_summary(train_vars, "", "", summary_collection)
    return merge_summary(tf.get_collection(summary_collection))
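Called with no arguments, the helper summarizes all trainable variables. A usage sketch (the log directory is an arbitrary example):

merged = summarize_variables()  # defaults to tf.trainable_variables()
writer = tf.summary.FileWriter("/tmp/tflearn_logs")
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer.add_summary(sess.run(merged), global_step=0)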
stop_gradient_test.py (project: sonnet, author: deepmind)
def testUsage(self):
    with tf.variable_scope("", custom_getter=snt.custom_getters.stop_gradient):
      lin1 = snt.Linear(10, name="linear1")

    x = tf.placeholder(tf.float32, [10, 10])
    y = lin1(x)

    variables = tf.trainable_variables()
    variable_names = [v.name for v in variables]

    self.assertEqual(2, len(variables))

    self.assertIn("linear1/w:0", variable_names)
    self.assertIn("linear1/b:0", variable_names)

    grads = tf.gradients(y, variables)

    names_to_grads = {var.name: grad for var, grad in zip(variables, grads)}

    self.assertEqual(None, names_to_grads["linear1/w:0"])
    self.assertEqual(None, names_to_grads["linear1/b:0"])
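What the test verifies: the stop-gradient getter leaves both variables in tf.trainable_variables(), yet tf.gradients returns None for them because every read is wrapped in tf.stop_gradient. A bare-bones version of such a getter, as a sketch:

def stop_gradient_getter(getter, *args, **kwargs):
    # The variable is still created (and still trainable), but no
    # gradient can flow back through this read.
    return tf.stop_gradient(getter(*args, **kwargs))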
basic_test.py (project: sonnet, author: deepmind)
def testCustomGetter(self):
    """Check that custom getters work appropriately."""

    def custom_getter(getter, *args, **kwargs):
      kwargs["trainable"] = False
      return getter(*args, **kwargs)

    inputs = tf.placeholder(tf.float32, shape=[self.batch_size, self.in_size])

    # Make w and b non-trainable.
    lin1 = snt.Linear(output_size=self.out_size,
                      custom_getter=custom_getter)
    lin1(inputs)
    self.assertEqual(0, len(tf.trainable_variables()))
    self.assertEqual(2, len(tf.global_variables()))

    # Make w non-trainable.
    lin2 = snt.Linear(output_size=self.out_size,
                      custom_getter={"w": custom_getter})
    lin2(inputs)
    self.assertEqual(1, len(tf.trainable_variables()))
    self.assertEqual(4, len(tf.global_variables()))
trainer.py (project: StackGAN, author: hanzhanggit)
def prepare_trainer(self, generator_loss, discriminator_loss):
        '''Helper function for init_opt'''
        all_vars = tf.trainable_variables()

        g_vars = [var for var in all_vars if
                  var.name.startswith('g_')]
        d_vars = [var for var in all_vars if
                  var.name.startswith('d_')]

        generator_opt = tf.train.AdamOptimizer(self.generator_lr,
                                               beta1=0.5)
        self.generator_trainer =\
            pt.apply_optimizer(generator_opt,
                               losses=[generator_loss],
                               var_list=g_vars)
        discriminator_opt = tf.train.AdamOptimizer(self.discriminator_lr,
                                                   beta1=0.5)
        self.discriminator_trainer =\
            pt.apply_optimizer(discriminator_opt,
                               losses=[discriminator_loss],
                               var_list=d_vars)
        self.log_vars.append(("g_learning_rate", self.generator_lr))
        self.log_vars.append(("d_learning_rate", self.discriminator_lr))
trainer.py (project: StackGAN, author: hanzhanggit)
def build_model(self, sess):
        self.init_opt()

        sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated
        if len(self.model_path) > 0:
            print("Reading model parameters from %s" % self.model_path)
            all_vars = tf.trainable_variables()
            # all_vars = tf.all_variables()
            restore_vars = []
            for var in all_vars:
                if var.name.startswith('g_') or var.name.startswith('d_'):
                    restore_vars.append(var)
                    # print(var.name)
            saver = tf.train.Saver(restore_vars)
            saver.restore(sess, self.model_path)

            istart = self.model_path.rfind('_') + 1
            iend = self.model_path.rfind('.')
            counter = self.model_path[istart:iend]
            counter = int(counter)
        else:
            print("Created model with fresh parameters.")
            counter = 0
        return counter
train_image_classifier_y.py (project: spoofnet-tensorflow, author: yomna-safaa)
def _get_variables_to_train():
  """Returns a list of variables to train.

  Returns:
    A list of variables to train by the optimizer.
  """
  if FLAGS.trainable_scopes is None:
    return tf.trainable_variables()
  else:
    scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]

  variables_to_train = []
  for scope in scopes:
    variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
    variables_to_train.extend(variables)
  return variables_to_train


########################################################################
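For example, passing --trainable_scopes=InceptionV3/Logits,InceptionV3/AuxLogits (scope names illustrative) would train only the two logits scopes and leave every other variable frozen, which is the usual fine-tuning setup.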
graph_handler.py (project: bi-att-flow, author: allenai)
def _load(self, sess):
        config = self.config
        vars_ = {var.name.split(":")[0]: var for var in tf.global_variables()}  # tf.all_variables() is deprecated
        if config.load_ema:
            ema = self.model.var_ema
            for var in tf.trainable_variables():
                del vars_[var.name.split(":")[0]]
                vars_[ema.average_name(var)] = var
        saver = tf.train.Saver(vars_, max_to_keep=config.max_to_keep)

        if config.load_path:
            save_path = config.load_path
        elif config.load_step > 0:
            save_path = os.path.join(config.save_dir, "{}-{}".format(config.model_name, config.load_step))
        else:
            save_dir = config.save_dir
            checkpoint = tf.train.get_checkpoint_state(save_dir)
            assert checkpoint is not None, "cannot load checkpoint at {}".format(save_dir)
            save_path = checkpoint.model_checkpoint_path
        print("Loading saved model from {}".format(save_path))
        saver.restore(sess, save_path)
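The EMA branch is an evaluation-time trick: for each trainable variable, the saver maps ema.average_name(var), the checkpoint key under which the shadow (averaged) value was saved, onto the live variable, so restoring loads the exponential-moving-average weights instead of the raw ones.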
nvdm.py (project: variational-text-tensorflow, author: carpedm20)
def build_model(self):
    self.x = tf.placeholder(tf.float32, [self.reader.vocab_size], name="input")
    self.x_idx = tf.placeholder(tf.int32, [None], name="x_idx")

    self.build_encoder()
    self.build_generator()

    # Kullback-Leibler divergence
    self.e_loss = -0.5 * tf.reduce_sum(1 + self.log_sigma_sq - tf.square(self.mu) - tf.exp(self.log_sigma_sq))

    # Log likelihood
    self.g_loss = -tf.reduce_sum(tf.log(tf.gather(self.p_x_i, self.x_idx) + 1e-10))

    self.loss = self.e_loss + self.g_loss

    self.encoder_var_list, self.generator_var_list = [], []
    for var in tf.trainable_variables():
      if "encoder" in var.name:
        self.encoder_var_list.append(var)
      elif "generator" in var.name:
        self.generator_var_list.append(var)

    # optimizer for alternative update
    self.optim_e = tf.train.AdamOptimizer(learning_rate=self.lr) \
                         .minimize(self.e_loss, global_step=self.step, var_list=self.encoder_var_list)
    self.optim_g = tf.train.AdamOptimizer(learning_rate=self.lr) \
                         .minimize(self.g_loss, global_step=self.step, var_list=self.generator_var_list)

    # optimizer for one shot update
    self.optim = tf.train.AdamOptimizer(learning_rate=self.lr) \
                         .minimize(self.loss, global_step=self.step)

    _ = tf.scalar_summary("encoder loss", self.e_loss)
    _ = tf.scalar_summary("generator loss", self.g_loss)
    _ = tf.scalar_summary("total loss", self.loss)
nn_skeleton.py (project: squeezeDet-hand, author: fyhtea)
def _add_train_graph(self):
    """Define the training operation."""
    mc = self.mc

    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    lr = tf.train.exponential_decay(mc.LEARNING_RATE,
                                    self.global_step,
                                    mc.DECAY_STEPS,
                                    mc.LR_DECAY_FACTOR,
                                    staircase=True)

    tf.summary.scalar('learning_rate', lr)

    _add_loss_summaries(self.loss)

    opt = tf.train.MomentumOptimizer(learning_rate=lr, momentum=mc.MOMENTUM)
    grads_vars = opt.compute_gradients(self.loss, tf.trainable_variables())

    with tf.variable_scope('clip_gradient') as scope:
      for i, (grad, var) in enumerate(grads_vars):
        if grad is not None:  # variables that do not affect the loss have no gradient
          grads_vars[i] = (tf.clip_by_norm(grad, mc.MAX_GRAD_NORM), var)

    apply_gradient_op = opt.apply_gradients(grads_vars, global_step=self.global_step)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name, var)

    for grad, var in grads_vars:
      if grad is not None:
        tf.summary.histogram(var.op.name + '/gradients', grad)

    with tf.control_dependencies([apply_gradient_op]):
      self.train_op = tf.no_op(name='train')
graph_definition.py (project: skiprnn-2017-telecombcn, author: imatge-upc)
def compute_gradients(loss, learning_rate, gradient_clipping=-1):
    """
    Create optimizer, compute gradients and (optionally) apply gradient clipping
    """
    opt = tf.train.AdamOptimizer(learning_rate)
    if gradient_clipping > 0:
        vars_to_optimize = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(loss, vars_to_optimize), clip_norm=gradient_clipping)
        grads_and_vars = list(zip(grads, vars_to_optimize))
    else:
        grads_and_vars = opt.compute_gradients(loss)
    return opt, grads_and_vars
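A usage sketch for the helper above (the caller-side names are assumptions):

opt, grads_and_vars = compute_gradients(loss, learning_rate=1e-3,
                                        gradient_clipping=5.0)
train_op = opt.apply_gradients(grads_and_vars)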
Gan.py (project: ICGan-tensorflow, author: zhangqianhui)
def build_model1(self):

        #Constructing the Gan
        #Get the variables

        self.fake_images = self.generate(self.z, self.y, weights=self.weights1, biases=self.biases1)

        # the loss of dis network
        self.D_pro = self.discriminate(self.images, self.y, self.weights2, self.biases2, False)

        self.G_pro = self.discriminate(self.fake_images, self.y, self.weights2, self.biases2, True)

        self.G_fake_loss = -tf.reduce_mean(tf.log(self.G_pro + TINY))
        self.loss = -tf.reduce_mean(tf.log(1. - self.G_pro + TINY) + tf.log(self.D_pro + TINY))

        self.log_vars.append(("generator_loss", self.G_fake_loss))
        self.log_vars.append(("discriminator_loss", self.loss))

        t_vars = tf.trainable_variables()

        self.d_vars = [var for var in t_vars if 'dis' in var.name]
        self.g_vars = [var for var in t_vars if 'gen' in var.name]

        self.saver = tf.train.Saver(self.g_vars)

        for k, v in self.log_vars:
            tf.summary.scalar(k, v)

    #Training the Encode_z
Gan.py (project: ICGan-tensorflow, author: zhangqianhui)
def build_model3(self):

        self.weights4, self.biases4 = self.get_en_y_variables()
        # Training Ey
        self.e_y = self.encode_y(self.images, weights=self.weights4, biases=self.biases4)

        self.loss_y = tf.reduce_mean(tf.square(self.e_y - self.y))

        t_vars = tf.trainable_variables()

        self.eny_vars = [var for var in t_vars if 'eny' in var.name]

        self.saver_y = tf.train.Saver(self.eny_vars)

    #Test model
HiPMDP.py (project: hip-mdp-public, author: dtak)
def __initialize_DDQN(self):
        """Initialize Double DQN."""
        tf.reset_default_graph()
        self.mainDQN = Qnetwork(self.num_dims, self.num_actions, clip=self.grad_clip, activation_fn=tf.nn.relu, hidden_layer_sizes=self.ddqn_hidden_layer_sizes)
        self.targetDQN = Qnetwork(self.num_dims, self.num_actions, clip=self.grad_clip, activation_fn=tf.nn.relu, hidden_layer_sizes=self.ddqn_hidden_layer_sizes)
        init = tf.global_variables_initializer()
        self.trainables = tf.trainable_variables()
        self.targetOps = self.__update_target_graph()
        self.sess = tf.Session()
        self.sess.run(init)
        self.__update_target()
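__update_target_graph is not shown; implementations of this Double DQN pattern commonly pair the first half of self.trainables (the main network, created first) with the second half (the target network) and build soft-update assign ops. A hedged sketch of that convention:

def update_target_graph(tf_vars, tau):
    # First half of tf_vars belongs to the main net, second half to the target net.
    total = len(tf_vars)
    ops = []
    for idx, var in enumerate(tf_vars[0:total // 2]):
        target = tf_vars[idx + total // 2]
        ops.append(target.assign(tau * var.value() + (1. - tau) * target.value()))
    return ops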
mnist_gan.py (project: deep-learning, author: ljanyst)
def get_optimizers(self, learning_rate=0.002, smooth=0.1):
        #-----------------------------------------------------------------------
        # Define loss functions
        #-----------------------------------------------------------------------
        with tf.variable_scope('losses'):
            dsc_real_loss = tf.reduce_mean(
              tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.dsc_real_logits,
                labels=tf.ones_like(self.dsc_real_logits) * (1 - smooth)))

            dsc_fake_loss = tf.reduce_mean(
              tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.dsc_fake_logits,
                labels=tf.zeros_like(self.dsc_fake_logits)))

            dsc_loss = (dsc_real_loss + dsc_fake_loss)/2

            gen_loss = tf.reduce_mean(
              tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.dsc_fake_logits,
                labels=tf.ones_like(self.dsc_fake_logits)))

        #-----------------------------------------------------------------------
        # Optimizers
        #-----------------------------------------------------------------------
        trainable_vars = tf.trainable_variables()
        gen_vars = [var for var in trainable_vars \
                      if var.name.startswith('generator')]
        dsc_vars = [var for var in trainable_vars \
                      if var.name.startswith('discriminator')]

        with tf.variable_scope('optimizers'):
            with tf.variable_scope('discriminator_optimizer'):
                dsc_train_opt = tf.train.AdamOptimizer(learning_rate) \
                  .minimize(dsc_loss, var_list=dsc_vars)
            with tf.variable_scope('generator_optimizer'):
                gen_train_opt = tf.train.AdamOptimizer(learning_rate) \
                  .minimize(gen_loss, var_list=gen_vars)

        return dsc_train_opt, gen_train_opt, dsc_loss, gen_loss
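The smooth argument implements one-sided label smoothing: real labels become 1 - smooth instead of 1.0, a standard trick to keep the discriminator from becoming overconfident. Splitting tf.trainable_variables() by the 'generator'/'discriminator' name prefixes lets each Adam optimizer update only its own sub-network.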
vae.py (project: vae-npvc, author: JeremyCCHsu)
def _optimize(self):
        '''
        NOTE: The author said that there was no need for 100 d_iter per 100 iters.
              https://github.com/igul222/improved_wgan_training/issues/3
        '''
        global_step = tf.Variable(0, name='global_step', trainable=False)
        lr = self.arch['training']['lr']
        b1 = self.arch['training']['beta1']
        b2 = self.arch['training']['beta2']

        optimizer = tf.train.AdamOptimizer(lr, b1, b2)

        trainables = tf.trainable_variables()
        g_vars = [v for v in trainables if 'Generator' in v.name or 'y_emb' in v.name]
        d_vars = [v for v in trainables if 'Discriminator' in v.name]
        e_vars = [v for v in trainables if 'Encoder' in v.name]

        # # Debug ===============
        # debug(['Generator', 'Discriminator'], [g_vars, d_vars])
        # # ============================

        with tf.name_scope('Update'):
            opt_d = optimizer.minimize(self.loss['l_D'], var_list=d_vars)
            opt_e = optimizer.minimize(self.loss['l_E'], var_list=e_vars)
            with tf.control_dependencies([opt_e]):
                opt_g = optimizer.minimize(self.loss['l_G'], var_list=g_vars, global_step=global_step)
        return {
            'd': opt_d,
            'g': opt_g,
            'e': opt_e,
            'global_step': global_step
        }
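Note the ordering trick: because opt_g is created inside tf.control_dependencies([opt_e]), fetching the generator update also executes the encoder update first, so a single session run on 'g' advances both networks.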
gan.py (project: vae-npvc, author: JeremyCCHsu)
def _optimize(self):
        '''
        NOTE: The author said that there was no need for 100 d_iter per 100 iters. 
              https://github.com/igul222/improved_wgan_training/issues/3
        '''
        global_step = tf.Variable(0, name='global_step', trainable=False)
        lr = self.arch['training']['lr']
        b1 = self.arch['training']['beta1']
        b2 = self.arch['training']['beta2']

        optimizer = tf.train.AdamOptimizer(lr, b1, b2)

        trainables = tf.trainable_variables()
        g_vars = [v for v in trainables if 'Generator' in v.name or 'y_emb' in v.name]
        d_vars = [v for v in trainables if 'Discriminator' in v.name]

        # # Debug ===============
        # debug(['Generator', 'Discriminator'], [g_vars, d_vars])
        # # ============================

        with tf.name_scope('Update'):        
            opt_g = optimizer.minimize(self.loss['l_G'], var_list=g_vars, global_step=global_step)
            opt_d = optimizer.minimize(self.loss['l_D'], var_list=d_vars)
        return {
            'd': opt_d,
            'g': opt_g,
            'global_step': global_step
        }
gan.py (project: vae-npvc, author: JeremyCCHsu)
def _optimize(self):
        '''
        NOTE: The author said that there was no need for 100 d_iter per 100 iters. 
              https://github.com/igul222/improved_wgan_training/issues/3
        '''
        global_step = tf.Variable(0, name='global_step', trainable=False)
        lr = self.arch['training']['lr']
        b1 = self.arch['training']['beta1']
        b2 = self.arch['training']['beta2']
        rho = self.arch['training']['rho']

        optimizer = tf.train.AdamOptimizer(lr, b1, b2)
        optimizer_l = tf.train.GradientDescentOptimizer(rho)

        trainables = tf.trainable_variables()
        g_vars = [v for v in trainables if 'Generator' in v.name or 'y_emb' in v.name]
        d_vars = [v for v in trainables if 'Discriminator' in v.name]
        l_vars = [v for v in trainables if 'lambda' in v.name]

        # # Debug ===============
        # debug(['G', 'D', 'lambda'], [g_vars, d_vars, l_vars])
        # # ============================

        with tf.name_scope('Update'):        
            opt_g = optimizer.minimize(self.loss['l_G'], var_list=g_vars, global_step=global_step)
            opt_l = optimizer_l.minimize(- self.loss['l_D'], var_list=l_vars)
            with tf.control_dependencies([opt_l]):
                opt_d = optimizer.minimize(self.loss['l_D'], var_list=d_vars)            
        return {
            'd': opt_d,
            'g': opt_g,
            'l': opt_l,
            'global_step': global_step
        }
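Here the Lagrange-multiplier variables (those with 'lambda' in the name) are updated by gradient ascent, implemented as plain SGD at rate rho on the negated discriminator loss, and the control dependency guarantees each multiplier step precedes the discriminator step.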
BaseModel.py (project: kaggle-review, author: daxiongshu)
def _save(self):
        # save weights in .npy format
        # this function could be overwritten
        weights = {}
        tvars = tf.trainable_variables() + tf.get_collection(tf.GraphKeys.SAVE_TENSORS)  # note: SAVE_TENSORS appears to be a project-specific collection key, not a standard tf.GraphKeys constant
        tvars_vals = self.sess.run(tvars)

        for var, val in zip(tvars, tvars_vals):
            weights[var.name] = val

        name = "{}/{}_{}_{}_{}.npy".format(self.flags.save_path, self.flags.task, self.flags.run_name, self.flags.net, self.flags.pre_epochs + int(self.epoch))
        np.save(name, weights)
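A hedged sketch of the matching load step (this repo's actual loader is not shown; the assignment loop below is an assumption):

def _load(self, name):
    # Restore the .npy weight dict written by _save.
    weights = np.load(name, allow_pickle=True).item()
    assign_ops = [var.assign(weights[var.name])
                  for var in tf.trainable_variables()
                  if var.name in weights]
    self.sess.run(assign_ops)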

