Python examples of tf.all_variables()
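tf.all_variables() returns every variable in the current default graph. In newer TensorFlow releases it was renamed to tf.global_variables() (the layers.py snippet below handles both names), so the pattern is worth knowing when porting old code. A minimal compatibility sketch, assuming only that one of the two names exists:

import tensorflow as tf

def list_graph_variables():
    """Version-agnostic listing of all graph variables (sketch)."""
    if hasattr(tf, 'global_variables'):  # TF 0.12 and later
        return tf.global_variables()
    return tf.all_variables()            # older releases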

Source: layers.py (project: deepsleepnet, author: akaraspt)
def get_variables_with_name(name, train_only=True, printable=False):
    """Get variable list by a given name scope.

    Examples
    ---------
    >>> dense_vars = tl.layers.get_variables_with_name('dense', True, True)
    """
    print("  [*] geting variables with %s" % name)
    # tvar = tf.trainable_variables() if train_only else tf.all_variables()
    if train_only:
        t_vars = tf.trainable_variables()
    else:
        try:  # TF 1.0+
            t_vars = tf.global_variables()
        except AttributeError:  # TF 0.12
            t_vars = tf.all_variables()

    d_vars = [var for var in t_vars if name in var.name]
    if printable:
        for idx, v in enumerate(d_vars):
            print("  got {:3}: {:15}   {}".format(idx, v.name, str(v.get_shape())))
    return d_vars
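A hedged usage sketch for the helper above: restricting an optimizer to the matching variables. The scope name 'dense' and the loss tensor d_loss are placeholders, not part of the original code.

dense_vars = get_variables_with_name('dense', train_only=True, printable=True)
# 'd_loss' stands in for a real loss tensor defined elsewhere.
train_op = tf.train.AdamOptimizer(1e-4).minimize(d_loss, var_list=dense_vars)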
Source: decode_parse_nn.py (project: seq2seq_parser, author: trangham283)
def decode():
  """ Decode file sentence-by-sentence  """
  with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
    # Create model and load parameters.
    with tf.variable_scope("model", reuse=None):
      model_dev, steps_done = create_model_default(sess, forward_only=True, attention=attention, model_path=model_path)
    #for v in tf.all_variables():
    #  print(v.name, v.get_shape())

#    eval_batch_size = 64
#    start_time = time.time()
#    do_evalb(model_dev, sess, dev_set, eval_batch_size)
#    time_elapsed = time.time() - start_time
#    print("Batched evalb time: ", time_elapsed)

    start_time = time.time()
    write_decode(model_dev, sess, dev_set) 
    time_elapsed = time.time() - start_time
    print("Decoding all dev time: ", time_elapsed)
Source: train_many2one.py (project: seq2seq_parser, author: trangham283)
def decode(debug=True):
  """ Decode file sentence-by-sentence  """
  with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
    # Create model and load parameters.
    with tf.variable_scope("model", reuse=None):
      model_dev, steps_done = create_model(sess, forward_only=True, dropout=False, model_path=FLAGS.model_path)

    if debug:
      for v in tf.all_variables(): print(v.name, v.get_shape())

    eval_batch_size = 64
    start_time = time.time()
    do_evalb(model_dev, sess, dev_set, eval_batch_size)
    time_elapsed = time.time() - start_time
    print("Batched evalb time: ", time_elapsed)

#    start_time = time.time()
#    write_decode(model_dev, sess, dev_set, eval_batch_size) 
#    time_elapsed = time.time() - start_time
#    print("Decoding all dev time: ", time_elapsed)
Source: decode_parse_nn.py (project: seq2seq_parser, author: trangham283)
def decode():
  """ Decode file sentence-by-sentence  """
  with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
    # Create model and load parameters.
    with tf.variable_scope("model", reuse=None):
      model_dev, steps_done = create_model_default(sess, forward_only=True, dropout=False, model_path=model_path)

    #for v in tf.all_variables():
    #  print(v.name, v.get_shape())

#    eval_batch_size = 64
#    start_time = time.time()
#    do_evalb(model_dev, sess, dev_set, eval_batch_size)
#    time_elapsed = time.time() - start_time
#    print("Batched evalb time: ", time_elapsed)

    start_time = time.time()
    write_decode(model_dev, sess, dev_set) 
    time_elapsed = time.time() - start_time
    print("Decoding all dev time: ", time_elapsed)
Source: save_variables.py (project: seq2seq_parser, author: trangham283)
def save_vars(filename):
  """ Decode file sentence-by-sentence  """
  with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
    # Create model and load parameters.
    with tf.variable_scope("model", reuse=None):
      model_dev, steps_done = create_model_default(sess, forward_only=True, dropout=False, model_path=model_path)
    var_dict = {}
    for var in tf.all_variables():
      if 'Adagrad' in var.name: continue
      var_dict[var.name] = var.eval()

    with open(filename, 'wb') as f:
      pickle.dump(var_dict, f)

    #for v in tf.all_variables():
    #  print(v.name, v.get_shape())
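A hedged sketch of reading the pickled weights back; pickle files must be opened in binary mode, and the stored values are the numpy arrays produced by var.eval():

import pickle

with open(filename, 'rb') as f:
    var_dict = pickle.load(f)
for name, value in var_dict.items():
    print(name, value.shape)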
Source: decode_one.py (project: seq2seq_parser, author: trangham283)
def decode(debug=True):
  """ Decode file sentence-by-sentence  """
  with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
    # Create model and load parameters.
    with tf.variable_scope("model", reuse=None):
      model_dev, steps_done = create_model(sess, forward_only=True, dropout=False, model_path=model_path)

    if debug:
      for v in tf.all_variables(): print(v.name, v.get_shape())

    eval_batch_size = 64
    start_time = time.time()
    do_evalb(model_dev, sess, dev_set, eval_batch_size)
    time_elapsed = time.time() - start_time
    print("Batched evalb time: ", time_elapsed)

#    start_time = time.time()
#    write_decode(model_dev, sess, dev_set, eval_batch_size) 
#    time_elapsed = time.time() - start_time
#    print("Decoding all dev time: ", time_elapsed)
Source: decode_many2one.py (project: seq2seq_parser, author: trangham283)
def decode(debug=True):
  """ Decode file sentence-by-sentence  """
  with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
    # Create model and load parameters.
    with tf.variable_scope("model", reuse=None):
      model_dev, steps_done = create_model(sess, forward_only=True, dropout=False, model_path=model_path)

    if debug:
      for v in tf.all_variables(): print(v.name, v.get_shape())

    eval_batch_size = 64
    start_time = time.time()
    do_evalb(model_dev, sess, dev_set, eval_batch_size)
    time_elapsed = time.time() - start_time
    print("Batched evalb time: ", time_elapsed)

#    start_time = time.time()
#    write_decode(model_dev, sess, dev_set, eval_batch_size) 
#    time_elapsed = time.time() - start_time
#    print("Decoding all dev time: ", time_elapsed)
Source: train_many2one.py (project: seq2seq_parser, author: trangham283)
def decode(debug=True):
  """ Decode file """
  with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
    # Create model and load parameters.
    with tf.variable_scope("model", reuse=None):
      model_dev, steps_done = create_model(sess, forward_only=True)

    if debug:
      for v in tf.all_variables(): print(v.name, v.get_shape())

    dev_set = load_dev_data()
    eval_batch_size = FLAGS.batch_size

    start_time = time.time()
    write_decode(model_dev, sess, dev_set, eval_batch_size, steps_done, eval_now=True) 
    time_elapsed = time.time() - start_time
    print("Decoding all dev time: ", time_elapsed)
Source: save_variables.py (project: seq2seq_parser, author: trangham283)
def save_vars(filename):
  """ Decode file sentence-by-sentence  """
  with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
    # Create model and load parameters.
    with tf.variable_scope("model", reuse=None):
      model_dev, steps_done = create_model_default(sess, forward_only=True, dropout=False, model_path=model_path)
    var_dict = {}
    for var in tf.all_variables():
      print(var.name, var.get_shape())
      if 'Adagrad' in var.name: continue
      var_dict[var.name] = var.eval()

    with open(filename, 'wb') as f:
      pickle.dump(var_dict, f)

    #for v in tf.all_variables():
    #  print(v.name, v.get_shape())
Source: decode_parse_nn.py (project: seq2seq_parser, author: trangham283)
def decode():
  """ Decode file sentence-by-sentence  """
  with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
    # Create model and load parameters.
    with tf.variable_scope("model", reuse=None):
      model_dev, steps_done = create_model_default(sess, forward_only=True, attention=attention, model_path=model_path)
    #with tf.variable_scope("model", reuse=True):
    #  model_dev = seq2seq_model.Seq2SeqModel(90000, 128, _buckets, 256, 3, 512, 5.0, 128, 0.1, 0.99, forward_only=True, attention=attention)      
    #for v in tf.all_variables():
    #  print(v.name, v.get_shape())

    #eval_batch_size = 64
    #start_time = time.time()
    #do_evalb(model_dev, sess, dev_set, eval_batch_size)
    #time_elapsed = time.time() - start_time
    #print("Batched evalb time: ", time_elapsed)

    start_time = time.time()
    write_decode(model_dev, sess, dev_set) 
    time_elapsed = time.time() - start_time
    print("Decoding all dev time: ", time_elapsed)
Source: decode_parse_nn.py (project: seq2seq_parser, author: trangham283)
def decode():
  """ Decode file sentence-by-sentence  """
  with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
    # Create model and load parameters.
    with tf.variable_scope("model", reuse=None):
      model_dev, steps_done = create_model_default(sess, forward_only=True, attention=attention, model_path=model_path)

#    for v in tf.all_variables():
#      print(v.name, v.get_shape())

    eval_batch_size = 64
    start_time = time.time()
    do_evalb(model_dev, sess, dev_set, eval_batch_size)
    time_elapsed = time.time() - start_time
    print("Batched evalb time: ", time_elapsed)

#    start_time = time.time()
#    write_decode(model_dev, sess, dev_set) 
#    time_elapsed = time.time() - start_time
#    print("Decoding all dev time: ", time_elapsed)
Source: train.py (project: lm, author: siddk)
def main(_):
    """
    Main function, loads and vectorizes the data, instantiates the network variables, and runs the
    training process.
    """
    # Perform data preprocessing here
    # TODO

    with tf.Session() as sess:
        # Instantiate Network
        print('Building Network!')
        rlangmod = RLangmod(FLAGS.vocabulary_size, FLAGS.embedding_size, FLAGS.hidden_size)

        # Create a saver.
        saver = tf.train.Saver(tf.all_variables())

        # Initialize all variables
        print "Initializing Variables"
        sess.run(tf.initialize_all_variables())
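A side note on the Saver construction above: passing tf.all_variables() matches the default behavior, since a Saver built with no arguments already covers all saveable variables.

saver = tf.train.Saver()  # equivalent: defaults to all saveable variables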
Source: behavioral_cloning.py (project: gail-driver, author: sisl)
def save_h5(args, net):
    # Begin tf session
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())

        # load from previous save
        if len(args.ckpt_name) > 0:
            saver.restore(sess, os.path.join(args.save_dir, args.ckpt_name))
        else:
            print('checkpoint name not specified... exiting.')
            return

        vs = tf.get_collection(tf.GraphKeys.VARIABLES)
        vals = sess.run(vs)
        exclude = ['learning_rate', 'beta', 'Adam']

        with h5py.File(args.h5_name, 'a') as f:
            dset = f.create_group('iter00001')
            for v, val in safezip(vs, vals):
                if all([e not in v.name for e in exclude]):
                    dset[v.name] = val

# Train network
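A hedged sketch of reading the weights back out of the HDF5 file written above. TF variable names contain '/', so h5py stores each one as a nested group path; visititems walks the hierarchy. The file name 'weights.h5' is a placeholder for args.h5_name.

import h5py

def show(name, obj):
    if isinstance(obj, h5py.Dataset):
        print(name, obj.shape)

with h5py.File('weights.h5', 'r') as f:
    f['iter00001'].visititems(show)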
Source: trainer.py (project: DialogueBreakdownDetection2016, author: icoxfog417)
def set_optimizer(self, session, learning_rate=0.5, learning_rate_decay_factor=0.99, max_gradient_norm=5.0, load_if_exist=True):
        self.global_step = tf.Variable(0, trainable=False)
        self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
        self.learning_rate_opr = self.learning_rate.assign(self.learning_rate * learning_rate_decay_factor)
        self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)

        self.outputs, self.losses = self.calc_loss()

        params = tf.trainable_variables()
        for b in range(len(self.buckets)):
            gradients = tf.gradients(self.losses[b], params)
            clipped_gradients, norm = tf.clip_by_global_norm(gradients, max_gradient_norm)

            self.gradient_norms.append(norm)
            self.updates.append(self.optimizer.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step))

        self.saver = tf.train.Saver(tf.all_variables())
        session.run(tf.initialize_all_variables())
        if load_if_exist and self.train_dir:
            saved = tf.train.get_checkpoint_state(self.train_dir)
            if saved and tf.gfile.Exists(saved.model_checkpoint_path):
                self.saver.restore(session, saved.model_checkpoint_path)
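A hedged sketch of how the learning_rate_opr defined above is typically driven, following the common seq2seq training-loop pattern; loss and previous_losses are assumed to be maintained by the caller's loop.

# Decay the learning rate when the loss stops improving (assumed loop state).
if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
    session.run(self.learning_rate_opr)
previous_losses.append(loss)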
Source: model.py (project: DialogueBreakdownDetection2016, author: icoxfog417)
def build(self, session, predict=True, projection=True):
        for j, bucket in enumerate(self.buckets):
            with vs.variable_scope(vs.get_variable_scope(), reuse=True if j > 0 else None):
                o, d_s, e_s = self.model.forward(
                    self.encoder_inputs[:bucket[0]], self.decoder_inputs[:bucket[1]], predict=predict, projection=projection
                )
                self._outputs.append(o)
                self._encoder_state.append(e_s)
                self._decoder_state.append(d_s)

        self.saver = tf.train.Saver(tf.all_variables())
        session.run(tf.initialize_all_variables())
        if self.model_path:
            saved = tf.train.get_checkpoint_state(self.model_path)
            if saved and tf.gfile.Exists(saved.model_checkpoint_path):
                self.saver.restore(session, saved.model_checkpoint_path)
        self._graph_builded = True
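The reuse=True if j > 0 else None pattern above shares one set of weights across all buckets. A minimal, hedged illustration of that scope mechanic in TF 1.x:

with tf.variable_scope('bucket_rnn'):
    a = tf.get_variable('w', [4])        # first entry creates the variable
with tf.variable_scope('bucket_rnn', reuse=True):
    b = tf.get_variable('w', [4])        # reuse returns the same variable
assert a is b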
Source: trainer.py (project: DialogueBreakdownDetection2016, author: icoxfog417)
def set_optimizer(self, session, learning_rate=0.1, learning_rate_decay_factor=0.99, max_gradient_norm=5.0, load_if_exist=True):
        self.global_step = tf.Variable(0, trainable=False)
        self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
        self.learning_rate_opr = self.learning_rate.assign(self.learning_rate * learning_rate_decay_factor)
        self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)

        self.outputs, self.losses = self.calc_loss()

        params = tf.trainable_variables()
        for b in range(len(self.buckets)):
            gradients = tf.gradients(self.losses[b], params)
            clipped_gradients, norm = tf.clip_by_global_norm(gradients, max_gradient_norm)

            self.gradient_norms.append(norm)
            self.updates.append(self.optimizer.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step))

        self.saver = tf.train.Saver(tf.all_variables())
        session.run(tf.initialize_all_variables())
        if load_if_exist and self.train_dir:
            saved = tf.train.get_checkpoint_state(self.train_dir)
            if saved and tf.gfile.Exists(saved.model_checkpoint_path):
                self.saver.restore(session, saved.model_checkpoint_path)

