Example source code for Python's initialize_all_variables()
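
The snippets below are collected from open-source projects. Note that tf.initialize_all_variables() has been deprecated in favor of tf.global_variables_initializer(); both return an op that, when run, initializes every variable created so far. A minimal TF 1.x-style usage sketch:

import tensorflow as tf

v = tf.Variable(tf.zeros([2, 2]), name="v")
init = tf.initialize_all_variables()  # deprecated alias of tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)  # variables must be initialized before first use
    print(sess.run(v))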

translate.py (project: tf-seq2seq-mod, author: knok)
def self_test():
  """Test the translation model."""
  with tf.Session() as sess:
    print("Self-test for neural translation model.")
    # Create model with vocabularies of 10, 2 small buckets, 2 layers of 32.
    model = seq2seq_model.Seq2SeqModel(10, 10, [(3, 3), (6, 6)], 32, 2,
                                       5.0, 32, 0.3, 0.99, num_samples=8)
    sess.run(tf.initialize_all_variables())

    # Fake data set for both the (3, 3) and (6, 6) bucket.
    data_set = ([([1, 1], [2, 2]), ([3, 3], [4]), ([5], [6])],
                [([1, 1, 1, 1, 1], [2, 2, 2, 2, 2]), ([3, 3, 3], [5, 6])])
    for _ in xrange(5):  # Train the fake model for 5 steps.
      bucket_id = random.choice([0, 1])
      encoder_inputs, decoder_inputs, target_weights = model.get_batch(
          data_set, bucket_id)
      model.step(sess, encoder_inputs, decoder_inputs, target_weights,
                 bucket_id, False)
critic_network.py (project: -NIPS-2017-Learning-to-Run, author: kyleliang919)
def __init__(self,sess,state_dim,action_dim):
        self.time_step = 0
        self.sess = sess
        # create q network
        self.state_input,\
        self.action_input,\
        self.q_value_output,\
        self.net = self.create_q_network(state_dim,action_dim)

        # create target q network (same structure as the q network)
        self.target_state_input,\
        self.target_action_input,\
        self.target_q_value_output,\
        self.target_update = self.create_target_q_network(state_dim,action_dim,self.net)

        self.create_training_method()

        # initialization
        self.sess.run(tf.initialize_all_variables())

        self.update_target()
actor_network.py (project: -NIPS-2017-Learning-to-Run, author: kyleliang919)
def __init__(self,sess,state_dim,action_dim):

        self.sess = sess
        self.state_dim = state_dim
        self.action_dim = action_dim
        # create actor network
        self.state_input,self.action_output,self.net = self.create_network(state_dim,action_dim)

        # create target actor network
        self.target_state_input,self.target_action_output,self.target_update,self.target_net = self.create_target_network(state_dim,action_dim,self.net)

        # define training rules
        self.create_training_method()

        self.sess.run(tf.initialize_all_variables())

        self.update_target()
        #self.load_network()
value_function.py (project: -NIPS-2017-Learning-to-Run, author: kyleliang919)
def create_net(self, shape):
        hidden_size = 64
        print(shape)
        self.x = tf.placeholder(tf.float32, shape=[None, shape], name="x")
        self.y = tf.placeholder(tf.float32, shape=[None], name="y")

        weight_init = tf.random_uniform_initializer(-0.05, 0.05)
        bias_init = tf.constant_initializer(0)

        with tf.variable_scope("VF"):
            h1 = tf.nn.relu(fully_connected(self.x, shape, hidden_size, weight_init, bias_init, "h1"))
            h2 = tf.nn.relu(fully_connected(h1, hidden_size, hidden_size, weight_init, bias_init, "h2"))
            h3 = fully_connected(h2, hidden_size, 1, weight_init, bias_init, "h3")
        self.net = tf.reshape(h3, (-1,))
        l2 = tf.nn.l2_loss(self.net - self.y)
        self.train = tf.train.AdamOptimizer().minimize(l2)
        # Note: this initializes every variable in the default graph, not just
        # those under the "VF" scope, so previously created variables are reset too.
        self.session.run(tf.initialize_all_variables())
critic_network.py (project: -NIPS-2017-Learning-to-Run, author: kyleliang919)
def __init__(self,sess,state_dim,action_dim):
        self.time_step = 0
        self.sess = sess
        # create q network
        self.state_input,\
        self.action_input,\
        self.q_value_output,\
        self.net = self.create_q_network(state_dim,action_dim,"cbeh")

        # create target q network (same structure as the q network)
        self.target_state_input,\
        self.target_action_input,\
        self.target_q_value_output,\
        self.target_update = self.create_target_q_network(state_dim,action_dim,self.net,"ctare")

        self.create_training_method()

        # initialization
        self.sess.run(tf.initialize_all_variables())

        self.update_target()
actor_network.py (project: -NIPS-2017-Learning-to-Run, author: kyleliang919)
def __init__(self,sess,state_dim,action_dim):

        self.sess = sess
        self.state_dim = state_dim
        self.action_dim = action_dim
        # create actor network

        self.state_input,self.action_output,self.net = self.create_network(state_dim,action_dim,"beh")

        # create target actor network
        self.target_state_input,self.target_action_output,self.target_update,self.target_net = self.create_target_network(state_dim,action_dim,self.net)

        # define training rules
        self.create_training_method()

        self.sess.run(tf.initialize_all_variables())

        self.update_target()
        #self.load_network()
variational_autoencoder.py (project: hyperchamber, author: 255BITS)
def __init__(self, network_architecture, transfer_fct=tf.nn.softplus, 
                 learning_rate=0.001, batch_size=100):
        self.network_architecture = network_architecture
        self.transfer_fct = transfer_fct
        self.learning_rate = learning_rate
        self.batch_size = batch_size

        # tf Graph input
        self.x = tf.placeholder(tf.float32, [None, network_architecture["n_input"]])

        # Create autoencoder network
        self._create_network()
        # Define the loss function based on the variational upper bound
        # and the corresponding optimizer
        self._create_loss_optimizer()

        # Initialize the TensorFlow variables
        init = tf.initialize_all_variables()

        # Launch the session
        self.sess = tf.InteractiveSession()
        self.sess.run(init)
execute.py (project: deep-news-summarization, author: hengluchang)
def create_model(session, forward_only):

  """Create model and initialize or load parameters"""
  model = seq2seq_model.Seq2SeqModel(
      gConfig['enc_vocab_size'], gConfig['dec_vocab_size'], _buckets,
      gConfig['hidden_units'], gConfig['num_layers'],
      gConfig['max_gradient_norm'], gConfig['batch_size'],
      gConfig['learning_rate'], gConfig['learning_rate_decay_factor'],
      forward_only=forward_only)

  if 'pretrained_model' in gConfig:
      model.saver.restore(session,gConfig['pretrained_model'])
      return model

  ckpt = tf.train.get_checkpoint_state(gConfig['working_directory'])
  if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
    print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
    model.saver.restore(session, ckpt.model_checkpoint_path)
  else:
    print("Created model with fresh parameters.")
    session.run(tf.initialize_all_variables())
  return model
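
The restore-or-initialize logic in create_model() above recurs throughout these snippets. Distilled into a standalone helper, it looks like the sketch below (function and argument names are illustrative, not part of the project's API):

def restore_or_init(session, saver, ckpt_dir):
    """Restore the latest checkpoint if one exists, else initialize fresh."""
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
        saver.restore(session, ckpt.model_checkpoint_path)
    else:
        session.run(tf.initialize_all_variables())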
naive_tf_test.py (project: automatic-portrait-tf, author: Corea)
def main():
    model_filename = '../fcn8s-heavy-pascal.mat'
    input_image_filename = '../cat.jpg'

    caffe_mat = np.load(model_filename)
    image = build_image(input_image_filename)
    net = build_fcn8s(caffe_mat, image)
    feed_dict = {
        net['input']: image
    }

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        result = sess.run(tf.argmax(net['score'], dimension=3),
                          feed_dict=feed_dict)

    save_image(result)
test_gradient_moment.py (project: probabilistic_line_search, author: ProbabilisticNumerics)
def setUp(self):    
    # Set up model
    tf.reset_default_graph()
    X = tf.placeholder(tf.float32, shape=[None, 784])
    y = tf.placeholder(tf.float32, shape=[None, 10])
    W_fc1 = weight_variable([784, 1024])
    b_fc1 = bias_variable([1024])
    h_fc1 = tf.nn.relu(tf.matmul(X, W_fc1) + b_fc1)
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    h_fc2 = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)
    losses = -tf.reduce_sum(y*tf.log(h_fc2), reduction_indices=[1])

    self.loss = tf.reduce_mean(losses)
    self.batch_size = tf.cast(tf.gather(tf.shape(losses), 0), tf.float32)
    self.var_list = [W_fc1, b_fc1, W_fc2, b_fc2]
    self.X = X
    self.y = y

    self.sess = tf.Session()
    self.sess.run(tf.initialize_all_variables())

    self.mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
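
A side note on the loss in setUp(): applying tf.log to a softmax output is numerically unstable once the softmax saturates. The fused op is the usual remedy; a sketch using the pre-softmax logits built from the same h_fc1, W_fc2, b_fc2, and y:

logits = tf.matmul(h_fc1, W_fc2) + b_fc2  # pre-softmax activations
losses = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)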
nasm.py (project: variational-text-tensorflow, author: carpedm20)
def train(self, config):
    start_time = time.time()

    merged_sum = tf.merge_all_summaries()
    writer = tf.train.SummaryWriter("./logs", self.sess.graph_def)

    tf.initialize_all_variables().run()
    self.load(self.checkpoint_dir)

    for epoch in range(self.epoch):
      epoch_loss = 0.

      for idx, x in enumerate(self.reader.next_batch()):
        _, loss, e_loss, g_loss, summary_str = self.sess.run(
            [self.optim, self.loss, self.e_loss, self.g_loss, merged_sum], feed_dict={self.x: x})

        epoch_loss += loss
        if idx % 10 == 0:
          print("Epoch: [%2d] [%4d/%4d] time: %4.4f, loss: %.8f, e_loss: %.8f, g_loss: %.8f" \
              % (epoch, idx, self.reader.batch_cnt, time.time() - start_time, loss, e_loss, g_loss))

        # `step` is undefined in the original excerpt; a global step counter
        # like this one is presumably intended.
        step = epoch * self.reader.batch_cnt + idx

        if idx % 2 == 0:
          writer.add_summary(summary_str, step)

        if idx != 0 and idx % 1000 == 0:
          self.save(self.checkpoint_dir, step)
base.py (project: variational-text-tensorflow, author: carpedm20)
def initialize(self, log_dir="./logs"):
    self.merged_sum = tf.merge_all_summaries()
    self.writer = tf.train.SummaryWriter(log_dir, self.sess.graph_def)

    tf.initialize_all_variables().run()
    self.load(self.checkpoint_dir)

    start_iter = self.step.eval()
visual_search.py (project: visual-search, author: GYXie)
def extract_feature(imgs):
    x, fc6 = initModel()
    # init = tf.initialize_all_variables()
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    return sess.run(fc6, feed_dict={x: imgs})
train.py (project: tf_rnnlm, author: Ubiqus)
def _run(self):
    m, mvalid, mtest = self.train_model, self.validation_model, self.test_model
    config = self.config
    data = self.data
    params = self.params

    init_op = tf.initialize_all_variables()
    with tf.Session() as session:
      session.run(init_op)

      print("Starting training from epoch %d using %s loss" % (config.epoch, m.loss_fct))

      while config.epoch <= config.max_max_epoch:
        i = config.epoch
        lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
        m.assign_lr(session, config.learning_rate * lr_decay)

        print("\nEpoch: %d Learning rate: %.3f" % (i, session.run(m.lr)))
        train_perplexity = run_epoch(session, m,
          data.train,
          eval_op=m.train_op,
          verbose=True,
          opIO=self.io,
          log_rate=params.log_rate,
          save_rate=params.save_rate)
        print("Epoch: %d Train Perplexity: %.3f" % (i, train_perplexity))

        print("Validation using %s loss" % mvalid.loss_fct)
        valid_perplexity = run_epoch(session, mvalid, data.valid)
        print("Epoch: %d Valid Perplexity: %.3f" % (i, valid_perplexity))

        config.step = 0
        config.epoch += 1
        config.save()

        self.io.save_checkpoint(session, "ep_%d.ckpt" % config.epoch)
save_samples.py (project: encore.ai, author: dyelax)
def save(artist, model_path, num_save):
    sample_save_dir = c.get_dir('../save/samples/')
    sess = tf.Session()

    print artist

    data_reader = DataReader(artist)
    vocab = data_reader.get_vocab()

    print 'Init model...'
    model = LSTMModel(sess,
                      vocab,
                      c.BATCH_SIZE,
                      c.SEQ_LEN,
                      c.CELL_SIZE,
                      c.NUM_LAYERS,
                      test=True)

    saver = tf.train.Saver()
    sess.run(tf.initialize_all_variables())

    saver.restore(sess, model_path)
    print 'Model restored from ' + model_path

    artist_save_dir = c.get_dir(join(sample_save_dir, artist))
    for i in xrange(num_save):
        print i

        path = join(artist_save_dir, str(i) + '.txt')
        sample = model.generate()
        processed_sample = process_sample(sample)

        with open(path, 'w') as f:
            f.write(processed_sample)
actor_net.py (project: ddpg-aigym, author: stevenpjg)
def __init__(self,num_states,num_actions):
        self.g=tf.Graph()
        with self.g.as_default():
            self.sess = tf.InteractiveSession()


            #actor network model parameters:
            self.W1_a, self.B1_a, self.W2_a, self.B2_a, self.W3_a, self.B3_a,\
            self.actor_state_in, self.actor_model = self.create_actor_net(num_states, num_actions)


            #target actor network model parameters:
            self.t_W1_a, self.t_B1_a, self.t_W2_a, self.t_B2_a, self.t_W3_a, self.t_B3_a,\
            self.t_actor_state_in, self.t_actor_model = self.create_actor_net(num_states, num_actions)

            #cost of actor network:
            self.q_gradient_input = tf.placeholder("float", [None, num_actions])  # gets input from action_gradient computed in critic network file
            self.actor_parameters = [self.W1_a, self.B1_a, self.W2_a, self.B2_a, self.W3_a, self.B3_a]
            self.parameters_gradients = tf.gradients(self.actor_model, self.actor_parameters, -self.q_gradient_input)  # /BATCH_SIZE
            self.optimizer = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(zip(self.parameters_gradients, self.actor_parameters))
            #initialize all tensor variable parameters:
            self.sess.run(tf.initialize_all_variables())    

            # To make sure the actor and target networks start from the same
            # initial parameters, copy them over:
            self.sess.run([
                self.t_W1_a.assign(self.W1_a),
                self.t_B1_a.assign(self.B1_a),
                self.t_W2_a.assign(self.W2_a),
                self.t_B2_a.assign(self.B2_a),
                self.t_W3_a.assign(self.W3_a),
                self.t_B3_a.assign(self.B3_a)])

            self.update_target_actor_op = [
                self.t_W1_a.assign(TAU*self.W1_a+(1-TAU)*self.t_W1_a),
                self.t_B1_a.assign(TAU*self.B1_a+(1-TAU)*self.t_B1_a),
                self.t_W2_a.assign(TAU*self.W2_a+(1-TAU)*self.t_W2_a),
                self.t_B2_a.assign(TAU*self.B2_a+(1-TAU)*self.t_B2_a),
                self.t_W3_a.assign(TAU*self.W3_a+(1-TAU)*self.t_W3_a),
                self.t_B3_a.assign(TAU*self.B3_a+(1-TAU)*self.t_B3_a)]
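
The per-variable assign lists above are the standard DDPG soft target update, target <- TAU*source + (1-TAU)*target. The same idiom can be written generically; a sketch with illustrative names:

def make_soft_update_ops(source_vars, target_vars, tau):
    # one assign op per (source, target) variable pair
    return [t.assign(tau * s + (1.0 - tau) * t)
            for s, t in zip(source_vars, target_vars)]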
train.py (project: lstm-poetry, author: dvictor)
def train():
    cleanup.cleanup()
    c.save(c.work_dir)

    data_loader = TextLoader(c.work_dir, c.batch_size, c.seq_length)
    with open(os.path.join(c.work_dir, 'chars_vocab.pkl'), 'wb') as f:
        cPickle.dump((data_loader.chars, data_loader.vocab), f)

    model = Model(c.rnn_size, c.num_layers, len(data_loader.chars), c.grad_clip, c.batch_size, c.seq_length)

    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())
        for e in range(c.num_epochs):
            sess.run(tf.assign(model.lr, c.learning_rate * (c.decay_rate ** e)))
            data_loader.reset_batch_pointer()
            state = model.initial_state.eval()
            for b in range(data_loader.num_batches):
                start = time.time()
                x, y = data_loader.next_batch()
                feed = {model.input_data: x, model.targets: y, model.initial_state: state}
                train_loss, state, _ = sess.run([model.cost, model.final_state, model.train_op], feed)
                end = time.time()
                print("{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
                    .format(e * data_loader.num_batches + b,
                            c.num_epochs * data_loader.num_batches,
                            e, train_loss, end - start))
                if (e * data_loader.num_batches + b) % c.save_every == 0:
                    checkpoint_path = os.path.join(c.work_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=e * data_loader.num_batches + b)
                    print("model saved to {}".format(checkpoint_path))
npg.py (project: rlflow, author: tpbarron)
def __init__(self,
                 env,
                 policy,
                 episode_len=100,
                 discount=False,
                 optimizer='sgd'):

        raise NotImplementedError

        self.env = env
        self.policy = policy
        self.episode_len = episode_len
        self.discount = discount

        self.states = tf.placeholder(tf.float32, shape=(None, 4))
        self.actions = tf.placeholder(tf.float32, shape=(None, 2))
        self.rewards = tf.placeholder(tf.float32, shape=(None))
        self.probs = self.policy.model(self.states)

        self.action_probs = tf.mul(self.probs, self.actions)
        self.reduced_action_probs = tf.reduce_sum(self.action_probs, reduction_indices=[1])
        self.logprobs = tf.log(self.reduced_action_probs)
        self.eligibility = self.logprobs * self.rewards
        self.L = -tf.reduce_sum(self.eligibility)

        # fisher matrix
        self.F = tf.mul(self.logprobs, tf.transpose(self.logprobs))



        # TODO: gen optimizer based on param
        self.opt = tf.train.AdamOptimizer(0.005).minimize(self.L)

        # do gradient update separately so do apply custom function to gradients?
        # self.grads_and_vars = self.opt.compute_gradients(self.L)
        # self.apply_grads = self.opt.apply_gradients(self.grads_and_vars)

        self.sess = tf.Session()
        self.sess.run(tf.initialize_all_variables())
GeneticNetwork.py (project: TF-Genetic, author: thepropterhoc)
def __init__(self, layerDimensions=[], netDimensions=[], validActivationFunctions=[]):

        self.layerDimensions = layerDimensions

        self.x = tf.placeholder(tf.float32, [None, netDimensions[0]])
        previousActivation = self.x

        for idx in range(len(layerDimensions)):
            currentLayer = layerDimensions[idx]
            thisActivation = None
            for functionIndex in range(len(currentLayer)):
                inDim, outDim = currentLayer[functionIndex]
                thisW = tf.Variable(tf.random_normal([inDim, outDim]))
                thisB = tf.Variable(tf.random_normal([outDim]))
                thisFunction = validActivationFunctions[functionIndex]
                newTensor = thisFunction(tf.matmul(previousActivation, thisW) + thisB)
                thisActivation = newTensor if thisActivation is None else tf.concat(1, [thisActivation, newTensor])

            previousActivation = thisActivation

        self.predictedOutput = previousActivation
        self.y_ = tf.placeholder(tf.float32, [None, netDimensions[-1]])
        cross_entropy = tf.reduce_mean(tf.square(self.predictedOutput - self.y_))
        self.train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

        init = tf.initialize_all_variables()
        self.sess = tf.Session(config=tf.ConfigProto(
            inter_op_parallelism_threads=4,
            intra_op_parallelism_threads=4))
        self.sess.run(init)
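
One caveat this constructor gets right: tf.initialize_all_variables() only covers variables that exist at the moment the op is created, which is why it is called after all layers are built. A minimal sketch of the failure mode:

v1 = tf.Variable(1.0)
init = tf.initialize_all_variables()  # covers v1 only
v2 = tf.Variable(2.0)                 # created later; NOT covered by init
with tf.Session() as sess:
    sess.run(init)
    sess.run(v2.initializer)  # v2 must be initialized separately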
cnn.py (project: CNN-MNIST, author: m516825)
def train(self):

        data = Data(self.train_dat, self.train_lab)
        batch_num = self.length/self.batch_size if self.length%self.batch_size == 0 else self.length/self.batch_size + 1

        model = self.add_model()

        with self.sess as sess:

            tf.initialize_all_variables().run()

            for ite in range(self.iterations):
                print "Iteration {}".format(ite)
                cost = 0.
                pbar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar(), pb.ETA()], maxval=batch_num).start()
                for i in range(batch_num):
                    batch_x, batch_y = data.next_batch(self.batch_size)

                    c, _ = self.sess.run([model['loss'], model['optimizer']], feed_dict={model['train_x']:batch_x, model['train_y']:batch_y, model['p_keep_dens']:0.75})

                    cost += c / batch_num
                    pbar.update(i+1)
                pbar.finish()

                print ">>cost: {}".format(cost)

                t_acc, d_acc = self.eval(model, 3000)
                # early stop
                if t_acc >= 0.995 and d_acc >= 0.995:
                    break

            self.predict(model)

