Python Session() class: example source code

run_server.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def load_language(app, tokenizer_service, tag, model_dir):
    config = Config.load(['./default.conf', './default.' + tag + '.conf', os.path.join(model_dir, 'model.conf')])
    model = create_model(config)

    graph = tf.Graph()
    session = tf.Session(graph=graph)
    with graph.as_default():
        # Force everything to run on the CPU; we run on single inputs, so there
        # is not much point in going through the GPU
        with tf.device('/cpu:0'):
            model.build()
            loader = tf.train.Saver()

        with session.as_default():
            loader.restore(session, os.path.join(model_dir, 'best'))
    tokenizer = Tokenizer(tokenizer_service, tag)
    app.add_language(tag, LanguageContext(tag, tokenizer, session, config, model))
    print('Loaded language ' + tag)
main.py (project: variational-text-tensorflow, author: carpedm20)
def main(_):
  pp.pprint(flags.FLAGS.__flags)

  data_path = "./data/%s" % FLAGS.dataset
  reader = TextReader(data_path)

  with tf.Session() as sess:
    m = MODELS[FLAGS.model]
    model = m(sess, reader, dataset=FLAGS.dataset,
              embed_dim=FLAGS.embed_dim, h_dim=FLAGS.h_dim,
              learning_rate=FLAGS.learning_rate, max_iter=FLAGS.max_iter,
              checkpoint_dir=FLAGS.checkpoint_dir)

    if FLAGS.forward_only:
      model.load(FLAGS.checkpoint_dir)
    else:
      model.train(FLAGS)

    while True:
      text = input(" [*] Enter text to test: ")  # raw_input() in the original Python 2 source
      model.sample(5, text)
sequenceNet.py (project: deep-summarization, author: harpribot)
def _start_session(self):
        """
        Starts the Tensorflow Session

        :return: None
        """
        self.sess.run(tf.global_variables_initializer())
        # initialize the saver node
        # print tf.GraphKeys.GLOBAL_VARIABLES
        self.saver = tf.train.Saver(tf.global_variables())
        # get the latest checkpoint
        last_checkpoint_path = self.checkpointer.get_last_checkpoint()
        if last_checkpoint_path is not None:
            print('Previous saved tensorflow objects found... Extracting...')
            # restore the tensorflow variables
            self.saver.restore(self.sess, last_checkpoint_path)
            print('Extraction Complete. Moving Forward....')
test_tf_qrnn_forward.py (project: tensorflow_qrnn, author: icoxfog417)
def test_qrnn_linear_forward(self):
        batch_size = 100
        sentence_length = 5
        word_size = 10
        size = 5
        data = self.create_test_data(batch_size, sentence_length, word_size)

        with tf.Graph().as_default() as q_linear:
            qrnn = QRNN(in_size=word_size, size=size, conv_size=1)
            X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
            forward_graph = qrnn.forward(X)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                hidden = sess.run(forward_graph, feed_dict={X: data})
                self.assertEqual((batch_size, size), hidden.shape)
test_tf_qrnn_forward.py (project: tensorflow_qrnn, author: icoxfog417)
def test_qrnn_with_previous(self):
        batch_size = 100
        sentence_length = 5
        word_size = 10
        size = 5
        data = self.create_test_data(batch_size, sentence_length, word_size)

        with tf.Graph().as_default() as q_with_previous:
            qrnn = QRNN(in_size=word_size, size=size, conv_size=2)
            X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
            forward_graph = qrnn.forward(X)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                hidden = sess.run(forward_graph, feed_dict={X: data})
                self.assertEqual((batch_size, size), hidden.shape)
test_tf_qrnn_forward.py (project: tensorflow_qrnn, author: icoxfog417)
def test_qrnn_convolution(self):
        batch_size = 100
        sentence_length = 5
        word_size = 10
        size = 5
        data = self.create_test_data(batch_size, sentence_length, word_size)

        with tf.Graph().as_default() as q_conv:
            qrnn = QRNN(in_size=word_size, size=size, conv_size=3)
            X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
            forward_graph = qrnn.forward(X)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                hidden = sess.run(forward_graph, feed_dict={X: data})
                self.assertEqual((batch_size, size), hidden.shape)
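
All three QRNN tests above call a `create_test_data` helper that is not shown. A minimal sketch of what it plausibly returns, assuming a random batch of word vectors (the actual tensorflow_qrnn helper may differ):

import numpy as np

def create_test_data(batch_size, sentence_length, word_size):
    # one random word-vector sequence per batch element, shaped to match
    # the [batch, time, features] placeholder used in the tests
    return np.random.random((batch_size, sentence_length, word_size))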
test.py (project: yolo_tensorflow, author: hizhangp)
def __init__(self, net, weight_file):
        self.net = net
        self.weights_file = weight_file

        self.classes = cfg.CLASSES
        self.num_class = len(self.classes)
        self.image_size = cfg.IMAGE_SIZE
        self.cell_size = cfg.CELL_SIZE
        self.boxes_per_cell = cfg.BOXES_PER_CELL
        self.threshold = cfg.THRESHOLD
        self.iou_threshold = cfg.IOU_THRESHOLD
        self.boundary1 = self.cell_size * self.cell_size * self.num_class
        self.boundary2 = self.boundary1 + self.cell_size * self.cell_size * self.boxes_per_cell

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

        print('Restoring weights from: ' + self.weights_file)
        self.saver = tf.train.Saver()
        self.saver.restore(self.sess, self.weights_file)
inference.py (project: dl-classification, author: matthieuo)
def __init__(self, check_):
        self.img_feed = tf.placeholder(tf.float32)

        self.output_logits = tf.nn.softmax(
            models.foodv_test(
                self.img_feed,
                reg_val=0.0,
                is_train=False,
                dropout_p=1.0))

        self.sess = tf.Session()

        self.checkpoint_name = check_

        saver = tf.train.Saver()
        print("loading model...")

        saver.restore(self.sess, self.checkpoint_name)

        print("Model loaded !")
loglikes.py (project: tf_rnnlm, author: Ubiqus)
def _run(self):
    with tf.Session() as session:
      self.io.restore_session(session)

      inputs = sys.stdin
      singsen = SingleSentenceData()
      scounter = SpeedCounter().start()
      while True:
        senlen = singsen.read_from_file(sys.stdin, self.io.w2id)
        if senlen is None:
          break
        if senlen < 2:
          print(-9999)
          continue

        o = run_epoch(session, self.test_model, singsen)
        scounter.next()
        if self.params.progress and scounter.val % 20 == 0:
          print("\rLoglikes per secs: %f" % scounter.speed, end="", file=sys.stderr)
        print("%f" % o)
test-embedding.py (project: deep-learning, author: ljanyst)
def __init__(self, embedding):
        self.sess         = tf.Session()
        self.inputs       = tf.placeholder(tf.float32,
                                           [None, embedding.shape[1]],
                                           name='inputs')
        self.test_vec     = tf.placeholder(tf.float32, [1, embedding.shape[1]],
                                           name='test_vec')
        self.cos_distance = tf.matmul(self.inputs, tf.transpose(self.test_vec))

        #-----------------------------------------------------------------------
        # Compute normalized embedding matrix
        #-----------------------------------------------------------------------
        row_sum    = tf.reduce_sum(tf.square(self.inputs), axis=1,
                                   keep_dims=True)
        norm       = tf.sqrt(row_sum)
        self.normalized = self.inputs / norm
        self.embedding = self.sess.run(self.normalized,
                                       feed_dict={self.inputs: embedding})

    #---------------------------------------------------------------------------
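
A hypothetical usage sketch for the class above (its name is not shown in the snippet, so `EmbeddingTest` and the sample data are assumptions): feed the pre-normalized embedding matrix as `inputs` and one normalized row as `test_vec` to get cosine similarities.

import numpy as np

emb = np.random.rand(5000, 128).astype(np.float32)    # assumed embedding matrix
sim = EmbeddingTest(emb)                              # class name assumed
query = sim.embedding[42:43]                          # an already-normalized row
scores = sim.sess.run(sim.cos_distance,
                      feed_dict={sim.inputs: sim.embedding,
                                 sim.test_vec: query})
nearest = np.argsort(scores[:, 0])[::-1][:10]         # ten most similar rows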
wrapper.py (project: vae-npvc, author: JeremyCCHsu)
def configure_gpu_settings(gpu_cfg=None):
    session_conf = None
    if gpu_cfg:
        with open(gpu_cfg) as f:
            cfg = json.load(f)
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=cfg['per_process_gpu_memory_fraction'])
        session_conf = tf.ConfigProto(
            allow_soft_placement=cfg['allow_soft_placement'],
            log_device_placement=cfg['log_device_placement'],
            inter_op_parallelism_threads=cfg['inter_op_parallelism_threads'],
            intra_op_parallelism_threads=cfg['intra_op_parallelism_threads'],
            gpu_options=gpu_options)
        # Timeline
        # jit_level = 0
        # session_conf.graph_options.optimizer_options.global_jit_level = jit_level
    #     sess = tf.Session(
    #         config=session_conf)
    # else:
    #     sess = tf.Session()
    return session_conf
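
The commented-out lines hint at the intended usage: pass the returned config (possibly None) straight to the session constructor. A minimal sketch, with the config file path assumed:

session_conf = configure_gpu_settings('gpu_config.json')
sess = tf.Session(config=session_conf)  # config=None falls back to TF defaults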
train_val.py (project: HandDetection, author: YunqiuXu)
def train_net(network, imdb, roidb, valroidb, output_dir, tb_dir,
              pretrained_model=None,
              max_iters=40000):
  """Train a Faster R-CNN network."""
  roidb = filter_roidb(roidb)
  valroidb = filter_roidb(valroidb)

  tfconfig = tf.ConfigProto(allow_soft_placement=True)
  tfconfig.gpu_options.allow_growth = True

  with tf.Session(config=tfconfig) as sess:
    sw = SolverWrapper(sess, network, imdb, roidb, valroidb, output_dir, tb_dir,
                       pretrained_model=pretrained_model)
    print('Solving...')
    sw.train_model(sess, max_iters)
    print('done solving')
sparse_test.py (project: cxflow-tensorflow, author: Cognexa)
def test_dense_to_sparse(self):
        """ Test if `dense_to_sparse` works properly."""

        with tf.Session().as_default():
            dense = tf.constant([[1., 2., 0.], [0., 0., 3.]], dtype=tf.float32)

            sparse = dense_to_sparse(dense)

            self.assertTrue(np.array_equal(sparse.indices.eval(), np.array([[0, 0], [0, 1], [1, 2]])))
            self.assertTrue(np.array_equal(sparse.values.eval(), np.array([1., 2., 3.])))

            mask = tf.constant([[0, 1, 0], [1, 0, 0]], dtype=tf.int32)

            masked = dense_to_sparse(dense, mask)
            self.assertTrue(np.array_equal(masked.indices.eval(), np.array([[0, 1], [1, 0]])))
            self.assertTrue(np.array_equal(masked.values.eval(), np.array([2., 0.])))
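
For reference, a minimal `dense_to_sparse` consistent with the assertions above (the actual cxflow-tensorflow implementation may differ): without a mask, keep the nonzero entries of `dense`; with a mask, keep the entries of `dense` wherever `mask` is nonzero, even when the kept value is 0.

def dense_to_sparse(dense, mask=None):
    # take indices from the mask if given, otherwise from the dense tensor itself
    selector = dense if mask is None else mask
    indices = tf.where(tf.not_equal(selector, 0))
    values = tf.gather_nd(dense, indices)
    return tf.SparseTensor(indices, values, tf.shape(dense, out_type=tf.int64))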
repeat_test.py (project: cxflow-tensorflow, author: Cognexa)
def test_repeat(self):
        """ Test if `repeat` works the same as np.repeat."""

        with tf.Session().as_default():
            # try different tensor types
            for npdtype, tfdtype in [(np.int32, tf.int32), (np.float32, tf.float32)]:
                for init_value in [np.array([0, 1, 2, 3], dtype=npdtype),
                                   np.array([[0, 1], [2, 3], [4, 5]], dtype=npdtype)]:
                    # and all their axes
                    for axis in range(len(init_value.shape)):
                        for repeats in [1, 2, 3, 11]:
                            tensor = tf.constant(init_value, dtype=tfdtype)

                            repeated_value = repeat(tensor, repeats=repeats, axis=axis).eval()
                            expected_value = np.repeat(init_value, repeats=repeats, axis=axis)

                            self.assertTrue(np.all(repeated_value == expected_value))
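
A minimal `repeat` consistent with this test, mirroring `np.repeat` along a given axis for statically shaped tensors (the library's real implementation may differ):

def repeat(tensor, repeats, axis):
    # insert a new axis right after `axis`, tile along it, then merge the two
    expanded = tf.expand_dims(tensor, axis + 1)
    multiples = [1] * (tensor.shape.ndims + 1)
    multiples[axis + 1] = repeats
    shape = tensor.shape.as_list()
    shape[axis] *= repeats
    return tf.reshape(tf.tile(expanded, multiples), shape)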
utils_test.py (project: cxflow-tensorflow, author: Cognexa)
def test_create_optimizer(self):
        """Test if create optimizer does work with tf optimizers."""

        optimizer_config = {'learning_rate': 0.1}

        # test missing required entry `class`
        self.assertRaises(AssertionError, create_optimizer, optimizer_config)

        optimizer_config['class'] = 'tensorflow.python.training.gradient_descent.GradientDescentOptimizer'

        with tf.Session().as_default():
            # test if the optimizer is created correctly
            optimizer = create_optimizer(optimizer_config)
            self.assertIsInstance(optimizer, tf.train.GradientDescentOptimizer)

            # test if learning_rate variable is created with the correct value
            lr_tensor = tf.get_default_graph().get_tensor_by_name('learning_rate:0')
            tf.get_default_session().run(tf.global_variables_initializer())
            self.assertAlmostEqual(lr_tensor.eval(), 0.1)

        optimizer_config2 = {'learning_rate': 0.1, 'class': 'tensorflow.python.training.momentum.MomentumOptimizer'}

        # test missing required argument (momentum in this case)
        with tf.Graph().as_default():
            self.assertRaises(TypeError, create_optimizer, optimizer_config2)
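
A sketch of `create_optimizer` that would satisfy this test (the real cxflow-tensorflow implementation may differ): resolve the dotted `class` path, wrap `learning_rate` in a variable of that name so the test can find it, and forward any remaining entries as keyword arguments.

import importlib

def create_optimizer(optimizer_config):
    config = dict(optimizer_config)
    assert 'class' in config  # a missing `class` entry raises AssertionError
    module_name, class_name = config.pop('class').rsplit('.', 1)
    optimizer_cls = getattr(importlib.import_module(module_name), class_name)
    learning_rate = tf.Variable(config.pop('learning_rate'), trainable=False,
                                name='learning_rate')
    # missing required kwargs (e.g. momentum) surface here as TypeError
    return optimizer_cls(learning_rate=learning_rate, **config)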
movielens_vae_test.py (project: VAE-MF-TensorFlow, author: arongdari)
def cross_validation():
    M = read_dataset()
    n_fold = 10

    rating_idx = np.array(M.nonzero()).T
    kf = KFold(n_splits=n_fold, random_state=0)

    with tf.Session() as sess:
        model = VAEMF(sess, num_user, num_item,
                      hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
                      latent_dim=latent_dim, output_dim=output_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param)

        for i, (train_idx, test_idx) in enumerate(kf.split(rating_idx)):
            print("{0}/{1} Fold start| Train size={2}, Test size={3}".format(i,
                                                                             n_fold, train_idx.size, test_idx.size))
            model.train(M, train_idx=train_idx,
                        test_idx=test_idx, n_steps=n_steps)
movielens_test.py (project: VAE-MF-TensorFlow, author: arongdari)
def cross_validation():
    M = read_dataset()
    n_fold = 10

    rating_idx = np.array(M.nonzero()).T
    kf = KFold(n_splits=n_fold, random_state=0)

    with tf.Session() as sess:
        model = VAEMF(sess, num_user, num_item,
                      hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
                      latent_dim=latent_dim, output_dim=output_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param, one_hot=one_hot)

        for i, (train_idx, test_idx) in enumerate(kf.split(rating_idx)):
            print("{0}/{1} Fold start| Train size={2}, Test size={3}".format(i,
                                                                             n_fold, train_idx.size, test_idx.size))
            model.train(M, train_idx=train_idx,
                        test_idx=test_idx, n_steps=n_steps)
unet.py (project: lung-cancer-detector, author: YichenGong)
def predict(self, model_path, x_test):
        """
        Uses the model to create a prediction for the given data

        :param model_path: path to the model checkpoint to restore
        :param x_test: Data to predict on. Shape [n, nx, ny, channels]
        :returns prediction: the U-Net prediction, shape [n, px, py, labels] (px = nx - self.offset/2)
        """

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            # Initialize variables
            sess.run(init)

            # Restore model weights from previously saved model
            self.restore(sess, model_path)

            y_dummy = np.empty((x_test.shape[0], x_test.shape[1], x_test.shape[2], self.n_class))
            prediction = sess.run(self.predicter, feed_dict={self.x: x_test, self.y: y_dummy, self.keep_prob: 1.})

        return prediction
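
A hypothetical call, assuming `net` is an already-built instance of this class and the shapes and paths are placeholders:

x_test = np.random.rand(4, 572, 572, 1).astype(np.float32)    # shape assumed
prediction = net.predict('./checkpoints/model.ckpt', x_test)  # path assumed
print(prediction.shape)  # [n, px, py, labels] as described in the docstring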
aggressive_multi_head_UNET_2d.py (project: lung-cancer-detector, author: YichenGong)
def start(self, restore=False):
        self._sess = tf.Session()
        self._init = tf.global_variables_initializer()
        self._saver = tf.train.Saver()

        self._summary = tf.summary.merge_all()
        self._summary_writer = tf.summary.FileWriter(self.config.model_save_path, graph=self._sess.graph)
        self._summary_writer.flush()

        self._sess.run(self._init)

        if restore:
            checkpoint = tf.train.get_checkpoint_state(self.config.model_save_path)
            if checkpoint and checkpoint.model_checkpoint_path:
                # restore via the Saver instance (tf.train itself has no restore())
                self._saver.restore(self._sess, checkpoint.model_checkpoint_path)

        self._started = True
train.py (project: DeepWorks, author: daigo0927)
def __init__(self,
                 z_dim, image_size,
                 lr_d, lr_g):

        self.sess = tf.Session()

        self.z_dim = z_dim
        self.image_size = image_size

        self.gen = GeneratorDeconv(input_size = z_dim,
                                   image_size = image_size)
        self.disc = Discriminator()

        self._build_graph(lr_d = lr_d, lr_g = lr_g)

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())
train.py (project: DeepWorks, author: daigo0927)
def __init__(self,
                 label_size,
                 z_dim, image_size,
                 lr_d, lr_g):

        self.sess = tf.Session()

        self.label_size = label_size
        self.z_dim = z_dim
        self.image_size = image_size

        self.gen = GeneratorDeconv(input_size = z_dim+label_size,
                                   image_size = image_size)
        self.disc = Discriminator()

        self._build_graph(lr_d = lr_d, lr_g = lr_g)

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())
BaseUnet.py (project: kaggle-review, author: daxiongshu)
def predictPL(self):
        B = self.flags.batch_size
        W,H,C = self.flags.width, self.flags.height, self.flags.color
        inputs = tf.placeholder(dtype=tf.float32,shape=[None,H,W,C])

        #with open(self.flags.pred_path,'w') as f:
        #    pass

        self._build(inputs,resize=False)
        counter = 0
        with tf.Session() as sess:
            self.sess = sess
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            for imgs,imgnames in self.DATA.test_generator():
                pred = sess.run(self.logit,feed_dict={inputs:imgs})
                np.save("%s/%d.npy"%(self.flags.pred_path,counter),{"pred":pred,"name":imgnames})
                counter+=len(imgs)
                if counter // B % 10 == 0:
                    print_mem_time("%d images predicted"%counter)

    # train with placeholders
BaseModel.py (project: kaggle-review, author: daxiongshu)
def predict_from_placeholder(self,activation=None):
        self._build()
        self._get_summary()
        if activation is not None:
            self.logit = self._activate(self.logit,activation)
        with open(self.flags.pred_path,'w') as f:
            pass
        count = 0
        with tf.Session() as sess:
            self.sess = sess
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            if self.flags.log_path and self.flags.visualize is not None:
                summary_writer = tf.summary.FileWriter(self.flags.log_path, sess.graph)
            for batch in self._batch_gen_test():
                x,_,epoch = batch
                if self.flags.log_path and self.flags.visualize is not None:
                    summary,pred = sess.run([self.summ_op,self.logit],feed_dict={self.inputs:x,self.is_training:0})
                    summary_writer.add_summary(summary, count)
                else:
                    pred = sess.run(self.logit,feed_dict={self.inputs:x,self.is_training:0})
                count+=1
                if count%self.flags.verbosity == 0:
                    print_mem_time("Epoch %d Batch %d "%(epoch,count))
                self.write_pred(pred)
p71_TextRCNN_model.py (project: text_classification, author: brightmart)
def test():
    # Below is a function test; to use this for text classification, first transform each sentence to vocabulary indices, then feed the data to the graph.
    num_classes=10
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1#0.5
    textRNN=TextRCNN(num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length)) #[None, self.sequence_length]
            input_y = np.array([1, 0, 1, 1, 1, 2, 1, 1])  # alternatively np.zeros((batch_size), dtype=np.int32); shape [None]
            loss,acc,predict,_=sess.run([textRNN.loss_val,textRNN.accuracy,textRNN.predictions,textRNN.train_op],
                                        feed_dict={textRNN.input_x:input_x,textRNN.input_y:input_y,textRNN.dropout_keep_prob:dropout_keep_prob})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
#test()
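
The comment above notes that real sentences must first be converted to vocabulary indices. A minimal sketch of that step, where the `vocab` dict and the use of 0 as the padding/unknown id are assumptions:

def sentence_to_indices(sentence, vocab, sequence_length=5):
    # map each word to its id, then truncate or zero-pad to the fixed length
    ids = [vocab.get(word, 0) for word in sentence.lower().split()][:sequence_length]
    ids = ids + [0] * (sequence_length - len(ids))
    return np.array([ids], dtype=np.int32)  # shape [1, sequence_length] for input_x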
p5_fastTextB_model.py (project: text_classification, author: brightmart)
def test():
    # Below is a function test; to use this for text classification, first transform each sentence to vocabulary indices, then feed the data to the graph.
    num_classes=19
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1
    fastText=fastTextB(num_classes, learning_rate, batch_size, decay_steps, decay_rate,5,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length),dtype=np.int32) #[None, self.sequence_length]
            input_y = np.array([1, 0, 1, 1, 1, 2, 1, 1], dtype=np.int32)  # alternatively np.zeros((batch_size), dtype=np.int32); shape [None]
            loss,acc,predict,_=sess.run([fastText.loss_val,fastText.accuracy,fastText.predictions,fastText.train_op],
                                        feed_dict={fastText.sentence:input_x,fastText.labels:input_y})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
#test()
p8_TextRNN_model.py (project: text_classification, author: brightmart)
def test():
    # Below is a function test; to use this for text classification, first transform each sentence to vocabulary indices, then feed the data to the graph.
    num_classes=10
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1#0.5
    textRNN=TextRNN(num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length)) #[None, self.sequence_length]
            input_y = np.array([1, 0, 1, 1, 1, 2, 1, 1])  # alternatively np.zeros((batch_size), dtype=np.int32); shape [None]
            loss,acc,predict,_=sess.run([textRNN.loss_val,textRNN.accuracy,textRNN.predictions,textRNN.train_op],feed_dict={textRNN.input_x:input_x,textRNN.input_y:input_y,textRNN.dropout_keep_prob:dropout_keep_prob})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
test_base.py (project: tfutils, author: neuroailab)
def custom_train_loop(sess, train_targets, **loop_params):
        """Define Custom training loop.

        Args:
            sess (tf.Session): Current tensorflow session.
            train_targets (list): Description.
            **loop_params: Optional kwargs needed to perform custom train loop.

        Returns:
            dict: A dictionary containing train targets evaluated by the session.

        """
        train_results = sess.run(train_targets)
        for i, result in enumerate(train_results):
            print('Model {} has loss {}'.format(i, result['loss']))
        return train_results
test_dbinterface.py (project: tfutils, author: neuroailab)
def setUp(self):
        """Set up class before _each_ test method is executed.

        Creates a tensorflow session and instantiates a dbinterface.

        """
        self.setup_model()
        self.sess = tf.Session(
            config=tf.ConfigProto(
                allow_soft_placement=True,
                gpu_options=tf.GPUOptions(allow_growth=True),
                log_device_placement=self.params['log_device_placement'],
                inter_op_parallelism_threads=self.params['inter_op_parallelism_threads']))

        # TODO: Determine whether this should be called here or
        # in dbinterface.initialize()
        self.sess.run(tf.global_variables_initializer())

        self.dbinterface = base.DBInterface(sess=self.sess,
                                            params=self.params,
                                            cache_dir=self.CACHE_DIR,
                                            save_params=self.save_params,
                                            load_params=self.load_params)

        self.step = 0
test.py (project: tfutils, author: neuroailab)
def custom_train_loop(sess, train_targets, **loop_params):
    """Define Custom training loop.

    Args:
        sess (tf.Session): Current tensorflow session.
        train_targets (list): Description.
        **loop_params: Optional kwargs needed to perform custom train loop.

    Returns:
        dict: A dictionary containing train targets evaluated by the session.

    """
    print('Calling custom training loop...')
    train_results = sess.run(train_targets)
    for i, result in enumerate(train_results):
        print('Model {} has loss {}'.format(i, result['loss']))
    return train_results
test_data_tfrecords.py (project: tfutils, author: neuroailab)
def test_ops():
    """Tests the basic init_ops funcions.
    """
    dp = d.TFRecordsParallelByFileProvider(source_paths,
                                           trans_dicts=trans_dicts,
                                           n_threads=4,
                                           batch_size=20,
                                           shuffle=False)
    sess = tf.Session()
    tf.train.start_queue_runners(sess=sess)

    N = 1000
    for i in range(N):
        res = sess.run([[fq.dequeue() for fq in fqs] for fqs in dp.file_queues])
        x, y = res[0]
        print('%d of %d' % (i, N))
        assert x.split('/')[-1] == y.split('/')[-1]
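
One caveat with this pattern: `start_queue_runners` is called without a `tf.train.Coordinator`, so the reader threads are never stopped cleanly. A hedged variant with orderly shutdown:

sess = tf.Session()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
    pass  # run the dequeue/training ops here
finally:
    coord.request_stop()
    coord.join(threads)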

