pretrained.py source code


Project: SSD_tensorflow_VOC    Author: LevinJ

The `fine_tune_inception` method below fine-tunes a pretrained Inception V4 checkpoint on the TF-Slim flowers dataset, training the whole network for one epoch with Adam.
import math

import tensorflow as tf

# `flowers` and `inception` come from the TF-Slim models repository
# (tensorflow/models -> research/slim: the datasets/ and nets/ packages).
from datasets import flowers
from nets import inception

slim = tf.contrib.slim


# Method of a class in pretrained.py; `self.load_batch` and
# `self.get_init_fn` are defined elsewhere in the file (sketches below).
def fine_tune_inception(self):
    train_dir = '/tmp/inception_finetuned/'
    image_size = inception.inception_v4.default_image_size
    checkpoint_path = "../../data/trained_models/inception_v4/inception_v4.ckpt"
    flowers_data_dir = "../../data/flower"

    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)

        dataset = flowers.get_split('train', flowers_data_dir)
        images, _, labels = self.load_batch(dataset, height=image_size, width=image_size)

        # Create the model; the default arg scope configures the batch norm parameters.
        with slim.arg_scope(inception.inception_v4_arg_scope()):
            logits, _ = inception.inception_v4(images, num_classes=dataset.num_classes, is_training=True)

        # Specify the loss function (regularization losses are not added here;
        # the commented lines show the get_total_loss() alternatives):
        one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
        total_loss = slim.losses.softmax_cross_entropy(logits, one_hot_labels)
#         total_loss = slim.losses.get_total_loss(add_regularization_losses=False)
#         total_loss = slim.losses.get_total_loss()

        # Create some summaries to visualize the training process:
        tf.summary.scalar('losses/Total_Loss', total_loss)

        # Specify the optimizer and create the train op:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
        train_op = slim.learning.create_train_op(total_loss, optimizer)

        # Run the training: steps per epoch (batch size 32) times one epoch.
        # math.ceil returns a float in Python 2, so cast to int.
        number_of_steps = int(math.ceil(dataset.num_samples / 32.0)) * 1
        final_loss = slim.learning.train(
            train_op,
            logdir=train_dir,
            init_fn=self.get_init_fn(checkpoint_path),
            number_of_steps=number_of_steps)

        print('Finished training. Last batch loss %f' % final_loss)
    return
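
`self.load_batch` is defined elsewhere in pretrained.py. A plausible sketch, modeled on the TF-Slim flowers walkthrough rather than the project's verbatim code: the three-value return matches the `images, _, labels` unpacking above, with the middle value being the raw image batched only for visualization. The default batch size and the queue parameters here are assumptions; `inception_preprocessing` is the slim preprocessing module.

from preprocessing import inception_preprocessing  # research/slim preprocessing package

def load_batch(self, dataset, batch_size=32, height=299, width=299,
               is_training=True):
    # Read single decoded examples from the TFRecord-backed dataset.
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset, common_queue_capacity=32, common_queue_min=8)
    image_raw, label = provider.get(['image', 'label'])

    # Inception preprocessing: crop/resize and scale pixels to [-1, 1].
    image = inception_preprocessing.preprocess_image(
        image_raw, height, width, is_training=is_training)

    # Resize the raw image too, for display alongside the network input.
    image_raw = tf.expand_dims(image_raw, 0)
    image_raw = tf.image.resize_images(image_raw, [height, width])
    image_raw = tf.squeeze(image_raw)

    # Queue up batches of (preprocessed images, raw images, labels).
    images, images_raw, labels = tf.train.batch(
        [image, image_raw, label],
        batch_size=batch_size,
        num_threads=1,
        capacity=2 * batch_size)
    return images, images_raw, labels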
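
`self.get_init_fn(checkpoint_path)` is also defined elsewhere in the file. A minimal sketch of the standard TF-Slim fine-tuning recipe: restore every pretrained variable except the final classification layers, whose shapes depend on the new `num_classes`. The excluded scope names assume the standard InceptionV4 variable layout.

def get_init_fn(self, checkpoint_path):
    # The logits layers are sized for the flowers classes, not the
    # 1001 ImageNet classes in the checkpoint, so skip restoring them.
    checkpoint_exclude_scopes = ['InceptionV4/Logits', 'InceptionV4/AuxLogits']
    variables_to_restore = slim.get_variables_to_restore(
        exclude=checkpoint_exclude_scopes)

    # Returns a callable(session) that slim.learning.train invokes once
    # at startup to load the pretrained weights.
    return slim.assign_from_checkpoint_fn(checkpoint_path, variables_to_restore)

Note that only the restore is restricted: all layers are still trained. Freezing the backbone would additionally require passing a `variables_to_train` list to `slim.learning.create_train_op`.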