Source file: nets.py (Python)
Project: variational-dropout — author: cjratcliff (project source / file source)
def __init__(self, img_size, num_channels, num_classes):
    """Build a VGG16-style classifier graph using variational-dropout layers.

    Architecture based on
    https://github.com/fchollet/keras/blob/master/keras/applications/vgg16.py

    Args:
        img_size: spatial height/width of the square input images.
        num_channels: number of input image channels.
        num_classes: number of output classes (softmax width).

    Exposes: self.x, self.y, self.deterministic (placeholders),
    self.pred, self.accuracy, self.train_step.
    """
    self.x = tf.placeholder(tf.float32, [None, img_size, img_size, num_channels], 'x')
    self.y = tf.placeholder(tf.float32, [None, num_classes], 'y')
    self.deterministic = tf.placeholder(tf.bool, name='d')
    det = self.deterministic
    # Batch-norm trains exactly when dropout is stochastic.
    is_training = tf.logical_not(det)

    def vd_conv_block(net, c_in, c_out):
        # Linear 3x3 variational-dropout conv, then batch-norm, then ReLU.
        net = Conv2DVarDropout(c_in, c_out, (3,3), padding='SAME', nonlinearity=tf.identity)(net, det)
        net = tf.contrib.layers.batch_norm(net, center=True, scale=False, is_training=is_training)
        return tf.nn.relu(net)

    # The five VGG blocks: per-conv output widths, each block ending in 2x2 max-pool.
    block_widths = ([64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512])

    net = self.x
    c_prev = num_channels
    for widths in block_widths:
        for c_out in widths:
            net = vd_conv_block(net, c_prev, c_out)
            c_prev = c_out
        net = MaxPooling2D((2, 2), strides=(2, 2))(net)

    net = Flatten()(net)
    self.pred = FCVarDropout(512, num_classes, tf.nn.softmax)(net, det)

    # Clip probabilities away from 0/1 so the variational bound's logs stay finite.
    clipped_pred = tf.clip_by_value(self.pred, eps, 1 - eps)

    weights = tf.get_collection('W')
    log_sigma2 = tf.get_collection('log_sigma2')
    # Stochastic gradient variational lower bound; eps/batch_size/sgvlb come
    # from module scope — presumably defined elsewhere in this file.
    loss = sgvlb(clipped_pred, self.y, weights, log_sigma2, batch_size)

    hits = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.pred, 1))
    self.accuracy = tf.reduce_mean(tf.cast(hits, tf.float32), name='accuracy')

    optimizer = tf.train.AdamOptimizer(0.0001)

    # Batch-norm moving-average updates live in UPDATE_OPS; force them to run
    # before each training step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        self.train_step = optimizer.minimize(loss)
Comments
Contents

Questions

Interview experiences

Articles

WeChat official account

Scan the QR code to follow the official account