def __init__(self, img_size, num_channels, num_classes, dropout_prob=0.0):
    # VGG16-style architecture, based on
    # https://github.com/fchollet/keras/blob/master/keras/applications/vgg16.py
    # Assumes TensorFlow 1.x (`tf`) and the Keras layers Conv2D, MaxPooling2D,
    # Flatten and Dense are imported at module level.
    self.x = tf.placeholder(tf.float32, [None, img_size, img_size, num_channels], 'x')
    self.y = tf.placeholder(tf.float32, [None, num_classes], 'y')
    # `deterministic` switches batch norm between training mode (batch statistics)
    # and inference mode (moving averages).
    self.deterministic = tf.placeholder(tf.bool, name='d')
    d = self.deterministic
    phase = tf.logical_not(d)

    def conv_bn(h, num_filters, phase):
        # 3x3 convolution (linear) -> batch norm -> ReLU
        h = Conv2D(num_filters, (3, 3), padding='same')(h)
        h = tf.contrib.layers.batch_norm(h, center=True, scale=False, is_training=phase)
        return tf.nn.relu(h)
    # Block 1
    h = conv_bn(self.x, 64, phase)
    h = conv_bn(h, 64, phase)
    h = MaxPooling2D((2, 2), strides=(2, 2))(h)
    # Block 2
    h = conv_bn(h, 128, phase)
    h = conv_bn(h, 128, phase)
    h = MaxPooling2D((2, 2), strides=(2, 2))(h)
    # Block 3
    h = conv_bn(h, 256, phase)
    h = conv_bn(h, 256, phase)
    h = conv_bn(h, 256, phase)
    h = MaxPooling2D((2, 2), strides=(2, 2))(h)
    # Block 4
    h = conv_bn(h, 512, phase)
    h = conv_bn(h, 512, phase)
    h = conv_bn(h, 512, phase)
    h = MaxPooling2D((2, 2), strides=(2, 2))(h)
    # Block 5
    h = conv_bn(h, 512, phase)
    h = conv_bn(h, 512, phase)
    h = conv_bn(h, 512, phase)
    h = MaxPooling2D((2, 2), strides=(2, 2))(h)

    h = Flatten()(h)
    self.pred = Dense(num_classes, activation='softmax')(h)
    # Cross-entropy loss; clip the softmax output to avoid log(0).
    eps = 1e-7  # small constant; `eps` was undefined in the original snippet, value assumed
    pred = tf.clip_by_value(self.pred, eps, 1 - eps)
    loss = -tf.reduce_sum(tf.log(pred) * self.y)

    correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.pred, 1))
    self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')

    optimizer = tf.train.AdamOptimizer(0.001)
    # Batch norm's moving-average updates are collected in UPDATE_OPS;
    # run them before each training step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        self.train_step = optimizer.minimize(loss)
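
For completeness, here is a minimal usage sketch, not part of the original code: it assumes the `__init__` above belongs to a wrapper class, called `VGG16Net` here for illustration, and that `train_x`/`train_y` are NumPy batches of images and one-hot labels.

model = VGG16Net(img_size=32, num_channels=3, num_classes=10)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Training step: deterministic=False, so batch norm uses batch statistics
    # and its moving averages are updated via UPDATE_OPS.
    sess.run(model.train_step,
             feed_dict={model.x: train_x, model.y: train_y,
                        model.deterministic: False})
    # Evaluation: deterministic=True switches batch norm to its moving averages.
    acc = sess.run(model.accuracy,
                   feed_dict={model.x: train_x, model.y: train_y,
                              model.deterministic: True})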