def conv_net_on(self, input_layer, opts):
  # TODO: re-include batch_norm config; it hasn't been helping at all...
  # convert input_layer from uint8 (0, 255) to float32 (0.0, 1.0)
  input_layer = tf.to_float(input_layer) / 255
  # whiten the image, per channel, using a batch_normalisation layer with
  # params calculated directly from this batch.
  axes = list(range(input_layer.get_shape().ndims - 1))
  batch_mean, batch_var = tf.nn.moments(input_layer, axes)  # moments per channel
  whitened_input_layer = tf.nn.batch_normalization(input_layer, batch_mean, batch_var,
                                                   scale=None, offset=None,
                                                   variance_epsilon=1e-6)
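  # (e.g. for NHWC input, axes=[0, 1, 2], so tf.nn.moments returns one scalar
  # mean/variance per channel and each value becomes (x - mean) / sqrt(var + 1e-6))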
  model = slim.conv2d(whitened_input_layer, num_outputs=8, kernel_size=[5, 5], scope='conv1a')
  # model = slim.conv2d(whitened_input_layer, num_outputs=8, kernel_size=[5, 5], scope='conv1b')
  model = slim.max_pool2d(model, kernel_size=[2, 2], scope='pool1')
  self.pool1 = model
  print >>sys.stderr, "pool1", util.shape_and_product_of(model)
  model = slim.conv2d(model, num_outputs=16, kernel_size=[5, 5], scope='conv2a')
  # model = slim.conv2d(model, num_outputs=16, kernel_size=[5, 5], scope='conv2b')
  model = slim.max_pool2d(model, kernel_size=[2, 2], scope='pool2')
  self.pool2 = model
  print >>sys.stderr, "pool2", util.shape_and_product_of(model)
  model = slim.conv2d(model, num_outputs=32, kernel_size=[3, 3], scope='conv3a')
  # model = slim.conv2d(model, num_outputs=32, kernel_size=[3, 3], scope='conv3b')
  model = slim.max_pool2d(model, kernel_size=[2, 2], scope='pool3')
  self.pool3 = model
  print >>sys.stderr, "pool3", util.shape_and_product_of(model)
  # a final conv layer, without pooling, just to bring the param count down.
  # maybe pool here too, actually?
  # model = slim.conv2d(model, num_outputs=32, kernel_size=[3, 3], scope='conv4a')
  # model = slim.conv2d(model, num_outputs=32, kernel_size=[3, 3], scope='conv4b')
  # model = slim.max_pool2d(model, kernel_size=[2, 2], scope='pool4')
  # self.pool4 = model
  # print >>sys.stderr, "pool4", util.shape_and_product_of(model)
  # do simple maxout on the output to reduce dimensionality for the upcoming
  # fully connected layers. see https://arxiv.org/abs/1302.4389
  # model = tf.reshape(model, (-1, 15, 20, 8, 4))     # (?, 15, 20, 32) -> (?, 15, 20, 8, 4)
  # model = tf.reduce_max(model, reduction_indices=4) # (?, 15, 20, 8)
  # print >>sys.stderr, "maxout", util.shape_and_product_of(model)
  model = slim.flatten(model, scope='flat')
  if opts.use_dropout:
    model = slim.dropout(model, is_training=IS_TRAINING, scope='drop')
  return model
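
# A minimal sketch of how conv_net_on might be driven; the 120x160 input size is
# inferred from the maxout shapes above, while the argparse flag and the
# `network` instance below are assumptions for illustration, not from the
# original code:
#
#   import argparse
#   import tensorflow as tf
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--use-dropout', action='store_true')
#   opts = parser.parse_args()
#   imgs = tf.placeholder(tf.uint8, shape=(None, 120, 160, 3))  # batch of RGB frames
#   flat_features = network.conv_net_on(imgs, opts)  # -> (?, 15*20*32) flattened features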