def vgg_16(inputs,
           is_training=False,
           dropout_keep_prob=0.5,
           scope='vgg_16',
           fc_conv_padding='VALID', reuse=None):
  """Build the convolutional trunk of VGG-16 (conv1–pool5, no fc layers).

  The fully-connected head (fc6/fc7/fc8) is intentionally omitted; this
  variant is a feature extractor that returns pool outputs at every scale,
  e.g. for FCN/SSD-style decoders.

  Args:
    inputs: Float tensor of images in [0, 1]; NHWC with 3 channels is
      assumed (ImageNet BGR-mean subtraction uses a length-3 constant) —
      TODO confirm against callers.
    is_training: Unused here (dropout lives in the removed fc head); kept
      for signature compatibility with slim's stock `vgg_16`.
    dropout_keep_prob: Unused here, kept for signature compatibility.
    scope: Variable scope name for all created variables.
    fc_conv_padding: Unused here, kept for signature compatibility.
    reuse: Whether to reuse variables in `scope`.

  Returns:
    A `(net, end_points)` tuple: `net` is the pool5 output, and
    `end_points` maps 'pool0' (preprocessed input) and 'pool1'..'pool5'
    to the corresponding activations.
  """
  # Undo [0, 1] normalization, then subtract the ImageNet per-channel
  # means (R, G, B) expected by the pretrained VGG weights.
  inputs = inputs * 255.0
  inputs -= tf.constant([123.68, 116.779, 103.939], dtype=tf.float32)
  with tf.variable_scope(scope, 'vgg_16', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.name + '_end_points'
    end_points = {}
    # Also register every conv/fc/pool output in a graph collection so
    # other code can look activations up by op name if it needs to.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      end_points['pool0'] = inputs
      net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      end_points['pool1'] = net
      net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      end_points['pool2'] = net
      net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')
      end_points['pool3'] = net
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')
      end_points['pool4'] = net
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')
      end_points['pool5'] = net
      # NOTE: the stock slim vgg_16 continues with fc6/fc7/fc8 (conv2d
      # stand-ins for the fully-connected layers) and converts
      # `end_points_collection` to a dict; that head was deliberately
      # removed here, which is why is_training / dropout_keep_prob /
      # fc_conv_padding go unused.
      return net, end_points
# (removed non-code page artifacts scraped with this snippet:
#  "评论列表" = "comment list", "文章目录" = "table of contents")