# Assumes the surrounding module has: import tensorflow as tf
# and: slim = tf.contrib.slim
def get_model(self, inputs, weight_decay=0.0005, is_training=False):
    """Builds the VGG-16 base network and the additional SSD blocks on top of it."""
    # End_points collect relevant activations for external use.
    arg_scope = self.__arg_scope(weight_decay=weight_decay)
    with slim.arg_scope(arg_scope):
        end_points = {}
        with tf.variable_scope('vgg_16', [inputs]):
            # Original VGG-16 blocks.
            # Block 1.
            net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
            end_points['block1'] = net
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            # Block 2.
            net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
            end_points['block2'] = net
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            # Block 3.
            net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
            end_points['block3'] = net
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            # Block 4.
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
            end_points['block4'] = net
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            # Block 5.
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
            end_points['block5'] = net
            # Unlike plain VGG-16, 'pool5' is 3x3 with stride 1 so the feature
            # map keeps its resolution for the SSD layers that follow.
            net = slim.max_pool2d(net, [3, 3], stride=1, scope='pool5')
        # Additional SSD blocks.
        keep_prob = 0.8
        # Convolutions carry no fused activation here: batch norm applies the
        # ReLU, and dropout only fires when is_training is True.
        with slim.arg_scope([slim.conv2d],
                            activation_fn=None):
            with slim.arg_scope([slim.batch_norm],
                                activation_fn=tf.nn.relu,
                                is_training=is_training,
                                updates_collections=None):
                with slim.arg_scope([slim.dropout],
                                    is_training=is_training,
                                    keep_prob=keep_prob):
                    with tf.variable_scope(self.model_name):
                        return self.__additional_ssd_block(end_points, net)
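
The private __arg_scope helper called at the top of get_model is not shown in this snippet. As a point of reference, here is a minimal sketch of what it might look like, modelled on the standard TF-slim vgg_arg_scope; the regularizer and padding choices are assumptions, not taken from the original code.

def __arg_scope(self, weight_decay=0.0005):
    # Assumption: L2-regularize conv weights and default to SAME padding,
    # mirroring slim's stock vgg_arg_scope.
    with slim.arg_scope([slim.conv2d],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        biases_initializer=tf.zeros_initializer()):
        with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                            padding='SAME') as sc:
            return sc

SAME padding on max_pool2d also matters here: it keeps the stride-1 'pool5' from shrinking the block5 feature map.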
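Finally, a hedged sketch of how the method might be driven end to end. The class name SSDNet and the 300x300 input resolution are assumptions for illustration and do not appear in the code above.

import tensorflow as tf

slim = tf.contrib.slim

# 'SSDNet' is an assumed wrapper class exposing get_model() and model_name.
net = SSDNet()
images = tf.placeholder(tf.float32, shape=[None, 300, 300, 3], name='images')
# Returns whatever __additional_ssd_block() produces, typically the SSD
# predictions together with the collected end_points.
outputs = net.get_model(images, weight_decay=0.0005, is_training=True)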