import tensorflow as tf
import tensorflow.contrib.slim as slim


def misconception_model(input,
window_size,
depths,
strides,
objective_functions,
is_training,
sub_count=128,
sub_layers=2,
keep_prob=0.5):
""" A misconception tower.
Args:
input: a tensor of size [batch_size, 1, width, depth].
window_size: the width of the conv and pooling filters to apply.
depth: the depth of the output tensor.
levels: the height of the tower in misconception layers.
objective_functions: a list of objective functions to add to the top of
the network.
is_training: whether the network is training.
Returns:
a tensor of size [batch_size, num_classes].
"""
layers = []
with slim.arg_scope([slim.batch_norm], decay=0.999):
with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.relu):
net = input
layers.append(net)
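            # Stack the misconception tower: one layer per (depth, stride) pair.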
for depth, stride in zip(depths, strides):
net = misconception_with_bypass(net, window_size, stride,
depth, is_training)
layers.append(net)
outputs = []
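            # Attach a small head per objective: 1x1 conv sub-layers, global
            # average pooling, and dropout, then the objective's own output.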
for ofunc in objective_functions:
onet = net
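                # All but the last sub-layer use batch norm after the 1x1 conv.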
for _ in range(sub_layers - 1):
onet = slim.conv2d(
onet,
sub_count, [1, 1],
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params={'is_training': is_training})
# Don't use batch norm on last layer, just use dropout.
onet = slim.conv2d(onet, sub_count, [1, 1], normalizer_fn=None)
                # Global average pool over the width dimension (dims[2] given
                # the [batch, 1, width, depth] layout).
                n = int(onet.get_shape().dims[2])
                onet = slim.avg_pool2d(onet, [1, n], stride=[1, n])
onet = slim.flatten(onet)
onet = slim.dropout(onet, keep_prob, is_training=is_training)
outputs.append(ofunc.build(onet))
return outputs, layers
Source: layers.py
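misconception_with_bypass is defined elsewhere in layers.py and is not shown
above. As a rough guide only, here is a minimal sketch of such a layer,
assuming an inception-style block (a [1, window_size] conv and a parallel max
pool, concatenated and mixed by a 1x1 conv) added to an average-pooled bypass.
The 1x1 projection on the bypass is an assumption made so the two branches
have matching shapes; the real implementation may differ:

def misconception_with_bypass(input, window_size, stride, depth, is_training):
    # Inception-style branch: a [1, window_size] conv and a parallel max pool,
    # concatenated on the channel axis and mixed by a 1x1 conv.
    with slim.arg_scope([slim.conv2d],
                        padding='SAME',
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params={'is_training': is_training}):
        conv = slim.conv2d(input, depth, [1, window_size], stride=[1, stride])
        pool = slim.max_pool2d(input, [1, window_size], stride=[1, stride],
                               padding='SAME')
        mixed = slim.conv2d(tf.concat([conv, pool], 3), depth, [1, 1])
        # Bypass: average-pool the input down to the same width, then project
        # it to `depth` channels with a 1x1 conv (an assumption here) so the
        # two branches can be added.
        bypass = slim.avg_pool2d(input, [1, window_size], stride=[1, stride],
                                 padding='SAME')
        bypass = slim.conv2d(bypass, depth, [1, 1])
        return mixed + bypass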
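A minimal usage sketch follows. ClassificationObjective is a hypothetical
stand-in for whatever objective classes the surrounding project supplies; the
only requirement visible in the code above is a build(net) method that maps
the pooled feature vector to an output tensor:

class ClassificationObjective(object):
    """Hypothetical objective: a dense logits head over num_classes."""

    def __init__(self, num_classes):
        self.num_classes = num_classes

    def build(self, net):
        # net is the flattened, dropout-regularized feature vector.
        return slim.fully_connected(net, self.num_classes, activation_fn=None)


# Toy input: 8 series of width 256 with 9 features per timestep.
features = tf.placeholder(tf.float32, [8, 1, 256, 9])
outputs, layers = misconception_model(
    features,
    window_size=3,
    depths=[32, 64, 128],
    strides=[2, 2, 2],
    objective_functions=[ClassificationObjective(num_classes=10)],
    is_training=True)
logits = outputs[0]  # shape [8, 10]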