import tensorflow as tf
import tensorflow.contrib.slim as slim


def bottleneck_trans_same(inputs, depth, depth_bottleneck, stride, rate=1,
                          outputs_collections=None, scope=None):
"""Bottleneck residual unit variant with BN after convolutions.
This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
its definition. Note that we use here the bottleneck variant which has an
extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
  with tf.variable_scope(scope, 'bottleneck_trans', [inputs]) as sc:
    # Projection shortcut: a strided transposed conv so the shortcut matches
    # the upsampled spatial size and output depth of the residual branch.
    shortcut = slim.conv2d_transpose(inputs, depth, 3, stride=stride,
                                     activation_fn=None, padding='SAME',
                                     scope='shortcut')

    # Residual branch: 1x1 reduce -> strided 3x3 (upsampling) -> 1x1 expand.
    residual = slim.conv2d_transpose(inputs, depth_bottleneck, [1, 1],
                                     stride=1, scope='conv1_trans')
    residual = slim.conv2d_transpose(residual, depth_bottleneck, 3,
                                     stride=stride, padding='SAME',
                                     scope='conv2')
    residual = slim.conv2d_transpose(residual, depth, [1, 1], stride=1,
                                     activation_fn=None, scope='conv3_trans')

    # Post-addition activation, as in the original ResNet v1 unit.
    output = tf.nn.relu(shortcut + residual)

    return slim.utils.collect_named_outputs(outputs_collections,
                                            sc.original_name_scope,
                                            output)
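
A minimal usage sketch (everything beyond the function itself is an assumption for illustration: TF 1.x with tf.contrib.slim, the 8x8x256 input shape, and the batch-norm arg_scope): with stride=2 the unit doubles the spatial resolution, so an 8x8 feature map becomes 16x16 with the requested output depth.

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Hypothetical decoder feature map: batch of 1, 8x8 spatial, 256 channels.
features = tf.placeholder(tf.float32, [1, 8, 8, 256])

# The BN-after-convolution behavior comes from this arg_scope; the explicit
# activation_fn=None inside the unit still suppresses the ReLU on the
# shortcut and conv3 outputs before the addition.
with slim.arg_scope([slim.conv2d_transpose],
                    normalizer_fn=slim.batch_norm,
                    activation_fn=tf.nn.relu):
  up = bottleneck_trans_same(features, depth=128, depth_bottleneck=64,
                             stride=2)

print(up.get_shape())  # (1, 16, 16, 128): spatial size x2, depth = 128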