# NOTE: these snippets assume the following imports (module paths are
# project-specific assumptions, e.g. the tf-faster-rcnn layout):
#   import tensorflow as tf
#   import tensorflow.contrib.slim as slim
#   from tensorflow.contrib.slim import arg_scope
#   from model.config import cfg
def create_architecture(self, mode, tag=None):
    training = mode == 'TRAIN'
    testing = mode == 'TEST'
    assert tag is not None
    # handle most of the regularizers here
    weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
    biases_regularizer = weights_regularizer
    # list as many types of layers as possible, even if they are not used now
    with arg_scope([slim.conv2d, slim.conv2d_in_plane,
                    slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
                   weights_regularizer=weights_regularizer,
                   biases_regularizer=biases_regularizer,
                   biases_initializer=tf.constant_initializer(0.0)):
        self.build_network()
    elbo = self.add_losses()
    self._summary_op = tf.summary.merge_all()
    return elbo
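# A hedged usage sketch for create_architecture(): the network subclass and
# the tag value below are illustrative assumptions, not from the original code.
net = MyNetwork()  # hypothetical subclass providing build_network()/add_losses()
total_loss = net.create_architecture('TRAIN', tag='default')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())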
def separable_conv2d_same(inputs, kernel_size, stride, rate=1, scope=None):
    """Strided 2-D separable convolution with 'SAME' padding.

    Args:
      inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
      kernel_size: An int with the kernel_size of the filters.
      stride: An integer, the output stride.
      rate: An integer, rate for atrous convolution.
      scope: Scope.

    Returns:
      output: A 4-D tensor of size [batch, height_out, width_out, channels] with
        the convolution output.
    """
    # Passing num_outputs=None makes separable_conv2d produce only a
    # depthwise convolution layer (no pointwise 1x1 convolution).
    if stride == 1:
        return slim.separable_conv2d(inputs, None, kernel_size,
                                     depth_multiplier=1, stride=1, rate=rate,
                                     padding='SAME', scope=scope)
    else:
        # Pad explicitly so the result matches 'SAME' padding at stride 1
        # regardless of the input size, then convolve with 'VALID' padding.
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        inputs = tf.pad(inputs,
                        [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
        return slim.separable_conv2d(inputs, None, kernel_size,
                                     depth_multiplier=1, stride=stride, rate=rate,
                                     padding='VALID', scope=scope)
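# A quick numeric check of the padding logic above (the input shape is an
# arbitrary assumption): with kernel_size=3, stride=2, rate=1 the effective
# kernel is 3 + (3 - 1) * (1 - 1) = 3, so pad_total = 2 (1 before, 1 after).
images = tf.placeholder(tf.float32, [1, 224, 224, 32])
net = separable_conv2d_same(images, kernel_size=3, stride=2)
# net has shape (1, 112, 112, 32): spatial dims halved, channels unchanged
# because num_outputs=None keeps the depthwise-only behavior.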
# The following is adapted from:
# https://github.com/tensorflow/models/blob/master/slim/nets/mobilenet_v1.py
# The Conv and DepthSepConv named tuples define the layers of the MobileNet
# architecture:
#   Conv defines a 3x3 convolution layer.
#   DepthSepConv defines a 3x3 depthwise convolution followed by a 1x1 convolution.
#   stride is the stride of the convolution.
#   depth is the number of channels or filters in a layer.
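# For reference, the named tuples mentioned above are defined in the linked
# mobilenet_v1.py roughly as follows (a sketch; consult the link for the
# authoritative definitions):
from collections import namedtuple
Conv = namedtuple('Conv', ['kernel', 'stride', 'depth'])
DepthSepConv = namedtuple('DepthSepConv', ['kernel', 'stride', 'depth'])
# e.g. the first two MobileNet layers:
#   Conv(kernel=[3, 3], stride=2, depth=32)
#   DepthSepConv(kernel=[3, 3], stride=1, depth=64)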
def mobilenet_v1_arg_scope(is_training=True,
                           stddev=0.09):
    # Batch norm is kept frozen here ('is_training': False, 'trainable': False)
    # regardless of the is_training flag; only the convolution weights train.
    batch_norm_params = {
        'is_training': False,
        'center': True,
        'scale': True,
        'decay': 0.9997,
        'epsilon': 0.001,
        'trainable': False,
    }
    # Set weight_decay for weights in Conv and DepthSepConv layers.
    weights_init = tf.truncated_normal_initializer(stddev=stddev)
    regularizer = tf.contrib.layers.l2_regularizer(cfg.MOBILENET.WEIGHT_DECAY)
    if cfg.MOBILENET.REGU_DEPTH:
        depthwise_regularizer = regularizer
    else:
        depthwise_regularizer = None
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        trainable=is_training,
                        weights_initializer=weights_init,
                        activation_fn=tf.nn.relu6,
                        normalizer_fn=slim.batch_norm,
                        padding='SAME'):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
                with slim.arg_scope([slim.separable_conv2d],
                                    weights_regularizer=depthwise_regularizer) as sc:
                    return sc
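# A hedged usage sketch of the arg scope above; the layer names and the input
# tensor are illustrative assumptions, not from the original project.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(mobilenet_v1_arg_scope(is_training=True)):
    net = slim.conv2d(images, 32, [3, 3], stride=2, scope='Conv2d_0')
    # DepthSepConv pattern: 3x3 depthwise followed by 1x1 pointwise.
    net = slim.separable_conv2d(net, None, [3, 3], depth_multiplier=1,
                                stride=1, scope='Conv2d_1_depthwise')
    net = slim.conv2d(net, 64, [1, 1], scope='Conv2d_1_pointwise')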
From mobilenet_v1_1_224.py in the triplet-reid project (VisualComputingInstitute):
def mobilenet_v1_arg_scope(is_training=True,
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=0.001,
                           batch_norm_scale=True,
                           weight_decay=0.00004,
                           stddev=0.09,
                           regularize_depthwise=False):
    """Defines the default MobilenetV1 arg scope.

    Args:
      is_training: Whether or not we're training the model.
      batch_norm_decay: The moving average decay when estimating layer activation
        statistics in batch normalization.
      batch_norm_epsilon: Small constant to prevent division by zero when
        normalizing activations by their variance in batch normalization.
      batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
        activations in the batch normalization layer.
      weight_decay: The weight decay to use for regularizing the model.
      stddev: The standard deviation of the truncated normal weight initializer.
      regularize_depthwise: Whether or not to apply regularization on the
        depthwise weights.

    Returns:
      An `arg_scope` to use for the mobilenet v1 model.
    """
    batch_norm_params = {
        'is_training': is_training,
        'center': True,
        'scale': batch_norm_scale,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
    }
    # Set weight_decay for weights in Conv and DepthSepConv layers.
    weights_init = tf.truncated_normal_initializer(stddev=stddev)
    regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    if regularize_depthwise:
        depthwise_regularizer = regularizer
    else:
        depthwise_regularizer = None
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        weights_initializer=weights_init,
                        activation_fn=tf.nn.relu6, normalizer_fn=slim.batch_norm):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
                with slim.arg_scope([slim.separable_conv2d],
                                    weights_regularizer=depthwise_regularizer) as sc:
                    return sc
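# Hedged usage note: unlike the project-specific variant above (which reads
# cfg.MOBILENET.*), this version exposes its hyperparameters as arguments.
# The values and the feature-map shape below are illustrative assumptions.
features = tf.placeholder(tf.float32, [None, 7, 7, 1024])
with slim.arg_scope(mobilenet_v1_arg_scope(is_training=False,
                                           weight_decay=4e-5,
                                           regularize_depthwise=True)):
    logits = slim.conv2d(features, 1000, [1, 1], activation_fn=None,
                         normalizer_fn=None, scope='Logits')  # hypothetical head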