def separable_conv(self, input, k_h, k_w, c_o, stride, name, relu=True):
with slim.arg_scope([slim.batch_norm], fused=common.batchnorm_fused):
output = slim.separable_convolution2d(input,
num_outputs=None,
stride=stride,
trainable=self.trainable,
depth_multiplier=1.0,
kernel_size=[k_h, k_w],
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
# weights_initializer=tf.truncated_normal_initializer(stddev=0.09),
weights_regularizer=tf.contrib.layers.l2_regularizer(0.00004),
biases_initializer=None,
padding=DEFAULT_PADDING,
scope=name + '_depthwise')
output = slim.convolution2d(output,
c_o,
stride=1,
kernel_size=[1, 1],
activation_fn=tf.nn.relu if relu else None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
# weights_initializer=tf.truncated_normal_initializer(stddev=0.09),
biases_initializer=slim.init_ops.zeros_initializer(),
normalizer_fn=slim.batch_norm,
trainable=self.trainable,
weights_regularizer=tf.contrib.layers.l2_regularizer(common.regularizer_dsconv),
# weights_regularizer=None,
scope=name + '_pointwise')
return output
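# Usage sketch (an assumption, not part of the original source): this method is meant to be
# called on a network class that defines self.trainable, with `common.batchnorm_fused` and
# DEFAULT_PADDING available as in the snippet above. A MobileNet-style call on an NHWC
# feature map might look like:
#     net = self.separable_conv(net, k_h=3, k_w=3, c_o=256, stride=1, name='MConv_1')
# The depthwise stage has no bias and no activation; batch norm and the optional ReLU are
# applied only after the 1x1 pointwise convolution.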
def conv_block(self, input, out_size, layer, kernalsize=3, l2_penalty=1e-8, shortcut=False):
in_shape = input.get_shape().as_list()
if layer>0:
filter_shape = [kernalsize, 1, in_shape[3], out_size]
else:
filter_shape = [kernalsize, in_shape[2], 1, out_size]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W-%s" % layer)
b = tf.Variable(tf.constant(0.1, shape=[out_size]), name="b-%s" % layer)
tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(W))
tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(b))
if layer>0:
conv = tf.nn.conv2d(input, W, strides=[1, 1, 1, 1], padding="SAME", name="conv-%s" % layer)
else:
conv = tf.nn.conv2d(input, W, strides=[1, 1, 1, 1], padding="VALID", name="conv-%s" % layer)
if shortcut:
shortshape = [1,1,in_shape[3], out_size]
Ws = tf.Variable(tf.truncated_normal(shortshape, stddev=0.05), name="Ws-%s" % layer)
tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(Ws))
conv = conv + tf.nn.conv2d(input, Ws, strides=[1, 1, 1, 1], padding="SAME", name="conv-shortcut-%s" % layer)
h = tf.nn.bias_add(conv, b)
h2 = tf.nn.relu(tf.contrib.layers.batch_norm(h, center=True, scale=True, epsilon=1e-5, decay=0.9), name="relu-%s" % layer)
return h2
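# Usage sketch (an assumption, not from the original source): the block is stacked by layer
# index, with layer 0 collapsing the feature axis (VALID padding over the full width) and
# later layers convolving over time with SAME padding, e.g.:
#     net = tf.expand_dims(model_input, -1)   # [batch, time, feature, 1]
#     net = self.conv_block(net, 256, layer=0)
#     net = self.conv_block(net, 256, layer=1, shortcut=True)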
def resnet_arg_scope(is_training=True,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
batch_norm_params = {
'is_training': False,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'trainable': False,
'updates_collections': tf.GraphKeys.UPDATE_OPS
}
with arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
weights_initializer=slim.variance_scaling_initializer(),
trainable=is_training,
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
return arg_sc
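# Usage sketch (an assumption, not from the original repo): the returned scope is re-entered
# when the backbone is built. Batch norm is deliberately frozen here ('is_training': False,
# 'trainable': False), so only the convolution weights are updated during training.
#     with slim.arg_scope(resnet_arg_scope(is_training=True)):
#         net, end_points = resnet_v1.resnet_v1_101(images, is_training=True)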
def arg_scope(self):
"""Configure the neural network's layers."""
batch_norm_params = {
"is_training" : self.is_training,
"decay" : 0.9997,
"epsilon" : 0.001,
"variables_collections" : {
"beta" : None,
"gamma" : None,
"moving_mean" : ["moving_vars"],
"moving_variance" : ["moving_vars"]
}
}
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(
stddev=self._hparams.init_stddev),
weights_regularizer=slim.l2_regularizer(
self._hparams.regularize_constant),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
activation_fn=None,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': tf.GraphKeys.UPDATE_OPS,
}
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
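import tensorflow as tf
slim = tf.contrib.slim

def _demo_extra_conv_scope():
    # Minimal usage sketch (an assumption, not from the original repo): re-enter the
    # returned arg_scope so that extra conv layers pick up the L2 regularizer, batch norm
    # and SAME-padded max pooling configured above.
    images = tf.placeholder(tf.float32, [None, 224, 224, 3])
    with slim.arg_scope(_extra_conv_arg_scope_with_bn(weight_decay=1e-5)):
        net = slim.conv2d(images, 64, [3, 3], scope='extra_conv1')
        net = slim.max_pool2d(net, [2, 2], scope='extra_pool1')
    return net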
def _bn_relu_conv_block(input,
filters,
kernel=(3, 3),
stride=(1, 1),
weight_decay=5e-4):
''' Adds a Conv-BatchNorm-ReLU block for DPN
Args:
input: input tensor
filters: number of output filters
kernel: convolution kernel size
stride: stride of convolution
weight_decay: l2 regularization strength for the conv weights
Returns: a TensorFlow tensor
'''
x = slim.conv2d(input, filters, kernel, padding='SAME', stride=stride,
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=None)
x = slim.batch_norm(x)
x = tf.nn.relu(x)
return x
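import tensorflow as tf

def _demo_bn_relu_conv_block():
    # Minimal usage sketch (an assumption, not from the original source): stacks two of the
    # Conv-BatchNorm-ReLU blocks defined above on a dummy NHWC feature map.
    x = tf.placeholder(tf.float32, [None, 56, 56, 64])
    x = _bn_relu_conv_block(x, filters=128, kernel=(3, 3), stride=(1, 1))
    x = _bn_relu_conv_block(x, filters=128, kernel=(3, 3), stride=(2, 2))
    return x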
def _root_block(input,
initial_conv_filters,
weight_decay=5e-4,
ksize=(7,7),
is_pool=True):
''' Adds the initial conv block, with batch norm and relu, for the DPN
Args:
input: input tensor
initial_conv_filters: number of filters for the initial conv block
weight_decay: weight decay factor
ksize: kernel size of the initial convolution
is_pool: whether to append a 3x3 stride-2 max pool
Returns: a TensorFlow tensor
'''
x = slim.conv2d(input,
initial_conv_filters,
ksize,
padding='SAME',
stride=(1, 1),
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=None)
x = slim.batch_norm(x)
x = tf.nn.relu(x)
if is_pool:
x = slim.max_pool2d(x, (3, 3), stride=(2, 2), padding='SAME')
return x
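import tensorflow as tf

def _demo_root_block():
    # Minimal usage sketch (an assumption, not from the original source): the DPN stem
    # defined above, a 7x7 conv with batch norm and ReLU followed by a 3x3 stride-2 max pool.
    images = tf.placeholder(tf.float32, [None, 224, 224, 3])
    return _root_block(images, initial_conv_filters=64, ksize=(7, 7), is_pool=True)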
def inference(images, keep_probability, phase_train=True,
bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# force in-place updates of mean and variance estimates
'updates_collections': None,
# Moving averages end up in the trainable variables collection
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
return inception_resnet_v2(images, is_training=phase_train,
dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
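# Usage sketch (an assumption, not from the original repo): `inception_resnet_v2` must be
# importable in this module. A typical training-graph call looks like:
#     prelogits, end_points = inference(image_batch, keep_probability=0.8, phase_train=True,
#                                       bottleneck_layer_size=128, weight_decay=5e-4)
# Because 'updates_collections' is None, the moving averages are updated in place and no
# explicit tf.GraphKeys.UPDATE_OPS dependency is needed in the train op.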
def inference(images, keep_probability, phase_train=True,
bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# force in-place updates of mean and variance estimates
'updates_collections': None,
# Moving averages end up in the trainable variables collection
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
return inception_resnet_v1(images, is_training=phase_train,
dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
def encoder(self, images, is_training):
activation_fn = leaky_relu # tf.nn.relu
weight_decay = 0.0
with tf.variable_scope('encoder'):
with slim.arg_scope([slim.batch_norm],
is_training=is_training):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=self.batch_norm_params):
net = images
net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')
net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')
net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')
net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')
net = slim.flatten(net)
fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
return fc1, fc2
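# Usage sketch (an assumption, not from the original source): the two fully connected heads
# are typically the mean and log-variance of a VAE latent code, e.g.:
#     latent_mean, latent_log_var = self.encoder(images, is_training=True)
# `conv2d_block`, `leaky_relu` and `self.batch_norm_params` are assumed to be defined
# elsewhere in the model class.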
def encoder(self, images, is_training):
activation_fn = leaky_relu # tf.nn.relu
weight_decay = 0.0
with tf.variable_scope('encoder'):
with slim.arg_scope([slim.batch_norm],
is_training=is_training):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=self.batch_norm_params):
net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
net = slim.flatten(net)
fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
return fc1, fc2
def inference(inputs, keep_prob,
bottleneck_size=128,
phase_train=True,
weight_decay=0.0,
reuse=None):
batch_norm_params = {
'decay': 0.995,
'epsilon': 0.001,
'updates_collections': None,
# 'scale': True, # [test1]
'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]} # [test2: removed from 'trainable_variables']
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_regularizer=slim.l2_regularizer(weight_decay),  # [test4: add weight_decay to biases]
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
return inception_resnet_v2(
inputs,
is_training=phase_train,
keep_prob=keep_prob,
bottleneck_size=bottleneck_size,
reuse=reuse)
def inference(inputs, keep_prob,
bottleneck_size=128,
phase_train=True,
weight_decay=0.0,
reuse=None):
batch_norm_params = {
'decay': 0.995,
'epsilon': 0.001,
'updates_collections': None,
# 'scale': True} # [test1: add 'gamma']
'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]} # [test2: removed from 'trainable_variables']
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):  # [test4: add weight_decay to biases]
return inception_resnet_v2(
inputs,
is_training=phase_train,
keep_prob=keep_prob,
bottleneck_size=bottleneck_size,
reuse=reuse)
def inference(inputs, keep_prob,
bottleneck_size=128,
phase_train=True,
weight_decay=0.0,
reuse=None):
batch_norm_params = {
'decay': 0.995,
'epsilon': 0.001,
'updates_collections': None,
# 'scale': True, # [test1]
'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]} # [test2]
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
return inception_resnet_v1(inputs, is_training=phase_train, keep_prob=keep_prob,
bottleneck_size=bottleneck_size, reuse=reuse)
def batchnorm(bottom, is_train, num_reference, epsilon=1e-3, decay=0.999, name=None):
""" virtual batch normalization (poor man's version)
the first half is the true batch, the second half is the reference batch.
When num_reference = 0, it is just typical batch normalization.
To use virtual batch normalization in test phase, "update_popmean.py" needed to be executed first
(in order to store the mean and variance of the reference batch into pop_mean and pop_variance of batchnorm.)
"""
batch_size = bottom.get_shape().as_list()[0]
inst_size = batch_size - num_reference
instance_weight = np.ones([batch_size])
if inst_size > 0:
reference_weight = 1.0 - (1.0 / ( num_reference + 1.0))
instance_weight[0:inst_size] = 1.0 - reference_weight
instance_weight[inst_size:] = reference_weight
else:
decay = 0.0
return slim.batch_norm(bottom, activation_fn=None, is_training=is_train, epsilon=epsilon, decay=decay, scale=True, scope=name, batch_weights=instance_weight)
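import tensorflow as tf

def _demo_virtual_batchnorm():
    # Minimal usage sketch (an assumption, not from the original source): a statically shaped
    # batch of 16 where the last 4 examples form the reference batch; the per-example
    # batch_weights computed above control how much the reference batch contributes to the
    # normalization statistics.
    x = tf.placeholder(tf.float32, [16, 32, 32, 64])
    return batchnorm(x, is_train=True, num_reference=4, name='vbn_demo')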
def batchnorm(bottom, is_train, num_reference, epsilon=1e-3, decay=0.999, name=None):
""" virtual batch normalization (poor man's version)
the first half is the true batch, the second half is the reference batch.
When num_reference = 0, it is just typical batch normalization.
To use virtual batch normalization in test phase, "update_popmean.py" needed to be executed first
(in order to store the mean and variance of the reference batch into pop_mean and pop_variance of batchnorm.)
"""
batch_size = bottom.get_shape().as_list()[0]
inst_size = batch_size - num_reference
instance_weight = np.ones([batch_size])
if inst_size > 0:
reference_weight = 1.0 - (1.0 / ( num_reference + 1.0))
instance_weight[0:inst_size] = 1.0 - reference_weight
instance_weight[inst_size:] = reference_weight
else:
decay = 0.0
return slim.batch_norm(bottom, activation_fn=None, is_training=is_train, epsilon=epsilon, decay=decay, scale=True, scope=name, batch_weights=instance_weight)
def batchnorm(bottom, is_train, num_reference, epsilon=1e-3, decay=0.999, name=None):
""" virtual batch normalization (poor man's version)
the first half is the true batch, the second half is the reference batch.
When num_reference = 0, it is just typical batch normalization.
To use virtual batch normalization in test phase, "update_popmean.py" needed to be executed first
(in order to store the mean and variance of the reference batch into pop_mean and pop_variance of batchnorm.)
"""
batch_size = bottom.get_shape().as_list()[0]
inst_size = batch_size - num_reference
instance_weight = np.ones([batch_size])
if inst_size > 0:
reference_weight = 1.0 - (1.0 / ( num_reference + 1.0))
instance_weight[0:inst_size] = 1.0 - reference_weight
instance_weight[inst_size:] = reference_weight
else:
decay = 0.0
return slim.batch_norm(bottom, activation_fn=None, is_training=is_train, epsilon=epsilon, decay=decay, scale=True, scope=name, batch_weights=instance_weight)
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
activation_fn=None,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': tf.GraphKeys.UPDATE_OPS_EXTRA,
}
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
def resnet_arg_scope(is_training=True,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
batch_norm_params = {
'is_training': False,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'trainable': False,
'updates_collections': tf.GraphKeys.UPDATE_OPS
}
with arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
weights_initializer=slim.variance_scaling_initializer(),
trainable=is_training,
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
return arg_sc
def discriminator(self, x, name, reuse=None):
with tf.variable_scope(name, reuse=reuse):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.contrib.layers.variance_scaling_initializer(),
weights_regularizer=slim.l2_regularizer(2e-4)):
with slim.arg_scope([slim.conv2d], padding="SAME", stride=2, kernel_size=4):
net = slim.conv2d(x, self.df_dim)
net = lrelu(net)
mul = 2
for bn in self.d_bn:
net = slim.conv2d(net, self.df_dim * mul)
net = bn(net)
net = lrelu(net)
mul *= 2
net = tf.reshape(net, shape=[-1, 2*2*512])
net = slim.fully_connected(net, 512, activation_fn=lrelu, normalizer_fn=slim.batch_norm)
net = slim.fully_connected(net, 1, activation_fn=tf.nn.sigmoid)
return net # return prob
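# Usage sketch (an assumption, not from the original source): a DCGAN-style discriminator
# where `self.d_bn` is a list of batch-norm callables and `lrelu` is a leaky ReLU helper.
# Weights are shared between the real and fake branches via `reuse`:
#     d_real = self.discriminator(real_images, name='discriminator')
#     d_fake = self.discriminator(fake_images, name='discriminator', reuse=True)
# The hard-coded reshape to [-1, 2*2*512] assumes the stride-2 convolutions reduce the input
# to a 2x2 map with 512 channels (e.g. 32x32 inputs with df_dim=64 and three BN layers).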
def cnn_layers(inputs, scope, end_points_collection, dropout_keep_prob=0.8, is_training=True):
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=[end_points_collection]):
with slim.arg_scope([slim.conv2d],
normalizer_fn=slim.batch_norm, normalizer_params={'is_training': is_training},
activation_fn=leaky_relu):
net = slim.conv2d(inputs, 32, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
net = slim.conv2d(net, 64, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
net = slim.conv2d(net, 128, [3, 3], scope='conv3')
net = slim.conv2d(net, 64, [1, 1], scope='conv4')
box_net = net = slim.conv2d(net, 128, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], 2, scope='pool5')
net = slim.conv2d(net, 256, [3, 3], scope='conv6')
net = slim.conv2d(net, 128, [1, 1], scope='conv7')
net = slim.conv2d(net, 256, [3, 3], scope='conv8')
box_net = _reorg(box_net, 2)
net = tf.concat([box_net, net], 3)
net = slim.conv2d(net, 256, [3, 3], scope='conv9')
net = slim.conv2d(net, 75, [1, 1], activation_fn=None, scope='conv10')
return net, end_points_collection
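# Usage sketch (an assumption, not from the original source): a YOLO-style detection head;
# `leaky_relu` and the `_reorg` space-to-depth passthrough are assumed to be defined in this
# module. A typical call looks like:
#     net, _ = cnn_layers(images, 'yolo', 'yolo_end_points', is_training=True)
#     end_points = slim.utils.convert_collection_to_dict('yolo_end_points')
# The final 1x1 conv emits 75 channels per grid cell, i.e. anchors x (4 box coordinates +
# objectness + class scores); the exact anchor/class split depends on the dataset.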
def inference(images, keep_probability, phase_train=True,
bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# force in-place updates of mean and variance estimates
'updates_collections': None,
# Moving averages end up in the trainable variables collection
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
return inception_resnet_v2(images, is_training=phase_train,
dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
def inference(images, keep_probability, phase_train=True,
bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# force in-place updates of mean and variance estimates
'updates_collections': None,
# Moving averages end up in the trainable variables collection
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
return inception_resnet_v1(images, is_training=phase_train,
dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
def encoder(self, images, is_training):
activation_fn = leaky_relu # tf.nn.relu
weight_decay = 0.0
with tf.variable_scope('encoder'):
with slim.arg_scope([slim.batch_norm],
is_training=is_training):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=self.batch_norm_params):
net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
net = slim.flatten(net)
fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
return fc1, fc2
def resnet_arg_scope(is_training=True,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
batch_norm_params = {
'is_training': False,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'trainable': False,
'updates_collections': tf.GraphKeys.UPDATE_OPS
}
with arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
weights_initializer=slim.variance_scaling_initializer(),
trainable=is_training,
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
return arg_sc
def resnet_arg_scope(is_training=True):
"""Sets up the default arguments for the CIFAR-10 resnet model."""
batch_norm_params = {
'is_training': is_training,
'decay': 0.9,
'epsilon': 0.001,
'scale': True,
# This forces batch_norm to compute the moving averages in-place
# instead of using a global collection which does not work with tf.cond.
# 'updates_collections': None,
}
with slim.arg_scope([slim.conv2d, slim.batch_norm], activation_fn=lrelu):
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(0.0002),
weights_initializer=slim.variance_scaling_initializer(),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
return arg_sc
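# Usage sketch (an assumption, not from the original source): unlike the frozen-BN detection
# scopes above, batch norm here follows `is_training`, so the training loop must run the ops
# in tf.GraphKeys.UPDATE_OPS (e.g. via a control dependency on the train op). `lrelu` is
# assumed to be defined in this module.
#     with slim.arg_scope(resnet_arg_scope(is_training=True)):
#         net = slim.conv2d(images, 16, [3, 3], scope='conv0')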
def create_base(self, inputs, is_training):
params = self._config.cnn_params
print("input dimension = {}".format(inputs.get_shape()))
with tf.name_scope('Model'):
with slim.arg_scope([slim.conv2d, slim.fully_connected], activation_fn=tf.nn.relu,
# normalizer_fn=slim.batch_norm,
# normalizer_params={'is_training': is_training}
# weights_initializer=tf.contrib.layers.xavier_initializer(seed=10)
):
# inputs is 2D with dimension (3 x feature_len)
net = slim.conv2d(inputs, params['num_filters'][0], [3,5], scope='conv1')
net = slim.conv2d(net, params['num_filters'][1], [3, 5], scope='conv2')
net = slim.conv2d(net, params['num_filters'][2], [3, 5], scope='conv3')
net = slim.flatten(net, scope='flatten1')
net = slim.fully_connected(net, params['num_fc_1'], scope='fc1')
net = slim.dropout(net, self._config.keep_prob, is_training=is_training, scope='dropout1')
logits = slim.fully_connected(net, self._config.num_classes, activation_fn=None, scope='fc2')
with tf.name_scope('output'):
predicted_classes = tf.to_int32(tf.argmax(logits, dimension=1), name='y')
return logits, predicted_classes
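# Usage sketch (an assumption, not from the original source): the per-example input is laid
# out as (3 x feature_len), fed to slim.conv2d as a 4-D NHWC tensor, and the config object
# provides cnn_params, keep_prob and num_classes, e.g.:
#     logits, predicted_classes = model.create_base(inputs, is_training=True)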
def inference(images, keep_probability, phase_train=True,
bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# force in-place updates of mean and variance estimates
'updates_collections': None,
# Moving averages end up in the trainable variables collection
'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
}
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
return inception_resnet_v2(images, is_training=phase_train,
dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size,
reuse=reuse)
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
activation_fn=None,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': tf.GraphKeys.UPDATE_OPS,
}
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
def inference(images, keep_probability, phase_train=True,
bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# force in-place updates of mean and variance estimates
'updates_collections': None,
# Moving averages end up in the trainable variables collection
'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
}
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
return inception_resnet_v1(images, is_training=phase_train,
dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size,
reuse=reuse)