def create_architecture(self, mode, tag=None):
    training = mode == 'TRAIN'
    testing = mode == 'TEST'
    assert tag is not None
    # handle most of the regularizers here
    weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
    biases_regularizer = weights_regularizer
    # list as many types of layers as possible, even if they are not used now
    with arg_scope([slim.conv2d, slim.conv2d_in_plane,
                    slim.conv2d_transpose, slim.separable_conv2d,
                    slim.fully_connected],
                   weights_regularizer=weights_regularizer,
                   biases_regularizer=biases_regularizer,
                   biases_initializer=tf.constant_initializer(0.0)):
        self.build_network()
    elbo = self.add_losses()
    self._summary_op = tf.summary.merge_all()
    return elbo
Python conv2d() example source code
def multiscale_features(graph, names, dims, size, scope='features'):
    """
    Extract features from multiple endpoints, apply 1x1 dimensionality
    reduction, and resize each map to the given size.
    """
    with tf.variable_scope(scope):
        endpoints = []
        for i, name in enumerate(names):
            endpoint = graph.get_tensor_by_name(name)
            if dims is not None:
                endpoint = slim.conv2d(endpoint, dims[i], 1,
                                       activation_fn=None,
                                       normalizer_fn=None)
            endpoint = tf.image.resize_images(endpoint, [size[0], size[1]])
            endpoints.append(endpoint)
        return tf.concat(endpoints, axis=3)
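A minimal usage sketch (not from the original file; TF 1.x with tf.contrib.slim assumed, and the endpoint tensor names below are illustrative):

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
net = slim.conv2d(images, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.conv2d(net, 128, [3, 3], scope='conv2')
# slim's default ReLU makes each endpoint available as '<scope>/Relu:0'
feats = multiscale_features(tf.get_default_graph(),
                            names=['conv1/Relu:0', 'conv2/Relu:0'],
                            dims=[32, 32],    # 1x1 reduction per endpoint
                            size=(224, 224))  # common output resolution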
def resnet_arg_scope(is_training=True,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    batch_norm_params = {
        # batch norm statistics and scale/offset are deliberately frozen here
        'is_training': False,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': False,
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }
    with arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
            weights_initializer=slim.variance_scaling_initializer(),
            trainable=is_training,
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc
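The returned scope is re-entered at model-build time, so every slim.conv2d created inside it picks up the regularizer, initializer, and frozen batch norm configured above. A minimal sketch (inputs is an assumed 4-D tensor):

with slim.arg_scope(resnet_arg_scope(is_training=True)):
    net = slim.conv2d(inputs, 64, [7, 7], stride=2, scope='conv1')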
Source: cpm_body_slim.py, project convolutional-pose-machines-tensorflow (author: timctho)
def _middle_conv(self, stage):
    with tf.variable_scope('stage_' + str(stage)):
        self.current_featuremap = tf.concat([self.stage_heatmap[stage - 2],
                                             self.sub_stage_img_feature,
                                             self.center_map],
                                            axis=3)
        with slim.arg_scope([slim.conv2d],
                            padding='SAME',
                            activation_fn=tf.nn.relu,
                            weights_initializer=tf.contrib.layers.xavier_initializer()):
            mid_net = slim.conv2d(self.current_featuremap, 128, [7, 7], scope='mid_conv1')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv2')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv3')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv4')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv5')
            mid_net = slim.conv2d(mid_net, 128, [1, 1], scope='mid_conv6')
            self.current_heatmap = slim.conv2d(mid_net, self.joints, [1, 1],
                                               scope='mid_conv7')
        self.stage_heatmap.append(self.current_heatmap)
Source: cpm_hand_slim.py, project convolutional-pose-machines-tensorflow (author: timctho)
def _middle_conv(self, stage):
    with tf.variable_scope('stage_' + str(stage)):
        self.current_featuremap = tf.concat([self.stage_heatmap[stage - 2],
                                             self.sub_stage_img_feature,
                                             # self.center_map,
                                             ],
                                            axis=3)
        with slim.arg_scope([slim.conv2d],
                            padding='SAME',
                            activation_fn=tf.nn.relu,
                            weights_initializer=tf.contrib.layers.xavier_initializer()):
            mid_net = slim.conv2d(self.current_featuremap, 128, [7, 7], scope='mid_conv1')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv2')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv3')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv4')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv5')
            mid_net = slim.conv2d(mid_net, 128, [1, 1], scope='mid_conv6')
            self.current_heatmap = slim.conv2d(mid_net, self.joints, [1, 1],
                                               scope='mid_conv7')
        self.stage_heatmap.append(self.current_heatmap)
def arg_scope(self):
    """Configure the neural network's layers."""
    batch_norm_params = {
        "is_training": self.is_training,
        "decay": 0.9997,
        "epsilon": 0.001,
        "variables_collections": {
            "beta": None,
            "gamma": None,
            "moving_mean": ["moving_vars"],
            "moving_variance": ["moving_vars"]
        }
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(
                            stddev=self._hparams.init_stddev),
                        weights_regularizer=slim.l2_regularizer(
                            self._hparams.regularize_constant),
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params) as sc:
        return sc
def upsample(x, scale=2, features=64, activation=tf.nn.relu):
    assert scale in [2, 3, 4]
    x = slim.conv2d(x, features, [3, 3], activation_fn=activation)
    if scale == 2:
        ps_features = 3 * (scale ** 2)
        x = slim.conv2d(x, ps_features, [3, 3], activation_fn=activation)
        # x = slim.conv2d_transpose(x, ps_features, 6, stride=1, activation_fn=activation)
        x = PS(x, 2, color=True)
    elif scale == 3:
        ps_features = 3 * (scale ** 2)
        x = slim.conv2d(x, ps_features, [3, 3], activation_fn=activation)
        # x = slim.conv2d_transpose(x, ps_features, 9, stride=1, activation_fn=activation)
        x = PS(x, 3, color=True)
    elif scale == 4:
        ps_features = 3 * (2 ** 2)
        for i in range(2):
            x = slim.conv2d(x, ps_features, [3, 3], activation_fn=activation)
            # x = slim.conv2d_transpose(x, ps_features, 6, stride=1, activation_fn=activation)
            x = PS(x, 2, color=True)
    return x
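PS is the periodic-shuffling (sub-pixel convolution) helper from the surrounding EDSR-style code and is not defined in this snippet. A minimal stand-in for NHWC color tensors, equivalent up to the exact channel interleaving, is tf.depth_to_space:

def PS(x, r, color=True):
    # rearranges each r*r group of channels into an r x r spatial patch,
    # turning [N, H, W, 3*r*r] into [N, H*r, W*r, 3]
    return tf.depth_to_space(x, r)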
def vgg_arg_scope(weight_decay=0.0005):
    """Defines the VGG arg scope.

    Args:
      weight_decay: The l2 regularization coefficient.

    Returns:
      An arg_scope.
    """
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        biases_initializer=tf.zeros_initializer()):
        with slim.arg_scope([slim.conv2d], padding='SAME'):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
                                  activation_fn=None,
                                  batch_norm_decay=0.997,
                                  batch_norm_epsilon=1e-5,
                                  batch_norm_scale=True):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    with slim.arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=slim.variance_scaling_initializer(),
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):
    with slim.arg_scope(
            [slim.conv2d, slim.conv2d_transpose],
            padding='SAME',
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
            activation_fn=activation_fn,
            normalizer_fn=normalizer_fn):
        with slim.arg_scope(
                [slim.fully_connected],
                weights_regularizer=slim.l2_regularizer(weight_decay),
                weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
                activation_fn=activation_fn,
                normalizer_fn=normalizer_fn) as arg_sc:
            return arg_sc
def gaussian_stochastic(self, input_tensor, num_maps, scope):
    """
    :param input_tensor: Tensor input to the block (expanded to 4-D if needed)
    :param num_maps: number of output feature maps for the mean and variance
    :return: a single reparameterized draw of the Gaussian random variable
        (mean and variance maps are recorded via split_labeled_unlabeled)
    """
    with tf.variable_scope(scope):
        input_tensor = tf.expand_dims(tf.expand_dims(input_tensor, 1), 1) \
            if len(input_tensor.get_shape()) != 4 else input_tensor
        intermediate = slim.conv2d(input_tensor, self._hidden_size, [1, 1],
                                   weights_initializer=self._initializer,
                                   scope='conv1')
        mean = slim.conv2d(intermediate, num_maps, [1, 1],
                           weights_initializer=self._initializer,
                           activation_fn=None, scope='mean')
        sigma2 = tf.nn.softplus(
            slim.conv2d(intermediate, num_maps, [1, 1],
                        weights_initializer=self._initializer,
                        activation_fn=None, scope='sigma2'))
        # reparameterized draw: mean + sigma * eps, with eps ~ N(0, 1)
        rv_single_draw = mean + tf.sqrt(sigma2) * tf.random_normal(tf.shape(mean))
        self.split_labeled_unlabeled(mean, '{}_mu'.format(scope))
        self.split_labeled_unlabeled(sigma2, '{}_sigma2'.format(scope))
        self.split_labeled_unlabeled(rv_single_draw, '{}_sample'.format(scope))
        return rv_single_draw
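The draw above is the standard reparameterization trick. A sketch (an assumption, not part of the original class) of the Gaussian KL term such a layer typically contributes to the ELBO, taken against a unit-normal prior:

# KL(N(mean, sigma2) || N(0, 1)), summed over maps and batch
kl = 0.5 * tf.reduce_sum(
    tf.square(mean) + sigma2 - tf.log(sigma2 + 1e-8) - 1.0)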
def multinomial_stochastic(self, input_tensor, num_maps, scope):
    """
    :param input_tensor: Tensor input to the block (expanded to 4-D if needed)
    :param num_maps: number of output feature maps for the logits
    :return: the softmax probabilities used as the random variable's single
        draw (logits are recorded via split_labeled_unlabeled)
    """
    with tf.variable_scope(scope):
        input_tensor = tf.expand_dims(tf.expand_dims(input_tensor, 1), 1) \
            if len(input_tensor.get_shape()) != 4 else input_tensor
        intermediate = slim.conv2d(input_tensor, self._hidden_size, [1, 1],
                                   weights_initializer=self._initializer,
                                   scope='conv1')
        pi = slim.conv2d(intermediate, num_maps, [1, 1],
                         weights_initializer=self._initializer,
                         activation_fn=None, scope='mean')
        rv_single_draw = tf.nn.softmax(pi)
        self.split_labeled_unlabeled(pi, '{}_pi'.format(scope))
        self.split_labeled_unlabeled(rv_single_draw, '{}_sample'.format(scope))
        return rv_single_draw
def classify(self, model_range, seg_range, feature_lr, classifier_lr):
    feat_opt = tf.train.AdamOptimizer(feature_lr)
    clas_opt = tf.train.AdamOptimizer(classifier_lr)
    for model in model_range:
        for seg in seg_range:
            with tf.variable_scope('classifier-{}-{}'.format(model, seg)):
                self.preds[(model, seg)] = slim.conv2d(self.feature, 500, [1, 1])
                self.clas_vars[(model, seg)] = slim.get_model_variables()[-2:]
            with tf.variable_scope('losses-{}-{}'.format(model, seg)):
                self.losses[(model, seg)] = self.loss(self.labels, self.preds[(model, seg)])
                grad = tf.gradients(self.losses[(model, seg)],
                                    self.feat_vars + self.clas_vars[(model, seg)])
                train_op_feat = feat_opt.apply_gradients(zip(grad[:-2], self.feat_vars))
                train_op_clas = clas_opt.apply_gradients(zip(grad[-2:], self.clas_vars[(model, seg)]))
                self.train_ops[(model, seg)] = tf.group(train_op_feat, train_op_clas)
    return self.losses, self.train_ops
def squeezenet(inputs,
               num_classes=1000,
               is_training=True,
               keep_prob=0.5,
               spatial_squeeze=True,
               scope='squeeze'):
    """
    SqueezeNet v1.1
    """
    with tf.name_scope(scope, 'squeeze', [inputs]) as sc:
        end_points_collection = sc + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with slim.arg_scope([slim.conv2d, slim.max_pool2d,
                             slim.avg_pool2d, fire_module],
                            outputs_collections=end_points_collection):
            nets = squeezenet_inference(inputs, is_training, keep_prob)
            nets = slim.conv2d(nets, num_classes, [1, 1],
                               activation_fn=None,
                               normalizer_fn=None,
                               scope='logits')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if spatial_squeeze:
                nets = tf.squeeze(nets, [1, 2], name='logits/squeezed')
    return nets, end_points
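A minimal usage sketch (assumes the fire_module and squeezenet_inference helpers defined elsewhere in the same file):

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = squeezenet(images, num_classes=1000,
                                is_training=True, keep_prob=0.5)
# logits: [batch, 1000] after the spatial squeeze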
def densenet_block(inputs, layer_num, growth, bc_mode, scope, is_training, keep_prob):
    with tf.variable_scope(scope, 'block1', [inputs]):
        currents = inputs
        for idx in range(layer_num):
            if not bc_mode:
                new_feature = slim.conv2d(currents, growth,
                                          [3, 3], scope='conv_{:d}'.format(idx))
                new_feature = slim.dropout(new_feature, keep_prob=keep_prob,
                                           is_training=is_training,
                                           scope='dropout_{:d}'.format(idx))
            else:
                new_feature = slim.conv2d(currents, growth * 4,
                                          [1, 1], scope='bottom_{:d}'.format(idx))
                new_feature = slim.dropout(new_feature, keep_prob=keep_prob,
                                           is_training=is_training,
                                           scope='dropout_b_{:d}'.format(idx))
                new_feature = slim.conv2d(new_feature, growth,
                                          [3, 3], scope='conv_{:d}'.format(idx))
                new_feature = slim.dropout(new_feature, keep_prob=keep_prob,
                                           is_training=is_training,
                                           scope='dropout_{:d}'.format(idx))
            currents = tf.concat([currents, new_feature], axis=3)
        return currents
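Each iteration concatenates growth new channels onto the running feature map, so the block maps C input channels to C + layer_num * growth outputs. A usage sketch with illustrative shapes:

x = tf.placeholder(tf.float32, [None, 32, 32, 16])
y = densenet_block(x, layer_num=4, growth=12, bc_mode=True,
                   scope='block1', is_training=True, keep_prob=0.8)
# y has 16 + 4 * 12 = 64 channels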
def _create_conv_layers(self):
    """
    Create convolutional layers in the Tensorflow graph according to the
    hyperparameters, using the Tensorflow slim library.

    Returns
    -------
    conv_layers: list
        The list of convolutional operations.
    """
    lengths_set = {len(o) for o in (self._num_outputs, self._kernel_size, self._stride)}
    if len(lengths_set) != 1:
        msg = ("The lengths of the conv. layer parameter vectors must all be "
               "the same. Lengths: {}, Vectors: {}").format(
            [len(o) for o in (self._num_outputs, self._kernel_size, self._stride)],
            (self._num_outputs, self._kernel_size, self._stride))
        raise ValueError(msg)
    conv_layers = []
    inputs = [self.state]
    for i, (num_out, kernel, stride) in enumerate(zip(self._num_outputs,
                                                      self._kernel_size,
                                                      self._stride)):
        layer = slim.conv2d(activation_fn=tf.nn.elu, inputs=inputs[i],
                            num_outputs=num_out, kernel_size=kernel,
                            stride=stride, padding='SAME')
        conv_layers.append(layer)
        inputs.append(layer)
    return conv_layers
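The three hyperparameter attributes are parallel vectors with one entry per layer, which is exactly what the length check above enforces. An illustrative (assumed) Atari-style configuration:

num_outputs = [16, 32, 32]  # filters for each of the three layers
kernel_size = [8, 4, 3]     # square kernel sizes
stride = [4, 2, 1]          # matching strides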
def build_model(self):
    with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], activation_fn=tf.nn.elu):
        with tf.variable_scope('model', reuse=self.reuse_variables):
            self.left_pyramid = self.scale_pyramid(self.left, 4)
            if self.mode == 'train':
                self.right_pyramid = self.scale_pyramid(self.right, 4)
            if self.params.do_stereo:
                self.model_input = tf.concat([self.left, self.right], 3)
            else:
                self.model_input = self.left
            # build model
            if self.params.encoder == 'vgg':
                self.build_vgg()
            elif self.params.encoder == 'resnet50':
                self.build_resnet50()
            else:
                return None
def _bn_relu_conv_block(input,
                        filters,
                        kernel=(3, 3),
                        stride=(1, 1),
                        weight_decay=5e-4):
    ''' Adds a Conv-BatchNorm-ReLU block for the DPN.

    Args:
        input: input tensor
        filters: number of output filters
        kernel: convolution kernel size
        stride: stride of convolution
    Returns: a tensor
    '''
    channel_axis = -1
    x = slim.conv2d(input, filters, kernel, padding='SAME', stride=stride,
                    weights_regularizer=slim.l2_regularizer(weight_decay),
                    weights_initializer=tf.contrib.layers.xavier_initializer(),
                    biases_initializer=None)
    x = slim.batch_norm(x)
    x = tf.nn.relu(x)
    return x
def _root_block(input,
                initial_conv_filters,
                weight_decay=5e-4,
                ksize=(7, 7),
                is_pool=True):
    ''' Adds an initial conv block, with batch norm and relu, for the DPN.

    Args:
        input: input tensor
        initial_conv_filters: number of filters for the initial conv block
        weight_decay: weight decay factor
    Returns: a tensor
    '''
    x = slim.conv2d(input,
                    initial_conv_filters,
                    ksize,
                    padding='SAME',
                    stride=(1, 1),
                    weights_regularizer=slim.l2_regularizer(weight_decay),
                    weights_initializer=tf.contrib.layers.xavier_initializer(),
                    biases_initializer=None)
    x = slim.batch_norm(x)
    x = tf.nn.relu(x)
    if is_pool:
        x = slim.max_pool2d(x, (3, 3), stride=(2, 2), padding='SAME')
    return x
def gcn_block(inputs,
              num_class,
              kernel_size,
              scope=None):
    with tf.variable_scope(scope, 'gcn_block', [inputs]):
        with slim.arg_scope([slim.conv2d],
                            padding='SAME',
                            activation_fn=None,
                            normalizer_fn=None,
                            normalizer_params=None,
                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                            weights_regularizer=tf.contrib.layers.l2_regularizer(0.0001),
                            biases_initializer=tf.zeros_initializer(),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(0.0002)):
            left_conv1 = slim.conv2d(inputs, num_class, [kernel_size, 1])
            left_conv2 = slim.conv2d(left_conv1, num_class, [1, kernel_size])
            right_conv1 = slim.conv2d(inputs, num_class, [1, kernel_size])
            right_conv2 = slim.conv2d(right_conv1, num_class, [kernel_size, 1])
            result_sum = tf.add(left_conv2, right_conv2, name='gcn_module')
            return result_sum
def gcn_br(inputs, scope):
    with tf.variable_scope(scope, 'gcn_br', [inputs]):
        with slim.arg_scope([slim.conv2d],
                            padding='SAME',
                            activation_fn=tf.nn.relu,
                            normalizer_fn=None,
                            normalizer_params=None,
                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                            weights_regularizer=tf.contrib.layers.l2_regularizer(0.0001),
                            biases_initializer=tf.zeros_initializer(),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(0.0002)):
            num_class = inputs.get_shape()[3]
            conv = slim.conv2d(inputs, num_class, [3, 3])
            conv = slim.conv2d(conv, num_class, [3, 3], activation_fn=None)
            result_sum = tf.add(inputs, conv, name='fcn_br')
            return result_sum
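The two factorized branches in gcn_block (k x 1 followed by 1 x k, and the transpose) emulate a dense k x k kernel at a fraction of the parameter cost, and gcn_br adds a residual boundary-refinement step. A combined usage sketch with assumed shapes:

feat = tf.placeholder(tf.float32, [None, 64, 64, 256])
score = gcn_block(feat, num_class=21, kernel_size=15, scope='gcn1')
score = gcn_br(score, scope='br1')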
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 resnet block."""
    with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
            tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
        mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
# Inception-ResNet-C
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 resnet block."""
    with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
                                        scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
                                        scope='Conv2d_0c_3x1')
        mixed = tf.concat([tower_conv, tower_conv1_2], 3)
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
def inference(images, keep_probability, phase_train=True,
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v2(images, is_training=phase_train,
                                   dropout_keep_prob=keep_probability,
                                   bottleneck_layer_size=bottleneck_layer_size,
                                   reuse=reuse)
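A minimal usage sketch, assuming the inception_resnet_v2 this wrapper calls returns (net, end_points) as in the facenet-style code it accompanies:

images = tf.placeholder(tf.float32, [None, 160, 160, 3])
prelogits, end_points = inference(images, keep_probability=0.8,
                                  phase_train=True,
                                  bottleneck_layer_size=128,
                                  weight_decay=5e-4)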
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 resnet block."""
    with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv2_1 = slim.conv2d(tower_conv2_0, 32, 3, scope='Conv2d_0b_3x3')
            tower_conv2_2 = slim.conv2d(tower_conv2_1, 32, 3, scope='Conv2d_0c_3x3')
        mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
# Inception-ResNet-B
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 17x17 resnet block."""
    with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 128, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 128, [1, 7],
                                        scope='Conv2d_0b_1x7')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 128, [7, 1],
                                        scope='Conv2d_0c_7x1')
        mixed = tf.concat([tower_conv, tower_conv1_2], 3)
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
# Inception-ResNet-C
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 resnet block."""
    with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 192, [1, 3],
                                        scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [3, 1],
                                        scope='Conv2d_0c_3x1')
        mixed = tf.concat([tower_conv, tower_conv1_2], 3)
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net