def reduction_b(net):
    with tf.variable_scope('Branch_0'):
        tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
                                   padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_1'):
        tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        tower_conv1_1 = slim.conv2d(tower_conv1, 256, 3, stride=2,
                                    padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_2'):
        tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        tower_conv2_1 = slim.conv2d(tower_conv2, 256, 3,
                                    scope='Conv2d_0b_3x3')
        tower_conv2_2 = slim.conv2d(tower_conv2_1, 256, 3, stride=2,
                                    padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_3'):
        tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
    net = tf.concat([tower_conv_1, tower_conv1_1,
                     tower_conv2_2, tower_pool], 3)
    return net
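
A minimal usage sketch (the 896-channel 17x17 input and the 'Mixed_7a' scope name follow the usual Inception-ResNet-v1 schedule and are assumptions here): the three conv towers and the pooled input concatenate to double the channel count while the VALID stride-2 ops halve the grid.

import tensorflow as tf
import tensorflow.contrib.slim as slim

feat = tf.placeholder(tf.float32, [None, 17, 17, 896])
with tf.variable_scope('Mixed_7a'):
    out = reduction_b(feat)
print(out.get_shape())  # (?, 8, 8, 1792): 384 + 256 + 256 + 896 channels
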
def inference(images, keep_probability, phase_train=True,
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # Epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # Force in-place updates of mean and variance estimates.
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection.
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v1(images, is_training=phase_train,
                                   dropout_keep_prob=keep_probability,
                                   bottleneck_layer_size=bottleneck_layer_size,
                                   reuse=reuse)
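
A standalone sketch of what the arg_scope above buys (sizes are illustrative, not from the source): every slim.conv2d and slim.fully_connected created inside it inherits the initializer, regularizer, and batch-norm settings, and a per-call argument still overrides the scope default.

import tensorflow as tf
import tensorflow.contrib.slim as slim

images = tf.placeholder(tf.float32, [None, 160, 160, 3])
with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                    normalizer_fn=slim.batch_norm):
    a = slim.conv2d(images, 32, 3, scope='demo')                       # batch-normalized by default
    b = slim.conv2d(a, 32, 3, normalizer_fn=None, scope='demo_plain')  # per-call override wins
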
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = images
                net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')
                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')
                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')
                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')
                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
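
leaky_relu and conv2d_block are called above but not defined in this listing. A plausible reconstruction (an assumption, not taken from the source) that matches the call sites, where the 0.1 after conv2d_block is consumed as a residual scale before the usual conv2d arguments:

import tensorflow as tf
import tensorflow.contrib.slim as slim

def leaky_relu(x):
    # Assumed negative-side slope of 0.1.
    return tf.maximum(0.1 * x, x)

def conv2d_block(inp, scale, *args, **kwargs):
    # Residual convolution: add a scaled conv of the input back onto it,
    # so slim.repeat(net, 3, conv2d_block, 0.1, ...) applies three
    # shape-preserving refinements per resolution.
    return inp + scale * slim.conv2d(inp, *args, **kwargs)
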
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
def __create_network(self, scope, img_shape=(80, 80)):
    with tf.variable_scope(self.task_name):
        with tf.variable_scope(scope):
            with tf.variable_scope('input_data'):
                self.inputs = tf.placeholder(shape=[None, *img_shape, cfg.HIST_LEN], dtype=tf.float32)
            with tf.variable_scope('networks'):
                with tf.variable_scope('conv_1'):
                    self.conv_1 = slim.conv2d(activation_fn=tf.nn.relu, inputs=self.inputs, num_outputs=32,
                                              kernel_size=[8, 8], stride=4, padding='SAME', trainable=self.is_train)
                with tf.variable_scope('conv_2'):
                    self.conv_2 = slim.conv2d(activation_fn=tf.nn.relu, inputs=self.conv_1, num_outputs=64,
                                              kernel_size=[4, 4], stride=2, padding='SAME', trainable=self.is_train)
                with tf.variable_scope('conv_3'):
                    self.conv_3 = slim.conv2d(activation_fn=tf.nn.relu, inputs=self.conv_2, num_outputs=64,
                                              kernel_size=[3, 3], stride=1, padding='SAME', trainable=self.is_train)
                with tf.variable_scope('f_c'):
                    self.fc = slim.fully_connected(slim.flatten(self.conv_3), 512,
                                                   activation_fn=tf.nn.elu, trainable=self.is_train)
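
For the default 80x80 input, the three SAME-padded convolutions shrink the grid by their strides; a quick pure-Python check of the feature count that slim.flatten hands to the 512-unit layer:

import math

def same_out(size, stride):
    # Spatial output size of a SAME-padded convolution.
    return math.ceil(size / stride)

h = same_out(80, 4)  # conv_1 -> 20
h = same_out(h, 2)   # conv_2 -> 10
h = same_out(h, 1)   # conv_3 -> 10
print(h * h * 64)    # 6400 inputs to the fully connected layer
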
def block17(x, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 17x17 ResNet block."""
    with tf.variable_scope(scope, 'Block17', [x], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(x, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(x, 128, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7], scope='Conv2d_0b_1x7')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1], scope='Conv2d_0c_7x1')
        mixed = tf.concat(values=[tower_conv, tower_conv1_2], axis=3)
        up = slim.conv2d(mixed, x.get_shape()[3], 1,
                         normalizer_fn=None,
                         activation_fn=None,
                         scope='Conv2d_1x1')
        x += scale * up
        if activation_fn:
            x = activation_fn(x)
    return x
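
These residual blocks are built to be stacked without changing the feature-map shape: the 1x1 projection brings the mixed towers back to the input channel count before the scaled addition. A sketch of the usual pattern (the repeat count, scale, and 17x17x1088 grid follow the reference Inception-ResNet-v2 schedule and are assumptions here):

import tensorflow as tf
import tensorflow.contrib.slim as slim

net = tf.placeholder(tf.float32, [None, 17, 17, 1088])
net = slim.repeat(net, 20, block17, scale=0.10)  # shape stays (?, 17, 17, 1088)
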
# Inception-ResNet-C
# (2 branches)
def block8(x, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 ResNet block."""
    with tf.variable_scope(scope, 'Block8', [x], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(x, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(x, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3], scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1], scope='Conv2d_0c_3x1')
        mixed = tf.concat(values=[tower_conv, tower_conv1_2], axis=3)
        up = slim.conv2d(mixed, x.get_shape()[3], 1,
                         normalizer_fn=None,
                         activation_fn=None,
                         scope='Conv2d_1x1')
        x += scale * up
        if activation_fn:
            x = activation_fn(x)
    return x
def inference(inputs, keep_prob,
              bottleneck_size=128,
              phase_train=True,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'updates_collections': None,
        # 'scale': True,  # [test1: add 'gamma']
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]}  # [test2: removed from 'trainable_variables']
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            biases_regularizer=slim.l2_regularizer(weight_decay),  # [test4: add weight_decay to biases]
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        return inception_resnet_v2(
            inputs,
            is_training=phase_train,
            keep_prob=keep_prob,
            bottleneck_size=bottleneck_size,
            reuse=reuse)
def block35(x, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 ResNet block."""
    with tf.variable_scope(scope, 'Block35', [x], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(x, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(x, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            tower_conv2_0 = slim.conv2d(x, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv2_1 = slim.conv2d(tower_conv2_0, 32, 3, scope='Conv2d_0b_3x3')
            tower_conv2_2 = slim.conv2d(tower_conv2_1, 32, 3, scope='Conv2d_0c_3x3')
        # Tensor layout is NxHxWxC; concatenate along the channel axis.
        mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)
        # The 1x1 projection must emit as many channels as the block's input.
        up = slim.conv2d(mixed, x.get_shape()[3], 1, normalizer_fn=None, activation_fn=None, scope='Conv2d_1x1')
        x += scale * up
        if activation_fn:
            x = activation_fn(x)
    return x
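
As with the other blocks, block35 is stacked with slim.repeat; the small residual scale keeps the summed activations from growing as blocks accumulate. A sketch (5 repetitions, scale 0.17, and the 35x35x256 grid follow the reference Inception-ResNet-v1 schedule, assumed here):

import tensorflow as tf
import tensorflow.contrib.slim as slim

net = tf.placeholder(tf.float32, [None, 35, 35, 256])
net = slim.repeat(net, 5, block35, scale=0.17)  # shape stays (?, 35, 35, 256)
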
# Inception-ResNet-C
# (2 branches)
def block8(x, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 ResNet block."""
    with tf.variable_scope(scope, 'Block8', [x], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(x, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(x, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 192, [1, 3], scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [3, 1], scope='Conv2d_0c_3x1')
        mixed = tf.concat([tower_conv, tower_conv1_2], 3)
        up = slim.conv2d(mixed, x.get_shape()[3], 1, normalizer_fn=None, activation_fn=None, scope='Conv2d_1x1')
        x += scale * up
        if activation_fn:
            x = activation_fn(x)
    return x
def inference(inputs, keep_prob,
              bottleneck_size=128,
              phase_train=True,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'updates_collections': None,
        # 'scale': True,  # [test1]
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]}  # [test2]
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        return inception_resnet_v1(inputs, is_training=phase_train, keep_prob=keep_prob,
                                   bottleneck_size=bottleneck_size, reuse=reuse)
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
                                  activation_fn=None,
                                  batch_norm_decay=0.997,
                                  batch_norm_epsilon=1e-5,
                                  batch_norm_scale=True):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        # UPDATE_OPS_EXTRA appears to be a collection key defined elsewhere
        # in this repo; stock tf.GraphKeys only provides UPDATE_OPS.
        'updates_collections': tf.GraphKeys.UPDATE_OPS_EXTRA,
    }
    with slim.arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=slim.variance_scaling_initializer(),
            activation_fn=tf.nn.relu,  # note: the activation_fn parameter above is ignored here
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
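
A usage sketch (input size and layer width are illustrative; it also assumes the repo's UPDATE_OPS_EXTRA key is defined, as noted above): the returned scope is re-entered at build time, so every conv picked up inside it gets the initializer, decay, ReLU, and batch norm without repeating the arguments.

import tensorflow as tf
import tensorflow.contrib.slim as slim

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(_extra_conv_arg_scope_with_bn(weight_decay=1e-5)):
    net = slim.conv2d(images, 64, 3, scope='refine_conv1')  # BN + ReLU via the scope
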
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):
    with slim.arg_scope(
            [slim.conv2d, slim.conv2d_transpose],
            padding='SAME',
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
            activation_fn=activation_fn,
            normalizer_fn=normalizer_fn):
        # The inner scope nests inside the outer one, so returning it
        # captures both sets of defaults.
        with slim.arg_scope(
                [slim.fully_connected],
                weights_regularizer=slim.l2_regularizer(weight_decay),
                weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
                activation_fn=activation_fn,
                normalizer_fn=normalizer_fn) as arg_sc:
            return arg_sc
def resnet_arg_scope(is_training=True,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    batch_norm_params = {
        # Batch norm is deliberately frozen: neither the statistics nor
        # gamma/beta update, regardless of the is_training argument.
        'is_training': False,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': False,
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }
    with arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
            weights_initializer=slim.variance_scaling_initializer(),
            trainable=is_training,
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc
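
Here is_training only toggles whether the conv weights train; batch norm stays frozen either way, the usual recipe when fine-tuning a detection backbone. A usage sketch (input is illustrative; calling resnet_arg_scope assumes this repo's cfg is importable):

import tensorflow as tf
import tensorflow.contrib.slim as slim

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(resnet_arg_scope(is_training=True)):
    net = slim.conv2d(images, 256, 3, scope='rpn_conv')  # trainable weights, frozen BN
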
def _image_to_head(self, is_training, reuse=None):
    with tf.variable_scope(self._scope, self._scope, reuse=reuse):
        net = slim.repeat(self._image, 2, slim.conv2d, 64, [3, 3],
                          trainable=False, scope='conv1')
        net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool1')
        net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3],
                          trainable=False, scope='conv2')
        net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool2')
        net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3],
                          trainable=is_training, scope='conv3')
        net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool3')
        net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
                          trainable=is_training, scope='conv4')
        net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool4')
        net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
                          trainable=is_training, scope='conv5')
    self._act_summaries.append(net)
    self._layers['head'] = net
    return net
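
This is the VGG-16 head with conv1/conv2 frozen and no pool after conv5, so the returned map has stride 16 relative to the input. A quick size check (pure Python; the 600x800 image is illustrative):

def vgg_head_size(h, w):
    # Four SAME 2x2/2 max pools; conv5 keeps the resolution of pool4.
    for _ in range(4):
        h, w = (h + 1) // 2, (w + 1) // 2
    return h, w

print(vgg_head_size(600, 800))  # (38, 50): a 38x50x512 head for a 600x800 image
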
def create_network(self, input, trainable):
    if trainable:
        wr = slim.l2_regularizer(self.regularization)
    else:
        wr = None
    # The input is a stack of black-and-white frames;
    # move the stack to the channel axis (last in tf).
    input_t = tf.transpose(input, [0, 2, 3, 1])
    net = slim.conv2d(input_t, 8, (7, 7), data_format="NHWC",
                      activation_fn=tf.nn.relu, stride=3, weights_regularizer=wr, trainable=trainable)
    net = slim.max_pool2d(net, 2, 2)
    net = slim.conv2d(net, 16, (3, 3), data_format="NHWC",
                      activation_fn=tf.nn.relu, weights_regularizer=wr, trainable=trainable)
    net = slim.max_pool2d(net, 2, 2)
    net = slim.flatten(net)
    net = slim.fully_connected(net, 256, activation_fn=tf.nn.relu,
                               weights_regularizer=wr, trainable=trainable)
    q_state_action_values = slim.fully_connected(net, self.dim_actions,
                                                 activation_fn=None, weights_regularizer=wr, trainable=trainable)
    return q_state_action_values
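
A calling sketch (the frame count, resolution, and the 'agent' instance are assumptions for illustration): the placeholder is laid out as [batch, stacked frames, height, width], which create_network transposes to NHWC before the first convolution.

import tensorflow as tf

frames = tf.placeholder(tf.float32, [None, 4, 84, 84])   # 4 stacked grayscale frames (assumed)
q_values = agent.create_network(frames, trainable=True)  # 'agent': an instance of this class
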
def __arg_scope(self, weight_decay=0.0005, data_format='NHWC'):
    """Defines the VGG arg scope.

    Args:
        weight_decay: The l2 regularization coefficient.
    Returns:
        An arg_scope.
    """
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        weights_initializer=tf.contrib.layers.xavier_initializer(),
                        biases_initializer=tf.zeros_initializer()):
        with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                            padding='SAME',
                            data_format=data_format):
            with slim.arg_scope([custom_layers.pad2d,
                                 custom_layers.l2_normalization,
                                 custom_layers.channel_to_last],
                                data_format=data_format) as sc:
                return sc
def __call__(self, inputs, state, scope=None):
    output, res_state = self._cell(inputs, state)
    projected = None
    with tf.variable_scope(scope or self._name):
        if self._spec['name'] == 'fc':
            projected = slim.fully_connected(output, self._spec['size'], activation_fn=None)
        elif self._spec['name'] == 't_conv':
            projected = slim.layers.conv2d_transpose(output, self._spec['size'], self._spec['kernel'],
                                                     self._spec['stride'], activation_fn=None)
        elif self._spec['name'] == 'r_conv':
            resized = tf.image.resize_images(output, (self._spec['stride'][0] * output.get_shape()[1].value,
                                                      self._spec['stride'][1] * output.get_shape()[2].value),
                                             method=1)
            projected = slim.layers.conv2d(resized, self._spec['size'], self._spec['kernel'], activation_fn=None)
        else:
            raise ValueError('Unknown layer name "{}"'.format(self._spec['name']))
    return projected, res_state