def residual_block(net, ch=256, ch_inner=128, scope=None, reuse=None, stride=1):
    """
    Bottleneck v2 (pre-activation) residual block.
    """
    with slim.arg_scope([layers.convolution2d],
                        activation_fn=None,
                        normalizer_fn=None):
        with tf.variable_scope(scope, 'ResidualBlock', reuse=reuse):
            in_net = net
            if stride > 1:
                # Projection shortcut: 1x1 convolution to match shape when downsampling.
                net = layers.convolution2d(net, ch, kernel_size=1, stride=stride)

            # Pre-activation bottleneck: BN -> ReLU -> 1x1 -> BN -> ReLU -> 3x3 -> BN -> ReLU -> 1x1.
            in_net = layers.batch_norm(in_net)
            in_net = tf.nn.relu(in_net)
            in_net = layers.convolution2d(in_net, ch_inner, 1)

            in_net = layers.batch_norm(in_net)
            in_net = tf.nn.relu(in_net)
            in_net = layers.convolution2d(in_net, ch_inner, 3, stride=stride)

            in_net = layers.batch_norm(in_net)
            in_net = tf.nn.relu(in_net)
            in_net = layers.convolution2d(in_net, ch, 1, activation_fn=None)

            net = tf.nn.relu(in_net + net)
    return net
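A minimal usage sketch for the block above (not part of the original source). It assumes the TF 1.x contrib aliases slim = tf.contrib.slim and layers = tf.contrib.layers that the function body relies on, and the input shapes are illustrative only.

# Hypothetical wiring for residual_block (TF 1.x contrib API); shapes are illustrative.
import tensorflow as tf

slim = tf.contrib.slim
layers = tf.contrib.layers

inputs = tf.placeholder(tf.float32, [None, 56, 56, 256])
net = residual_block(inputs, ch=256, ch_inner=64, scope='block1')            # identity shortcut
net = residual_block(net, ch=512, ch_inner=128, scope='block2', stride=2)    # 1x1 projection shortcut, downsamples by 2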
Python convolution2d() examples (source code)
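Most snippets below use the TF 1.x contrib API tf.contrib.layers.convolution2d (an alias of tf.contrib.layers.conv2d). A minimal, self-contained sketch of the call, with illustrative shapes and names:

# Minimal convolution2d sketch (TF 1.x); the input placeholder and scope name are illustrative.
import tensorflow as tf
from tensorflow.contrib import layers

images = tf.placeholder(tf.float32, [None, 28, 28, 3])   # NHWC input
net = layers.convolution2d(images,
                           num_outputs=32,                # number of filters
                           kernel_size=[3, 3],            # a single int such as 3 also works
                           stride=1,
                           padding='SAME',                 # default
                           activation_fn=tf.nn.relu,       # default
                           scope='conv1')
# -> shape [None, 28, 28, 32]; creates variables 'conv1/weights' and 'conv1/biases'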
def conv1d(self, net, num_ker, ker_size, stride):
    # 1-D convolution, implemented as a 2-D convolution with a [ker_size, 1] kernel.
    # convolution2d, variance_scaling_initializer and l2_regularizer come from tf.contrib.layers.
    net = convolution2d(
        net,
        num_outputs=num_ker,
        kernel_size=[ker_size, 1],
        stride=[stride, 1],
        padding='SAME',
        activation_fn=None,
        normalizer_fn=None,
        weights_initializer=variance_scaling_initializer(),
        weights_regularizer=l2_regularizer(self.weight_decay),
        biases_initializer=tf.zeros_initializer())
    return net
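Because the helper above slides a [ker_size, 1] kernel, its input must be a 4-D NHWC tensor whose width dimension is 1. A hedged sketch of how such an input might be prepared (the reshape below is an assumption, not shown in the original file):

# Assumed input preparation: a [batch, length, channels] sequence gets a dummy width axis.
signal = tf.placeholder(tf.float32, [None, 1024, 16])    # [batch, length, channels]
signal_4d = tf.expand_dims(signal, axis=2)               # [batch, length, 1, channels]
# self.conv1d(signal_4d, num_ker=32, ker_size=9, stride=2) then slides a 9x1 kernel
# along the length dimension only.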
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=tf.nn.relu)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            return state_score + action_scores_centered
        else:
            return action_scores
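A hedged call sketch for _cnn_to_mlp. The convolution and hidden-layer settings below mirror the usual DQN Atari configuration but are illustrative here, as are the img_in and num_actions names:

# Illustrative call: the classic three-layer DQN convnet followed by one hidden layer.
q_values = _cnn_to_mlp(
    convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],   # (num_outputs, kernel_size, stride)
    hiddens=[512],
    dueling=True,
    inpt=img_in,                                  # e.g. a [batch, 84, 84, 4] float tensor
    num_actions=num_actions,
    scope='q_func')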
def conv2d(self, net, num_ker, ker_size, stride):
    net = convolution2d(
        net,
        num_outputs=num_ker,
        kernel_size=[ker_size, ker_size],
        stride=[stride, stride],
        padding='SAME',
        activation_fn=None,
        normalizer_fn=None,
        weights_initializer=variance_scaling_initializer(),
        weights_regularizer=l2_regularizer(FLAGS.weight_decay),
        biases_initializer=tf.zeros_initializer())
    return net
def forward(image, num_actions):
    # Convolutional trunk
    out = layers.convolution2d(image, num_outputs=16, kernel_size=8, stride=4, activation_fn=tf.nn.relu, scope='conv1')
    out = layers.convolution2d(out, num_outputs=32, kernel_size=4, stride=2, activation_fn=tf.nn.relu, scope='conv2')
    out = layers.flatten(out, scope='flatten')
    out = layers.fully_connected(out, num_outputs=256, activation_fn=tf.nn.relu, scope='fc1')
    # Actor head (log-probabilities over actions) and critic head (state value)
    action_logprobs = tf.nn.log_softmax(layers.fully_connected(out, num_outputs=num_actions, activation_fn=None, scope='fc_actor'))
    value = layers.fully_connected(out, num_outputs=1, activation_fn=None, scope='fc_critic')
    value = tf.reshape(value, [-1])
    return action_logprobs, value
def dueling_model(img_in, num_actions, scope, noisy=False, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                state_hidden = noisy_dense(out, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                state_score = noisy_dense(state_hidden, name='noisy_fc2', size=1)
            else:
                state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
                state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                actions_hidden = noisy_dense(out, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                action_scores = noisy_dense(actions_hidden, name='noisy_fc2', size=num_actions)
            else:
                actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
                action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)

        return state_score + action_scores
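The mean subtraction implements the aggregation from the dueling paper: Q(s, a) = V(s) + (A(s, a) - mean over a' of A(s, a')). For example, with V(s) = 1.0 and raw advantages [2.0, 0.0, 1.0], the mean advantage is 1.0 and the resulting Q-values are [2.0, 0.0, 1.0].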
def policy_and_value_network(observations):
    # TODO: Baseline network, used in (Mnih et al., 2016)
    conv = tf_layers.convolution2d(observations, 16, 8, 4)
    conv = tf_layers.convolution2d(conv, 32, 4, 2)
    conv = tf_layers.flatten(conv)
    hidden_layer = tf_layers.fully_connected(conv, 128, activation_fn=tf.nn.relu)
    logits = tf_layers.linear(hidden_layer, env.actions)
    value = tf_layers.linear(hidden_layer, 1)
    # TODO: If you do not want to use baseline, uncomment the next line
    # value = tf.zeros([tf.shape(observations)[0], 1])
    return logits, value
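Note that tf_layers.linear is the contrib shorthand for a fully connected layer with activation_fn=None, so logits and value are plain affine outputs of the shared hidden layer.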
extracting_weights.py (project: Hands-On-Deep-Learning-with-TensorFlow, author: PacktPublishing)
def conv_learn(X, y, mode):
    # Ensure our images are 2d
    X = tf.reshape(X, [-1, 36, 36, 1])
    # We'll need these in one-hot format
    y = tf.one_hot(tf.cast(y, tf.int32), 5, 1, 0)

    # conv layer will compute 4 kernels for each 5x5 patch
    with tf.variable_scope('conv_layer'):
        # 5x5 convolution, pad with zeros on edges
        h1 = layers.convolution2d(X, num_outputs=4,
                                  kernel_size=[5, 5],
                                  activation_fn=tf.nn.relu)
        # 2x2 max pooling, no padding on edges
        p1 = tf.nn.max_pool(h1, ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1], padding='VALID')
        # Need to flatten conv output for use in dense layer
        p1_size = np.product(
            [s.value for s in p1.get_shape()[1:]])
        p1f = tf.reshape(p1, [-1, p1_size])

    # densely connected layer with 5 neurons and dropout
    h_fc1 = layers.fully_connected(p1f,
                                   5,
                                   activation_fn=tf.nn.relu)
    drop = layers.dropout(h_fc1, keep_prob=0.5,
                          is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
    logits = layers.fully_connected(drop, 5, activation_fn=None)
    loss = tf.losses.softmax_cross_entropy(y, logits)

    # Set up the training function manually
    train_op = layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adam',
        learning_rate=0.01)
    return tf.argmax(logits, 1), loss, train_op
# Use generic estimator with our function
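The trailing comment points at wrapping conv_learn in a tf.contrib.learn estimator. A hedged sketch of that wiring (the classifier name and the commented fit call are assumptions, not part of the excerpt):

# Hedged sketch of the estimator wiring the comment above alludes to (tf.contrib.learn, TF 1.x).
from tensorflow.contrib import learn

classifier = learn.Estimator(model_fn=conv_learn)
# classifier.fit(train_images, train_labels, steps=1024, batch_size=32)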
cnn.py (project: Hands-On-Deep-Learning-with-TensorFlow, author: PacktPublishing). This file defines the same conv_learn function as extracting_weights.py above.
layers_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi). The remaining snippets are unit tests for convolution2d from this file.
def testInvalidDataFormat(self):
    height, width = 7, 9
    with self.test_session():
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        with self.assertRaisesRegexp(ValueError, 'data_format'):
            layers_lib.convolution2d(images, 32, 3, data_format='CHWN')
def testCreateConv(self):
    height, width = 7, 9
    with self.test_session():
        images = np.random.uniform(size=(5, height, width, 4))
        output = layers_lib.convolution2d(images, 32, [3, 3])
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
        weights = variables.get_variables_by_name('weights')[0]
        self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
        biases = variables.get_variables_by_name('biases')[0]
        self.assertListEqual(biases.get_shape().as_list(), [32])
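As the assertions above show, convolution2d defaults to a scope named Conv and a ReLU activation (hence the op name Conv/Relu), and the weights variable is laid out as [kernel_height, kernel_width, in_channels, out_channels].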
def testCreateConvNCHW(self):
    height, width = 7, 9
    with self.test_session():
        images = np.random.uniform(size=(5, 4, height, width))
        output = layers_lib.convolution2d(images, 32, [3, 3], data_format='NCHW')
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), [5, 32, height, width])
        weights = variables.get_variables_by_name('weights')[0]
        self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
        biases = variables.get_variables_by_name('biases')[0]
        self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateSquareConv(self):
    height, width = 7, 9
    with self.test_session():
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        output = layers_lib.convolution2d(images, 32, 3)
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateConvWithTensorShape(self):
    height, width = 7, 9
    with self.test_session():
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        output = layers_lib.convolution2d(images, 32, images.get_shape()[1:3])
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testFullyConvWithCustomGetter(self):
    height, width = 7, 9
    with self.test_session():
        called = [0]

        def custom_getter(getter, *args, **kwargs):
            called[0] += 1
            return getter(*args, **kwargs)

        with variable_scope.variable_scope('test', custom_getter=custom_getter):
            images = random_ops.random_uniform((5, height, width, 32), seed=1)
            layers_lib.convolution2d(images, 64, images.get_shape()[1:3])
        self.assertEqual(called[0], 2)  # Custom getter called twice.
def testCreateVerticalConv(self):
    height, width = 7, 9
    with self.test_session():
        images = random_ops.random_uniform((5, height, width, 4), seed=1)
        output = layers_lib.convolution2d(images, 32, [3, 1])
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
        weights = variables.get_variables_by_name('weights')[0]
        self.assertListEqual(weights.get_shape().as_list(), [3, 1, 4, 32])
        biases = variables.get_variables_by_name('biases')[0]
        self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateHorizontalConv(self):
    height, width = 7, 9
    with self.test_session():
        images = random_ops.random_uniform((5, height, width, 4), seed=1)
        output = layers_lib.convolution2d(images, 32, [1, 3])
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
        weights = variables.get_variables_by_name('weights')[0]
        self.assertListEqual(weights.get_shape().as_list(), [1, 3, 4, 32])
def testCreateConvWithStride(self):
    height, width = 6, 8
    with self.test_session():
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        output = layers_lib.convolution2d(images, 32, [3, 3], stride=2)
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(),
                             [5, height / 2, width / 2, 32])
def testCreateConvCreatesWeightsAndBiasesVars(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.test_session():
        self.assertFalse(variables.get_variables('conv1/weights'))
        self.assertFalse(variables.get_variables('conv1/biases'))
        layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
        self.assertTrue(variables.get_variables('conv1/weights'))
        self.assertTrue(variables.get_variables('conv1/biases'))
def testCreateConvWithCollection(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with ops.name_scope('fe'):
        conv = layers_lib.convolution2d(
            images, 32, [3, 3], outputs_collections='outputs', scope='Conv')
    output_collected = ops.get_collection('outputs')[0]
    self.assertEqual(output_collected.aliases, ['fe/Conv'])
    self.assertEqual(output_collected, conv)