def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
with self.test_session() as sess:
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
inception.inception_v3(train_inputs, num_classes)
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v3(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
    sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
    self.assertEqual(output.shape, (eval_batch_size,))
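All of the reuse tests on this page hinge on the same mechanism: tf.get_variable_scope().reuse_variables() flips the current variable scope into reuse mode, so a second call to the model function returns the weights created by the first call instead of allocating new ones. A minimal, self-contained sketch of that pattern (the dense helper is illustrative, not taken from the snippets on this page):

import tensorflow as tf  # TF 1.x graph mode assumed

def dense(x, units):
    # tf.get_variable participates in scope reuse; tf.Variable would not.
    w = tf.get_variable("w", [x.get_shape().as_list()[-1], units])
    b = tf.get_variable("b", [units], initializer=tf.zeros_initializer())
    return tf.matmul(x, w) + b

with tf.variable_scope("model") as scope:
    train_out = dense(tf.random_uniform((5, 8)), 4)  # creates model/w, model/b
    scope.reuse_variables()  # equivalent to tf.get_variable_scope().reuse_variables()
    eval_out = dense(tf.random_uniform((2, 8)), 4)   # reuses model/w, model/b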
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_a(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_a(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
    self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_16(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_16(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
    self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_19(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_19(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
    self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 300, 400
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = alexnet.alexnet_v2(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 4, 7, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
    self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 231, 231
eval_height, eval_width = 281, 281
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = overfeat.overfeat(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = overfeat.overfeat(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
    self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
def probability(self):
def lstm_cell():
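        # NOTE: despite its name, this factory returns a GRU cell, not an LSTM cell.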
if 'reuse' in inspect.getargspec(tf.contrib.rnn.GRUCell.__init__).args:
return tf.contrib.rnn.GRUCell(self.emb_dim, reuse=tf.get_variable_scope().reuse)
else:
return tf.contrib.rnn.GRUCell(self.emb_dim)
attn_cell = lstm_cell
if self.dropout < 1:
def attn_cell():
return tf.contrib.rnn.DropoutWrapper(
lstm_cell(), output_keep_prob=self._keep_prob)
single_cell = tf.contrib.rnn.MultiRNNCell([attn_cell() for _ in range(self.num_layers)], state_is_tuple=True)
output, state = tf.nn.dynamic_rnn(single_cell, self._data, dtype=tf.float32,
sequence_length=self._length)
weight = tf.Variable(tf.truncated_normal([self.emb_dim, self.num_classes], stddev=0.01))
bias = tf.Variable(tf.constant(0.1, shape=[self.num_classes]))
self.output = output
probability = tf.matmul(self.last_relevant(output, self._length), weight) + bias
return probability
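The inspect.getargspec guard above exists because the reuse constructor argument was only added to the tf.contrib.rnn cells partway through the TF 1.x series; on older builds, passing it raises a TypeError. A version-tolerant cell factory along the same lines (a sketch, assuming a TF 1.x environment with tf.contrib available):

import inspect
import tensorflow as tf

def make_gru_cell(num_units):
    # Pass `reuse` only when this TF build's GRUCell accepts it, so the cell
    # cooperates with an enclosing scope's reuse_variables() call.
    if 'reuse' in inspect.getargspec(tf.contrib.rnn.GRUCell.__init__).args:
        return tf.contrib.rnn.GRUCell(num_units, reuse=tf.get_variable_scope().reuse)
    return tf.contrib.rnn.GRUCell(num_units)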
def __init__(self, state_shape, n_hidden, summary=True):
super(CriticNetwork, self).__init__()
self.state_shape = state_shape
self.n_hidden = n_hidden
with tf.variable_scope("critic"):
self.states = tf.placeholder("float", [None] + self.state_shape, name="states")
self.r = tf.placeholder(tf.float32, [None], name="r")
L1 = tf.contrib.layers.fully_connected(
inputs=self.states,
num_outputs=self.n_hidden,
activation_fn=tf.tanh,
weights_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.02),
biases_initializer=tf.zeros_initializer(),
scope="L1")
self.value = tf.reshape(linear(L1, 1, "value", normalized_columns_initializer(1.0)), [-1])
self.loss = tf.reduce_sum(tf.square(self.value - self.r))
self.summary_loss = self.loss
self.vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
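The last line above shows a second common use of tf.get_variable_scope(): passing the current scope's name as the scope filter of tf.get_collection, which returns only the trainable variables created under that scope. A minimal sketch (names are illustrative):

import tensorflow as tf

with tf.variable_scope("critic"):
    w = tf.get_variable("w", [3, 3])
    # Collect only the trainable variables that live under the current scope.
    critic_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                    tf.get_variable_scope().name)

print([v.op.name for v in critic_vars])  # -> ['critic/w']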
def conv2d(x, num_kernels, kernel_h=5, kernel_w=5, strides=2, padding="VALID", name="conv2d",
use_bn=True, activation=tf.nn.relu, alpha=None, is_train=True, stddv=0.02):
"""
Wrapper function for convolutional layer
"""
n, h, w, c = x.get_shape().as_list()
with tf.variable_scope(name):
w = tf.get_variable(name="weight", initializer=tf.truncated_normal_initializer(stddev=stddv),
shape=(kernel_h, kernel_w, c, num_kernels))
bias = tf.get_variable(name="bias", initializer=tf.constant_initializer(0.01), shape=num_kernels)
y = tf.nn.conv2d(x, w, (1, strides, strides, 1), padding)
y = tf.nn.bias_add(y, bias)
if use_bn:
y = batch_norm(y, tf.get_variable_scope().name, is_train)
print("Convolutional 2D Layer %s, kernel size %s, output size %s Reuse:%s"
% (tf.get_variable_scope().name, (kernel_h, kernel_w, c, num_kernels), y.get_shape().as_list(),
tf.get_variable_scope().reuse))
if alpha is None:
y = activation(y)
else:
y = activation(y, alpha)
return y
def transpose_conv2d(x, output_shape, kernel_h=5, kernel_w=5, activation=tf.nn.relu, stride=2, padding="VALID",
use_bn=True, is_train=True, stddv=0.02, name="transpose_conv2d"):
n, h, w, c = x.get_shape().as_list()
num_kernels = output_shape[-1]
with tf.variable_scope(name):
w = tf.get_variable(name="weight", initializer=tf.truncated_normal_initializer(stddev=stddv),
shape=(kernel_h, kernel_w, num_kernels, c))
bias = tf.get_variable(name="bias", initializer=tf.constant_initializer(0.01), shape=num_kernels)
y = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, padding=padding,
strides=(1, stride, stride, 1))
y = tf.nn.bias_add(y, bias)
if use_bn:
y = batch_norm(y, tf.get_variable_scope().name, is_train)
print("Transposed Convolutional 2D Layer %s, kernel size %s, output size %s Reuse:%s"
% (tf.get_variable_scope().name, (kernel_h, kernel_w, c, num_kernels), y.get_shape().as_list(),
tf.get_variable_scope().reuse))
return activation(y)
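tf.nn.conv2d_transpose needs the full output_shape up front because the spatial size of a transposed convolution is ambiguous given only the input and the stride. A hypothetical call to the wrapper above (sizes are illustrative; use_bn=False keeps the sketch independent of the module's batch_norm helper):

# Upsample 4x4 feature maps to 8x8 with stride 2 and "SAME" padding.
x = tf.random_uniform((16, 4, 4, 64))
y = transpose_conv2d(x, output_shape=(16, 8, 8, 32), stride=2,
                     padding="SAME", use_bn=False)  # y has shape (16, 8, 8, 32)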
def dense_layer(x, num_neurons, name, activation, use_bn=False, is_train=True, stddv=0.02):
if len(x.get_shape().as_list()) > 2:
n, h, w, c = x.get_shape().as_list()
d = h * w * c
else:
n, d = x.get_shape().as_list()
with tf.variable_scope(name):
# flatten x
x = tf.reshape(x, (-1, d))
w = tf.get_variable("weight", shape=(d, num_neurons), initializer=tf.random_normal_initializer(stddev=stddv))
b = tf.get_variable("bias", shape=num_neurons, initializer=tf.constant_initializer(0.01))
y = tf.matmul(x, w) + b
if use_bn:
y = batch_norm(y, name=tf.get_variable_scope().name, is_train=is_train)
print("Dense Layer %s, output size %s" % (tf.get_variable_scope().name, y.get_shape().as_list()))
return activation(y)
def discriminator(self, inpt, reuse, is_train):
"""
Build D for training or testing. If reuse if True, the input should be the output of generator
"""
with tf.variable_scope("discriminator"):
if reuse:
tf.get_variable_scope().reuse_variables()
net = conv2d(x=inpt, num_kernels=self.d_init, name="conv1", activation=lkrelu, padding="SAME",
alpha=0.02, is_train=is_train, stddv=self.stddv)
net = conv2d(x=net, num_kernels=self.d_init*2, name="conv2", activation=lkrelu, padding="SAME",
alpha=0.02, is_train=is_train, stddv=self.stddv)
net = conv2d(x=net, num_kernels=self.d_init*4, name="conv3", activation=lkrelu, padding="SAME",
alpha=0.02, is_train=is_train, stddv=self.stddv)
net = conv2d(x=net, num_kernels=self.d_init*8, name="conv4", activation=lkrelu, padding="SAME",
alpha=0.02, is_train=is_train, stddv=self.stddv)
net = dense_layer(x=net, num_neurons=1, name="output", activation=tf.identity, is_train=is_train,
stddv=self.stddv)
return net
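discriminator is designed to be called twice per training step, once on real data and once on generated data, with reuse=True on the second call so both passes share one set of weights. A stripped-down, self-contained sketch of the same idiom (not the model above):

import tensorflow as tf

def toy_discriminator(inpt, reuse):
    # Re-enter the scope and flip it into reuse mode on the second call.
    with tf.variable_scope("toy_discriminator"):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        w = tf.get_variable("w", [int(inpt.get_shape()[-1]), 1])
        return tf.matmul(inpt, w)

d_real = toy_discriminator(tf.random_uniform((4, 16)), reuse=False)  # creates the weights
d_fake = toy_discriminator(tf.random_uniform((4, 16)), reuse=True)   # reuses them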
def layer_norm(x, axes=1, initial_bias_value=0.0, epsilon=1e-3, name="var"):
"""
Apply layer normalization to x
Args:
x: input variable.
initial_bias_value: initial value for the LN bias.
epsilon: small constant value to avoid division by zero.
scope: scope or name for the LN op.
Returns:
LN(x) with same shape as x
"""
if not isinstance(axes, list):
axes = [axes]
scope = tf.get_variable_scope()
with tf.variable_scope(scope):
with tf.variable_scope(name):
mean = tf.reduce_mean(x, axes, keep_dims=True)
            std = tf.sqrt(tf.reduce_mean(tf.square(x - mean), axes, keep_dims=True))
with tf.device('/cpu:0'):
gain = tf.get_variable('gain', x.get_shape().as_list()[1:],
initializer=tf.constant_initializer(1.0))
bias = tf.get_variable('bias', x.get_shape().as_list()[1:],
initializer=tf.constant_initializer(initial_bias_value))
            return gain * (x - mean) / (std + epsilon) + bias
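A usage sketch for the helper above (shapes and scope names are illustrative): because the gain and bias are created with tf.get_variable, a second call in the same scope after reuse_variables() shares one set of LN parameters.

import tensorflow as tf

x = tf.random_uniform((32, 128))
with tf.variable_scope("encoder"):
    h1 = layer_norm(x, axes=1, name="ln")  # creates encoder/ln/gain and encoder/ln/bias
    tf.get_variable_scope().reuse_variables()
    h2 = layer_norm(x, axes=1, name="ln")  # reuses the same parameters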