def show_shrinkage(shrink_func, theta, **kwargs):
    """Plot a shrinkage function's output against its input on a linear grid.

    Builds a fresh default graph, evaluates ``shrink_func(r_, rvar_, theta_)``
    on inputs spanning ``[0, sigmas] * sqrt(rvar)``, then plots the identity
    line (yellow) and the shrinkage curve (blue).

    Args:
        shrink_func: Callable ``(r_, rvar_, theta_) -> (xhat_, dxdr_)`` of
            TF tensors.
        theta: Shrinkage parameters; converted to a TF constant via ``tfcf``.
        **kwargs: Optional ``seed`` (default 1), ``N`` (default 500),
            ``L`` (default 4), ``sigmas`` (default 10), and ``title`` for
            the figure's suptitle.
    """
    tf.reset_default_graph()
    tf.set_random_seed(kwargs.get('seed', 1))
    N = kwargs.get('N', 500)
    L = kwargs.get('L', 4)
    nsigmas = kwargs.get('sigmas', 10)
    shape = (N, L)
    rvar = 1e-4
    r = np.reshape(np.linspace(0, nsigmas, N * L) * math.sqrt(rvar), shape)
    r_ = tfcf(r)
    rvar_ = tfcf(np.ones(L) * rvar)
    xhat_, dxdr_ = shrink_func(r_, rvar_, tfcf(theta))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        xhat = sess.run(xhat_)
    import matplotlib.pyplot as plt
    plt.figure(1)
    plt.plot(r.reshape(-1), r.reshape(-1), 'y')
    plt.plot(r.reshape(-1), xhat.reshape(-1), 'b')
    # BUG FIX: dict.has_key() was removed in Python 3; use the `in` operator.
    if 'title' in kwargs:
        plt.suptitle(kwargs['title'])
    plt.show()
# Collected example sources for Python tf.set_random_seed() usage.
def _embed_sentences(self):
    """Embed sentences via the last output cell of an LSTM.

    Looks up word embeddings for ``self.input``, feeds them through a
    single ``BasicLSTMCell`` of size ``self.d`` with per-sequence lengths
    ``self.input_lengths``, and returns the RNN potentials.

    Returns:
        Tuple of (output of ``get_rnn_output``, empty dict of extras).
    """
    word_embeddings = self._get_embedding()
    word_feats = tf.nn.embedding_lookup(word_embeddings, self.input)
    batch_size = tf.shape(self.input)[0]
    with tf.variable_scope("LSTM") as scope:
        # Seed offset by -1 — presumably to decorrelate the LSTM init from
        # other ops seeded with self.seed; TODO confirm intent.
        tf.set_random_seed(self.seed - 1)
        # LSTM architecture
        cell = tf.contrib.rnn.BasicLSTMCell(self.d)
        # Set RNN
        initial_state = cell.zero_state(batch_size, tf.float32)
        rnn_out, _ = tf.nn.dynamic_rnn(
            cell, word_feats, sequence_length=self.input_lengths,
            initial_state=initial_state, time_major=False
        )
    # Get potentials
    return get_rnn_output(rnn_out, self.d, self.input_lengths), {}
def testCreateLogisticClassifier(self):
    """One-clone deployment of LogisticClassifier: 2 CPU variables, no update ops.

    NOTE(review): self._inputs / self._labels come from a fixture outside
    this chunk — presumably numpy arrays; verify against setUp.
    """
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = LogisticClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        clone = clones[0]
        # Logistic classifier: one weight matrix + one bias.
        self.assertEqual(len(slim.get_variables()), 2)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, 'CPU:0')
            self.assertDeviceEqual(v.value().device, 'CPU:0')
        self.assertEqual(clone.outputs.op.name,
                         'LogisticClassifier/fully_connected/Sigmoid')
        self.assertEqual(clone.scope, '')
        self.assertDeviceEqual(clone.device, '')
        self.assertEqual(len(slim.losses.get_losses()), 1)
        # No batch norm in this model, so UPDATE_OPS must stay empty.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(update_ops, [])
def testCreateSingleclone(self):
    """One-clone deployment of BatchNormClassifier: 5 variables, 2 update ops."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        clone = clones[0]
        # weights + bias + batch-norm beta/moving_mean/moving_variance = 5.
        self.assertEqual(len(slim.get_variables()), 5)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, 'CPU:0')
            self.assertDeviceEqual(v.value().device, 'CPU:0')
        self.assertEqual(clone.outputs.op.name,
                         'BatchNormClassifier/fully_connected/Sigmoid')
        self.assertEqual(clone.scope, '')
        self.assertDeviceEqual(clone.device, '')
        self.assertEqual(len(slim.losses.get_losses()), 1)
        # Batch norm contributes the two moving-average update ops.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(len(update_ops), 2)
def testCreateOnecloneWithPS(self):
    """With one PS task, variables land on the PS and ops on the worker."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                      num_ps_tasks=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(clones), 1)
        clone = clones[0]
        self.assertEqual(clone.outputs.op.name,
                         'BatchNormClassifier/fully_connected/Sigmoid')
        # Compute ops are placed on the worker job.
        self.assertDeviceEqual(clone.device, '/job:worker')
        self.assertEqual(clone.scope, '')
        self.assertEqual(len(slim.get_variables()), 5)
        for v in slim.get_variables():
            # Variables are pinned to the single parameter server's CPU.
            self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
            self.assertDeviceEqual(v.device, v.value().device)
def testCreateSingleclone(self):
    """Optimize a single clone: grads cover all trainables, vars stay on CPU.

    NOTE(review): this redefines testCreateSingleclone and shadows the
    earlier definition if both live in the same class — only this one runs.
    """
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(slim.get_variables()), 5)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(len(update_ops), 2)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
        total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                  optimizer)
        # Every trainable variable must receive a gradient.
        self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
        self.assertEqual(total_loss.op.name, 'total_loss')
        for g, v in grads_and_vars:
            self.assertDeviceEqual(g.device, '')
            self.assertDeviceEqual(v.device, 'CPU:0')
def set_seed(seed):
    """Seed all available RNG backends with *seed* (reduced mod 2**32 - 2).

    Seeds ``random`` and ``numpy`` unconditionally, and lasagne /
    tensorflow on a best-effort basis, then prints the seed in green.
    Also records the seed in the module-level global ``seed_``.

    Args:
        seed: Integer seed; reduced modulo 4294967294 to stay in range.
    """
    seed %= 4294967294
    global seed_
    seed_ = seed
    random.seed(seed)
    np.random.seed(seed)
    # FIX: guard the lasagne import like the tensorflow one below (and like
    # the other set_seed variant in this file) so a missing optional backend
    # is reported instead of crashing the whole call.
    try:
        import lasagne
        lasagne.random.set_rng(np.random.RandomState(seed))
    except Exception as e:
        print(e)
    try:
        import tensorflow as tf
        tf.set_random_seed(seed)
    except Exception as e:
        print(e)
    print((
        colorize(
            'using seed %s' % (str(seed)),
            'green'
        )
    ))
def _sample(self):
    """Sample from the generator while linearly interpolating the latent z.

    Walks from ``self.z`` toward ``self.target`` over ``self.steps`` calls;
    when a segment completes, the old target becomes the new start and a
    fresh target latent is drawn from the encoder.

    Returns:
        Dict with key 'generator' holding the generated batch.
    """
    gan = self.gan
    z_t = gan.encoder.sample
    inputs_t = gan.inputs.x
    if self.z is None:
        # First call: draw start and target latents and fix the input batch.
        self.z = gan.encoder.sample.eval()
        self.target = gan.encoder.sample.eval()
        self.input = gan.session.run(gan.inputs.x)
    if self.step > self.steps:
        # Segment finished: restart interpolation from the previous target.
        self.z = self.target
        self.target = gan.encoder.sample.eval()
        self.step = 0
    percent = float(self.step) / self.steps
    z_interp = self.z * (1.0 - percent) + self.target * percent
    self.step += 1
    g = tf.get_default_graph()
    with g.as_default():
        # Pin the graph seed so repeated sampling is deterministic.
        tf.set_random_seed(1)
    return {
        'generator': gan.session.run(gan.generator.sample, feed_dict={z_t: z_interp, inputs_t: self.input})
    }
def __init__(self, name=None, num_of_persons=0, recurrent_unit='GRU', rnn_layers=1,
             reuse=False, is_training=False, input_net=None):
    """Build the recurrent model head.

    Args:
        name: Optional model name forwarded to the base class.
        num_of_persons: Number of identity classes; must be > 0 when training.
        recurrent_unit: RNN cell type label (default 'GRU').
        rnn_layers: Number of stacked RNN layers.
        reuse: Whether to reuse existing variables.
        is_training: Training-mode flag.
        input_net: Optional upstream tensor; when None a placeholder of
            shape (None, 17, 17, 32) is created.

    Raises:
        Exception: If is_training is True and num_of_persons <= 0.
    """
    tf.set_random_seed(SEED)
    if num_of_persons <= 0 and is_training:
        # FIX: corrected typo in the error message ('thaining' -> 'training').
        raise Exception('Parameter num_of_persons has to be greater than zero when training')
    self.num_of_persons = num_of_persons
    self.rnn_layers = rnn_layers
    self.recurrent_unit = recurrent_unit
    if input_net is None:
        input_tensor = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 17, 17, 32),
            name='input_image')
    else:
        input_tensor = input_net
    super().__init__(name, input_tensor, self.FEATURES, num_of_persons, reuse, is_training)
def testComputation(self):
    """With near-zero noise, the noisy identity kernel is an identity map.

    Summing a 3x3x4x8 kernel over the output-depth axis must leave only
    the spatial-center pixel at 1; every other position is 0.
    """
    tf.set_random_seed(0)
    with self.test_session() as sess:
        # stddev=1e-20 makes the noise negligible relative to the identity.
        initializer = snt.nets.noisy_identity_kernel_initializer(2, stddev=1e-20)
        x = initializer([3, 3, 4, 8])
        x = tf.reduce_sum(x, axis=[3])
        x_ = sess.run(x)
        # Iterate over elements. After summing over depth, assert that only the
        # middle pixel is on.
        it = np.nditer(x_, flags=["multi_index"])
        while not it.finished:
            value, idx = it[0], it.multi_index
            (filter_height, filter_width, _) = idx
            if filter_height == 1 and filter_width == 1:
                self.assertAllClose(value, 1)
            else:
                self.assertAllClose(value, 0)
            it.iternext()
def set_seed(seed):
    """Seed every RNG backend (python, numpy, lasagne, tensorflow) best-effort.

    The seed is reduced modulo 2**32 - 2 and also stored in the module
    global ``seed_``. Missing optional backends are reported, not fatal.
    """
    seed = seed % 4294967294
    global seed_
    seed_ = seed
    # Core RNGs are always present.
    random.seed(seed)
    np.random.seed(seed)
    # Optional backends: print the failure and carry on.
    try:
        import lasagne
        lasagne.random.set_rng(np.random.RandomState(seed))
    except Exception as err:
        print(err)
    try:
        import tensorflow as tf
        tf.set_random_seed(seed)
    except Exception as err:
        print(err)
    message = colorize('using seed %s' % (str(seed)), 'green')
    print((message))
def testCreateLogisticClassifier(self):
    """One-clone deployment of LogisticClassifier: 2 CPU variables, no update ops."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = LogisticClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        clone = clones[0]
        # Logistic classifier: one weight matrix + one bias.
        self.assertEqual(len(slim.get_variables()), 2)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, 'CPU:0')
            self.assertDeviceEqual(v.value().device, 'CPU:0')
        self.assertEqual(clone.outputs.op.name,
                         'LogisticClassifier/fully_connected/Sigmoid')
        self.assertEqual(clone.scope, '')
        self.assertDeviceEqual(clone.device, '')
        self.assertEqual(len(slim.losses.get_losses()), 1)
        # No batch norm in this model, so UPDATE_OPS must stay empty.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(update_ops, [])
def testCreateSingleclone(self):
    """One-clone deployment of BatchNormClassifier: 5 variables, 2 update ops."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        clone = clones[0]
        # weights + bias + batch-norm beta/moving_mean/moving_variance = 5.
        self.assertEqual(len(slim.get_variables()), 5)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, 'CPU:0')
            self.assertDeviceEqual(v.value().device, 'CPU:0')
        self.assertEqual(clone.outputs.op.name,
                         'BatchNormClassifier/fully_connected/Sigmoid')
        self.assertEqual(clone.scope, '')
        self.assertDeviceEqual(clone.device, '')
        self.assertEqual(len(slim.losses.get_losses()), 1)
        # Batch norm contributes the two moving-average update ops.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(len(update_ops), 2)
def testCreateOnecloneWithPS(self):
    """With one PS task, variables land on the PS and ops on the worker."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                      num_ps_tasks=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(clones), 1)
        clone = clones[0]
        self.assertEqual(clone.outputs.op.name,
                         'BatchNormClassifier/fully_connected/Sigmoid')
        # Compute ops are placed on the worker job.
        self.assertDeviceEqual(clone.device, '/job:worker')
        self.assertEqual(clone.scope, '')
        self.assertEqual(len(slim.get_variables()), 5)
        for v in slim.get_variables():
            # Variables are pinned to the single parameter server's CPU.
            self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
            self.assertDeviceEqual(v.device, v.value().device)
def testCreateSingleclone(self):
    """Optimize a single clone: grads cover all trainables, vars stay on CPU.

    NOTE(review): redefines testCreateSingleclone; shadows the earlier
    definition when both live in the same class.
    """
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(slim.get_variables()), 5)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(len(update_ops), 2)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
        total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                  optimizer)
        # Every trainable variable must receive a gradient.
        self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
        self.assertEqual(total_loss.op.name, 'total_loss')
        for g, v in grads_and_vars:
            self.assertDeviceEqual(g.device, '')
            self.assertDeviceEqual(v.device, 'CPU:0')
def test_time(self):
    """Test that a `time` over the `length` triggers a finished flag.

    Builds a batch of 4 sequences of lengths [4, 5, 6, 7] and checks the
    TerminationHelper's finished mask at time step 5.
    """
    tf.set_random_seed(23)
    time = tf.convert_to_tensor(5, dtype=tf.int32)
    lengths = tf.constant([4, 5, 6, 7])
    # Output values are irrelevant here; only the shape/batch matters.
    output = tf.random_normal([4, 10, 3], dtype=tf.float32)
    finished = layers.TerminationHelper(lengths).finished(time, output)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        act_finished = sess.run(finished)
        # NOTA BENE: we have set that
        # time = 5
        # lengths = [4, 5, 6, 7]
        #
        # Since the time is 0-based, having time=5 means that
        # we have alread scanned through 5 elements, so only
        # the last sequence in the batch is ongoing.
        exp_finished = [True, True, True, False]
        self.assertAllEqual(exp_finished, act_finished)
def testCreateLogisticClassifier(self):
    """One-clone deployment of LogisticClassifier: 2 CPU variables, no update ops."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = LogisticClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        clone = clones[0]
        # Logistic classifier: one weight matrix + one bias.
        self.assertEqual(len(slim.get_variables()), 2)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, 'CPU:0')
            self.assertDeviceEqual(v.value().device, 'CPU:0')
        self.assertEqual(clone.outputs.op.name,
                         'LogisticClassifier/fully_connected/Sigmoid')
        self.assertEqual(clone.scope, '')
        self.assertDeviceEqual(clone.device, '')
        self.assertEqual(len(slim.losses.get_losses()), 1)
        # No batch norm in this model, so UPDATE_OPS must stay empty.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(update_ops, [])
def testCreateSingleclone(self):
    """One-clone deployment of BatchNormClassifier: 5 variables, 2 update ops."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        clone = clones[0]
        # weights + bias + batch-norm beta/moving_mean/moving_variance = 5.
        self.assertEqual(len(slim.get_variables()), 5)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, 'CPU:0')
            self.assertDeviceEqual(v.value().device, 'CPU:0')
        self.assertEqual(clone.outputs.op.name,
                         'BatchNormClassifier/fully_connected/Sigmoid')
        self.assertEqual(clone.scope, '')
        self.assertDeviceEqual(clone.device, '')
        self.assertEqual(len(slim.losses.get_losses()), 1)
        # Batch norm contributes the two moving-average update ops.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(len(update_ops), 2)
def testCreateOnecloneWithPS(self):
    """With one PS task, variables land on the PS and ops on the worker."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                      num_ps_tasks=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(clones), 1)
        clone = clones[0]
        self.assertEqual(clone.outputs.op.name,
                         'BatchNormClassifier/fully_connected/Sigmoid')
        # Compute ops are placed on the worker job.
        self.assertDeviceEqual(clone.device, '/job:worker')
        self.assertEqual(clone.scope, '')
        self.assertEqual(len(slim.get_variables()), 5)
        for v in slim.get_variables():
            # Variables are pinned to the single parameter server's CPU.
            self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
            self.assertDeviceEqual(v.device, v.value().device)
def testCreateSingleclone(self):
    """Optimize a single clone: grads cover all trainables, vars stay on CPU.

    NOTE(review): redefines testCreateSingleclone; shadows the earlier
    definition when both live in the same class.
    """
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(slim.get_variables()), 5)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(len(update_ops), 2)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
        total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                  optimizer)
        # Every trainable variable must receive a gradient.
        self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
        self.assertEqual(total_loss.op.name, 'total_loss')
        for g, v in grads_and_vars:
            self.assertDeviceEqual(g.device, '')
            self.assertDeviceEqual(v.device, 'CPU:0')
def _check_adam():
    """Smoke-test hyper-optimization with the Adam optimizer.

    Runs `experiment` for the first two HO modes on one model type with a
    fixed seed so runs are comparable. NOTE(review): HO_MODES,
    IMPLEMENTED_MODEL_TYPES, experiment and rf are defined elsewhere in
    this module.
    """
    for _mode in HO_MODES[:2]:
        for _model in IMPLEMENTED_MODEL_TYPES[1:2]:
            _model_kwargs = {'dims': [None, 300, 300, None]}
            tf.reset_default_graph()
            # set random seeds!!!!
            np.random.seed(1)
            tf.set_random_seed(1)
            experiment('test_with_model_' + _model,
                       collect_data=False, hyper_iterations=3, mode=_mode, epochs=3,
                       optimizer=rf.AdamOptimizer,
                       optimizer_kwargs={'lr': tf.Variable(.001, name='eta_adam')},
                       model=_model,
                       model_kwargs=_model_kwargs,
                       set_T=100,
                       )
def _check_forward():
    """Run forward-mode hyper-optimization and collect the resulting weights.

    Returns:
        List of weight arrays (results[0]['weights'] per run), one entry
        per (mode, model) combination executed.
    """
    w_100 = []
    for i in range(1):
        for _mode in HO_MODES[0:1]:
            for _model in IMPLEMENTED_MODEL_TYPES[0:2]:
                _model_kwargs = {}  # {'dims': [None, 300, 300, None]}
                tf.reset_default_graph()
                # set random seeds!!!!
                np.random.seed(1)
                tf.set_random_seed(1)
                results = experiment('test_with_model_' + _model, collect_data=False, hyper_iterations=10, mode=_mode,
                                     epochs=None,
                                     model=_model,
                                     model_kwargs=_model_kwargs,
                                     set_T=1000,
                                     synthetic_hypers=None,
                                     hyper_batch_size=100
                                     # optimizer=rf.GradientDescentOptimizer,
                                     # optimizer_kwargs={'lr': tf.Variable(.01, name='eta')}
                                     )
                w_100.append(results[0]['weights'])
    # rf.save_obj(w_100, 'check_forward')
    return w_100
def _check_all_methods():
    """Smoke-test every HO mode against every implemented model type.

    Uses fixed numpy/TF seeds per run so results are reproducible.
    """
    for _mode in HO_MODES[:]:
        for _model in IMPLEMENTED_MODEL_TYPES:
            # _model_kwargs = {'dims': [None, 300, 300, None]}
            tf.reset_default_graph()
            # set random seeds!!!!
            np.random.seed(1)
            tf.set_random_seed(1)
            experiment('test_with_model_' + _model, collect_data=False, hyper_iterations=3, mode=_mode,
                       # epochs=3,
                       model=_model,
                       # model_kwargs=_model_kwargs,
                       set_T=100,
                       synthetic_hypers=None,
                       hyper_batch_size=100
                       # optimizer=rf.GradientDescentOptimizer,
                       # optimizer_kwargs={'lr': tf.Variable(.01, name='eta')}
                       )
def _check_cnn():
    """Smoke-test hyper-optimization on the CNN model type.

    NOTE(review): the leading print('END') looks like leftover debug
    output — confirm whether it is intentional.
    """
    print('END')
    for _mode in HO_MODES[2:3]:
        for _model in IMPLEMENTED_MODEL_TYPES[2:3]:
            tf.reset_default_graph()
            np.random.seed(1)
            tf.set_random_seed(1)
            # Three 5x5 conv layers (1->2->4->8 channels) + a 128->10 FFNN head.
            _model_kwargs = {'conv_dims': [[5, 5, 1, 2], [5, 5, 2, 4], [5, 5, 4, 8]],
                             'ffnn_dims': [128, 10]}
            # noinspection PyTypeChecker
            experiment('test_with_model_' + _model, collect_data=False, hyper_iterations=3, mode=_mode,
                       epochs=2,
                       model=_model,
                       model_kwargs=_model_kwargs,
                       set_T=100,
                       synthetic_hypers=None,
                       hyper_batch_size=100,
                       l1=None,
                       l2=None
                       # optimizer=rf.GradientDescentOptimizer,
                       # optimizer_kwargs={'lr': tf.Variable(.01, name='eta')}
                       )
def __init__(self):
    """Set up the model: TF session, CLI args, logging, RNG seeds, saved args."""
    self.model_name = self.__class__.__name__
    self.sess = tf.Session()
    # get arguments
    self.args = self.get_args()
    # log set
    logging.basicConfig(filename=self.args.log_file,
                        level=logging.DEBUG,
                        format='%(asctime)s %(message)s', datefmt='%y-%m-%d %H:%M')
    # set random seed
    np.random.seed(self.args.random_seed)
    tf.set_random_seed(self.args.random_seed)
    # save arguments
    save_args(args=self.args)
def load_tf_session():
    """Create a TF session and install it as the Keras backend session.

    Also seeds TF for reproducibility and forces Theano-style ('th')
    image dimension ordering in Keras.

    Returns:
        The newly created tf.Session.
    """
    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)
    # Image dimensions ordering should follow the Theano convention
    if keras.backend.image_dim_ordering() != 'th':
        keras.backend.set_image_dim_ordering('th')
        print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to 'tf', temporarily setting to 'th'")
    # Create TF session and set as Keras backend session
    sess = tf.Session()
    keras.backend.set_session(sess)
    print("Created TensorFlow session and set Keras backend.")
    return sess
# Get MNIST test data
def testCreateLogisticClassifier(self):
    """One-clone deployment of LogisticClassifier: 2 CPU variables, no update ops."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = LogisticClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        clone = clones[0]
        # Logistic classifier: one weight matrix + one bias.
        self.assertEqual(len(slim.get_variables()), 2)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, 'CPU:0')
            self.assertDeviceEqual(v.value().device, 'CPU:0')
        self.assertEqual(clone.outputs.op.name,
                         'LogisticClassifier/fully_connected/Sigmoid')
        self.assertEqual(clone.scope, '')
        self.assertDeviceEqual(clone.device, '')
        self.assertEqual(len(slim.losses.get_losses()), 1)
        # No batch norm in this model, so UPDATE_OPS must stay empty.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(update_ops, [])
def testCreateSingleclone(self):
    """One-clone deployment of BatchNormClassifier: 5 variables, 2 update ops."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        clone = clones[0]
        # weights + bias + batch-norm beta/moving_mean/moving_variance = 5.
        self.assertEqual(len(slim.get_variables()), 5)
        for v in slim.get_variables():
            self.assertDeviceEqual(v.device, 'CPU:0')
            self.assertDeviceEqual(v.value().device, 'CPU:0')
        self.assertEqual(clone.outputs.op.name,
                         'BatchNormClassifier/fully_connected/Sigmoid')
        self.assertEqual(clone.scope, '')
        self.assertDeviceEqual(clone.device, '')
        self.assertEqual(len(slim.losses.get_losses()), 1)
        # Batch norm contributes the two moving-average update ops.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(len(update_ops), 2)
def testCreateOnecloneWithPS(self):
    """With one PS task, variables land on the PS and ops on the worker."""
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                      num_ps_tasks=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(clones), 1)
        clone = clones[0]
        self.assertEqual(clone.outputs.op.name,
                         'BatchNormClassifier/fully_connected/Sigmoid')
        # Compute ops are placed on the worker job.
        self.assertDeviceEqual(clone.device, '/job:worker')
        self.assertEqual(clone.scope, '')
        self.assertEqual(len(slim.get_variables()), 5)
        for v in slim.get_variables():
            # Variables are pinned to the single parameter server's CPU.
            self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
            self.assertDeviceEqual(v.device, v.value().device)
def testCreateSingleclone(self):
    """Optimize a single clone: grads cover all trainables, vars stay on CPU.

    NOTE(review): redefines testCreateSingleclone; shadows the earlier
    definition when both live in the same class.
    """
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(0)
        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
        tf_labels = tf.constant(self._labels, dtype=tf.float32)
        model_fn = BatchNormClassifier
        clone_args = (tf_inputs, tf_labels)
        deploy_config = model_deploy.DeploymentConfig(num_clones=1)
        self.assertEqual(slim.get_variables(), [])
        clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
        self.assertEqual(len(slim.get_variables()), 5)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.assertEqual(len(update_ops), 2)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
        total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                  optimizer)
        # Every trainable variable must receive a gradient.
        self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
        self.assertEqual(total_loss.op.name, 'total_loss')
        for g, v in grads_and_vars:
            self.assertDeviceEqual(g.device, '')
            self.assertDeviceEqual(v.device, 'CPU:0')