def assign_network_to_target():
    # Hard-copy the online network's weights and biases into the target network.
    update_wfc1 = tf.assign(w_fc1_target, w_fc1)
    update_wfc2 = tf.assign(w_fc2_target, w_fc2)
    update_wfc3 = tf.assign(w_fc3_target, w_fc3)
    update_bfc1 = tf.assign(b_fc1_target, b_fc1)
    update_bfc2 = tf.assign(b_fc2_target, b_fc2)
    update_bfc3 = tf.assign(b_fc3_target, b_fc3)
    sess.run(update_wfc1)
    sess.run(update_wfc2)
    sess.run(update_wfc3)
    sess.run(update_bfc1)
    sess.run(update_bfc2)
    sess.run(update_bfc3)
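# The six assigns above are executed one session call at a time. A minimal
# sketch of an equivalent grouped version, assuming the same variables and an
# active `sess` as in the snippet above:
def assign_network_to_target_grouped():
    pairs = [(w_fc1_target, w_fc1), (w_fc2_target, w_fc2), (w_fc3_target, w_fc3),
             (b_fc1_target, b_fc1), (b_fc2_target, b_fc2), (b_fc3_target, b_fc3)]
    copy_op = tf.group(*[tf.assign(target, source) for target, source in pairs])
    sess.run(copy_op)  # one run call instead of six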
# Input
def xavier_initializer(shape):
    dim_sum = np.sum(shape)
    if len(shape) == 1:
        dim_sum += 1
    bound = np.sqrt(2.0 / dim_sum)
    return tf.random_uniform(shape, minval=-bound, maxval=bound)
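# A minimal usage sketch (hypothetical shapes and names): the initializer
# returns a random tensor that can seed a tf.Variable directly.
w_fc1 = tf.Variable(xavier_initializer([784, 256]), name="w_fc1")
b_fc1 = tf.Variable(xavier_initializer([256]), name="b_fc1")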
# # Assigning network variables to target network variables
# def assign_network_to_target():
#     update_wfc = tf.assign(w_fc_target, w_fc)
#     update_bfc = tf.assign(b_fc_target, b_fc)
#     sess.run(update_wfc)
#     sess.run(update_bfc)
#     cell_target = cell
# Input
def get_video_weights(video_id_batch):
    video_id_to_index = tf.contrib.lookup.string_to_index_table_from_file(
        vocabulary_file=FLAGS.sample_vocab_file, default_value=0)
    indexes = video_id_to_index.lookup(video_id_batch)
    weights, length = get_video_weights_array()
    weights_input = tf.placeholder(tf.float32, shape=[length], name="sample_weights_input")
    weights_tensor = tf.get_variable("sample_weights",
                                     shape=[length],
                                     trainable=False,
                                     dtype=tf.float32,
                                     initializer=tf.constant_initializer(weights))
    weights_assignment = tf.assign(weights_tensor, weights_input)
    tf.add_to_collection("weights_input", weights_input)
    tf.add_to_collection("weights_assignment", weights_assignment)
    video_weight_batch = tf.nn.embedding_lookup(weights_tensor, indexes)
    return video_weight_batch
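# The placeholder and assign op are stashed in graph collections so the sample
# weights can be refreshed later without rebuilding the graph. A minimal sketch
# of that refresh, assuming a live `sess` and a numpy array `new_weights` of
# length `length`:
weights_input = tf.get_collection("weights_input")[0]
weights_assignment = tf.get_collection("weights_assignment")[0]
sess.run(weights_assignment, feed_dict={weights_input: new_weights})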
def batchnorm(x, name, phase, updates, gamma=0.96):
    # `mean` and `switch` are helpers assumed to be defined elsewhere in the same
    # module (a reduce-mean wrapper and a train/test selector, respectively).
    k = x.get_shape()[1]
    runningmean = tf.get_variable(name + "/mean", shape=[1, k], initializer=tf.constant_initializer(0.0), trainable=False)
    runningvar = tf.get_variable(name + "/var", shape=[1, k], initializer=tf.constant_initializer(1e-4), trainable=False)
    testy = (x - runningmean) / tf.sqrt(runningvar)
    mean_ = mean(x, axis=0, keepdims=True)
    var_ = mean(tf.square(x), axis=0, keepdims=True)
    std = tf.sqrt(var_)
    trainy = (x - mean_) / std
    updates.extend([
        tf.assign(runningmean, runningmean * gamma + mean_ * (1 - gamma)),
        tf.assign(runningvar, runningvar * gamma + var_ * (1 - gamma))
    ])
    y = switch(phase, trainy, testy)
    out = y * tf.get_variable(name + "/scaling", shape=[1, k], initializer=tf.constant_initializer(1.0), trainable=True) \
        + tf.get_variable(name + "/translation", shape=[1, k], initializer=tf.constant_initializer(0.0), trainable=True)
    return out
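# `batchnorm` only appends the running-average assign ops to `updates`; nothing
# runs them automatically. A minimal training-loop sketch (hypothetical names)
# that executes them alongside the train op:
updates = []
y = batchnorm(x, "fc1_bn", phase, updates)
# ... build loss and train_op from y ...
sess.run([train_op] + updates, feed_dict=feed)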
# ================================================================
# Mathematical utils
# ================================================================
def accumulate_strings(values, name="strings"):
"""Accumulates strings into a vector.
Args:
values: A 1-d string tensor that contains values to add to the accumulator.
Returns:
A tuple (value_tensor, update_op).
"""
tf.assert_type(values, tf.string)
strings = tf.Variable(
name=name,
initial_value=[],
dtype=tf.string,
trainable=False,
collections=[],
validate_shape=True)
value_tensor = tf.identity(strings)
update_op = tf.assign(
ref=strings, value=tf.concat([strings, values], 0), validate_shape=False)
return value_tensor, update_op
def fix_variables(self, sess, pretrained_model):
    print('Fix VGG16 layers..')
    with tf.variable_scope('Fix_VGG16') as scope:
        with tf.device("/cpu:0"):
            # fix the vgg16 issue from conv weights to fc weights
            # fix RGB to BGR
            fc6_conv = tf.get_variable("fc6_conv", [7, 7, 512, 4096], trainable=False)
            fc7_conv = tf.get_variable("fc7_conv", [1, 1, 4096, 4096], trainable=False)
            conv1_rgb = tf.get_variable("conv1_rgb", [3, 3, 3, 64], trainable=False)
            restorer_fc = tf.train.Saver({self._scope + "/fc6/weights": fc6_conv,
                                          self._scope + "/fc7/weights": fc7_conv,
                                          self._scope + "/conv1/conv1_1/weights": conv1_rgb})
            restorer_fc.restore(sess, pretrained_model)

            sess.run(tf.assign(self._variables_to_fix[self._scope + '/fc6/weights:0'],
                               tf.reshape(fc6_conv,
                                          self._variables_to_fix[self._scope + '/fc6/weights:0'].get_shape())))
            sess.run(tf.assign(self._variables_to_fix[self._scope + '/fc7/weights:0'],
                               tf.reshape(fc7_conv,
                                          self._variables_to_fix[self._scope + '/fc7/weights:0'].get_shape())))
            sess.run(tf.assign(self._variables_to_fix[self._scope + '/conv1/conv1_1/weights:0'],
                               tf.reverse(conv1_rgb, [2])))
def build_train_op(self):
    config = self.config
    self.g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
        .minimize(self.g_loss, var_list=self.g_vars)
    self.d_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
        .minimize(self.d_loss, var_list=self.d_vars)
    self.d_label_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
        .minimize(self.d_labelLossReal, var_list=self.dl_vars)
    self.d_gen_label_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
        .minimize(self.g_lossLabels_GLabeler, var_list=self.dl_gen_vars)
    self.d_on_z_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
        .minimize(self.g_loss_on_z + self.rec_loss_coeff * self.real_reconstruction_loss, var_list=self.dz_vars)
    # Exponentially decay k_t each time this op is run.
    self.k_t_update = tf.assign(self.k_t, self.k_t * tf.exp(-1.0 / config.tau))
    self.train_op = tf.group(self.d_gen_label_optim, self.d_label_optim, self.d_optim,
                             self.g_optim, self.d_on_z_optim)
def apply_gradients(self, grads_and_vars, global_step=None):
    """Apply gradients to model variables specified in `grads_and_vars`.

    `apply_gradients` returns an op that calls
    `tf.train.Optimizer.apply_gradients` and then zeros the gradient
    variables stored in `self.grads_and_vars`.

    Args:
        grads_and_vars (list): Description.
        global_step (None, optional): tensorflow global_step variable.

    Returns:
        (tf.Operation): Applies gradient update to model followed by an
            internal gradient zeroing operation to `self.grads_and_vars`.
    """
    self.mini_flag = tf.assign(self.mini_flag, tf.constant([0], dtype=tf.float32))
    # grads_and_vars = self.aggregate_gradients(grads_and_vars, method='average')
    with tf.control_dependencies([self.mini_flag]):
        optimize = self._optimizer.apply_gradients(grads_and_vars,
                                                   global_step=global_step)
    # return [optimize, self.zero_grad()]
    return optimize
def load_trainable_vars(sess, filename):
    """Load a .npz archive and assign the value of each loaded
    ndarray to the trainable variable whose name matches the
    archive key. Any elements in the archive that do not have
    a corresponding trainable variable will be returned in a dict.
    """
    other = {}
    try:
        tv = dict([(str(v.name), v) for v in tf.trainable_variables()])
        for k, d in np.load(filename).items():
            if k in tv:
                print('restoring ' + k)
                sess.run(tf.assign(tv[k], d))
            else:
                other[k] = d
    except IOError:
        pass
    return other
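# A complementary saver sketch (not part of the original snippet): dump every
# trainable variable into a .npz keyed by variable name, in the format that
# load_trainable_vars expects.
def save_trainable_vars(sess, filename):
    values = {str(v.name): sess.run(v) for v in tf.trainable_variables()}
    np.savez(filename, **values)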
def learn(self):
    # hard replace parameters
    if self.a_replace_counter % REPLACE_ITER_A == 0:
        self.sess.run([tf.assign(t, e) for t, e in zip(self.at_params, self.ae_params)])
    if self.c_replace_counter % REPLACE_ITER_C == 0:
        self.sess.run([tf.assign(t, e) for t, e in zip(self.ct_params, self.ce_params)])
    self.a_replace_counter += 1
    self.c_replace_counter += 1

    indices = np.random.choice(MEMORY_CAPACITY, size=BATCH_SIZE)
    bt = self.memory[indices, :]
    bs = bt[:, :self.s_dim]
    ba = bt[:, self.s_dim: self.s_dim + self.a_dim]
    br = bt[:, -self.s_dim - 1: -self.s_dim]
    bs_ = bt[:, -self.s_dim:]

    self.sess.run(self.atrain, {self.S: bs})
    self.sess.run(self.ctrain, {self.S: bs, self.a: ba, self.R: br, self.S_: bs_})
def update_prmt_dqn(scope_main):
    q_prmts = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope_main + "/q_network")
    target_prmts = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope_main + "/target_network")
    sess.run([tf.assign(t, q) for t, q in zip(target_prmts, q_prmts)])
    print("updating target-network parameters...")
#
# def local2global():
# def global2local():
# ========= Error Raise =========
def batch_set_value(tuples):
    '''Sets the values of many tensor variables at once.

    # Arguments
        tuples: a list of tuples `(tensor, value)`.
            `value` should be a Numpy array.
    '''
    if tuples:
        assign_ops = []
        feed_dict = {}
        for x, value in tuples:
            value = np.asarray(value)
            tf_dtype = _convert_string_dtype(x.dtype.name.split('_')[0])
            if hasattr(x, '_assign_placeholder'):
                assign_placeholder = x._assign_placeholder
                assign_op = x._assign_op
            else:
                assign_placeholder = tf.placeholder(tf_dtype, shape=value.shape)
                assign_op = x.assign(assign_placeholder)
                x._assign_placeholder = assign_placeholder
                x._assign_op = assign_op
            assign_ops.append(assign_op)
            feed_dict[assign_placeholder] = value
        get_session().run(assign_ops, feed_dict=feed_dict)
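# A minimal usage sketch (hypothetical variables `w` and `b`): overwrite two
# variables in a single session run; the placeholders and assign ops are cached
# on the variables, so repeated calls reuse them instead of growing the graph.
batch_set_value([(w, np.zeros((784, 256), dtype=np.float32)),
                 (b, np.zeros((256,), dtype=np.float32))])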
def __init__(self, inputs, outputs, updates=[]):
    assert type(inputs) in {list, tuple}, 'Input to a TensorFlow backend function should be a list or tuple.'
    assert type(outputs) in {list, tuple}, 'Output to a TensorFlow backend function should be a list or tuple.'
    assert type(updates) in {list, tuple}, 'Updates in a TensorFlow backend function should be a list or tuple.'
    self.inputs = list(inputs)
    self.outputs = list(outputs)
    with tf.control_dependencies(self.outputs):
        updates_ops = []
        for update in updates:
            if type(update) is tuple:
                p, new_p = update
                updates_ops.append(tf.assign(p, new_p))
            else:
                # assumed already an op
                updates_ops.append(update)
        self.updates_op = tf.group(*updates_ops)
def set_param_values(self, flattened_params, **tags):
    debug = tags.pop("debug", False)
    param_values = unflatten_tensors(
        flattened_params, self.get_param_shapes(**tags))
    ops = []
    feed_dict = dict()
    for param, dtype, value in zip(
            self.get_params(**tags),
            self.get_param_dtypes(**tags),
            param_values):
        if param not in self._cached_assign_ops:
            assign_placeholder = tf.placeholder(dtype=param.dtype.base_dtype)
            assign_op = tf.assign(param, assign_placeholder)
            self._cached_assign_ops[param] = assign_op
            self._cached_assign_placeholders[param] = assign_placeholder
        ops.append(self._cached_assign_ops[param])
        feed_dict[self._cached_assign_placeholders[param]] = value.astype(dtype)
        if debug:
            print("setting value of %s" % param.name)
    tf.get_default_session().run(ops, feed_dict=feed_dict)
def predict_sym(self, xs):
    return L.get_output(self.l_out, xs)

# def fit(self, xs, ys):
#     if self._normalize_inputs:
#         # recompute normalizing constants for inputs
#         new_mean = np.mean(xs, axis=0, keepdims=True)
#         new_std = np.std(xs, axis=0, keepdims=True) + 1e-8
#         tf.get_default_session().run(tf.group(
#             tf.assign(self._x_mean_var, new_mean),
#             tf.assign(self._x_std_var, new_std),
#         ))
#     inputs = [xs, ys]
#     loss_before = self._optimizer.loss(inputs)
#     if self._name:
#         prefix = self._name + "_"
#     else:
#         prefix = ""
#     logger.record_tabular(prefix + 'LossBefore', loss_before)
#     self._optimizer.optimize(inputs)
#     loss_after = self._optimizer.loss(inputs)
#     logger.record_tabular(prefix + 'LossAfter', loss_after)
#     logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)
def batch_norm_wrapper(inputs, is_training, decay=0.999, epsilon=1e-3):
    scale = tf.Variable(tf.ones([inputs.get_shape()[-1]]))
    beta = tf.Variable(tf.zeros([inputs.get_shape()[-1]]))
    pop_mean = tf.Variable(tf.zeros([inputs.get_shape()[-1]]), trainable=False)
    pop_var = tf.Variable(tf.ones([inputs.get_shape()[-1]]), trainable=False)

    if is_training:
        batch_mean, batch_var = tf.nn.moments(inputs, [0])
        train_mean = tf.assign(pop_mean,
                               pop_mean * decay + batch_mean * (1 - decay))
        train_var = tf.assign(pop_var,
                              pop_var * decay + batch_var * (1 - decay))
        with tf.control_dependencies([train_mean, train_var]):
            # Normalize with the batch statistics; the control dependency keeps
            # the population running averages updated as a side effect.
            return tf.nn.batch_normalization(inputs,
                                             batch_mean, batch_var, beta, scale, epsilon)
    else:
        return tf.nn.batch_normalization(inputs,
                                         pop_mean, pop_var, beta, scale, epsilon)
## regularization parameter
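# A minimal layer-building sketch (hypothetical tensors h, w, b) around the
# wrapper: normalize the pre-activation, then apply the nonlinearity.
z = tf.matmul(h, w) + b
h_next = tf.nn.relu(batch_norm_wrapper(z, is_training=True))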
def _create(self):
    # d_vars / g_vars are assumed to be collected earlier from the GAN graph.
    d_loss = gan.graph.d_loss
    g_loss = gan.graph.g_loss
    g_lr = np.float32(config.g_learn_rate)
    d_lr = np.float32(config.d_learn_rate)
    gan.graph.d_vars = d_vars
    g_defk = {k[2:]: v for k, v in config.items()
              if k[2:] in inspect.getargspec(config.g_trainer).args and k.startswith("g_")}
    d_defk = {k[2:]: v for k, v in config.items()
              if k[2:] in inspect.getargspec(config.d_trainer).args and k.startswith("d_")}
    g_optimizer = config.g_trainer(g_lr, **g_defk)
    d_optimizer = config.d_trainer(d_lr, **d_defk)
    if config.clipped_gradients:
        g_optimizer = capped_optimizer(g_optimizer, config.clipped_gradients, g_loss, g_vars)
        d_optimizer = capped_optimizer(d_optimizer, config.clipped_gradients, d_loss, d_vars)
    else:
        g_optimizer = g_optimizer.minimize(g_loss, var_list=g_vars)
        d_optimizer = d_optimizer.minimize(d_loss, var_list=d_vars)

    # WGAN-style weight clipping ops for the discriminator variables.
    gan.graph.clip = [tf.assign(d, tf.clip_by_value(d, -config.d_clipped_weights, config.d_clipped_weights))
                      for d in d_vars]
    return g_optimizer, d_optimizer
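# `gan.graph.clip` is a list of assign ops that clamp every discriminator weight
# to [-d_clipped_weights, d_clipped_weights]. A minimal training-loop sketch
# (hypothetical session and feed names), running the clip after each
# discriminator step:
sess.run(d_optimizer, feed_dict=feed)
sess.run(gan.graph.clip)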
def setup_popart(self):
    # See https://arxiv.org/pdf/1602.07714.pdf for details.
    self.old_std = tf.placeholder(tf.float32, shape=[1], name='old_std')
    new_std = self.ret_rms.std
    self.old_mean = tf.placeholder(tf.float32, shape=[1], name='old_mean')
    new_mean = self.ret_rms.mean

    self.renormalize_Q_outputs_op = []
    for vs in [self.critic.output_vars, self.target_critic.output_vars]:
        assert len(vs) == 2
        M, b = vs
        assert 'kernel' in M.name
        assert 'bias' in b.name
        assert M.get_shape()[-1] == 1
        assert b.get_shape()[-1] == 1
        self.renormalize_Q_outputs_op += [M.assign(M * self.old_std / new_std)]
        self.renormalize_Q_outputs_op += [b.assign((b * self.old_std + self.old_mean - new_mean) / new_std)]
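# A minimal sketch of how the renormalization might be driven (hypothetical
# names): snapshot the old return statistics, update `ret_rms` with fresh
# returns, then rescale the critic output layer so its predictions are preserved.
old_mean, old_std = sess.run([agent.ret_rms.mean, agent.ret_rms.std])
agent.ret_rms.update(returns_batch)
sess.run(agent.renormalize_Q_outputs_op,
         feed_dict={agent.old_mean: np.array([old_mean]),
                    agent.old_std: np.array([old_std])})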
def __init__(self, log_prior, log_joint, prior_sampler,
             hmc, observed, latent, n_chains=25, n_temperatures=1000,
             verbose=False):
    # Shape of latent: [chain_axis, num_data, data dims]
    # Construct the tempered objective
    self.n_chains = n_chains
    self.n_temperatures = n_temperatures
    self.verbose = verbose

    with tf.name_scope("AIS"):
        self.temperature = tf.placeholder(tf.float32, shape=[],
                                          name="temperature")

        def log_fn(observed):
            return log_prior(observed) * (1 - self.temperature) + \
                log_joint(observed) * self.temperature

        self.log_fn = log_fn
        self.log_fn_val = log_fn(merge_dicts(observed, latent))
        self.sample_op, self.hmc_info = hmc.sample(
            log_fn, observed, latent)
        self.init_latent = [tf.assign(z, z_s)
                            for z, z_s in zip(latent.values(),
                                              prior_sampler.values())]
def tune(self, acceptance_rate, fresh_start):
    def adapt_stepsize():
        new_step = tf.assign(self.step, (1 - fresh_start) * self.step + 1)
        rate1 = tf.div(1.0, new_step + self.t0)
        new_h_bar = tf.assign(
            self.h_bar, (1 - fresh_start) * (1 - rate1) * self.h_bar +
            rate1 * (self.delta - acceptance_rate))
        log_epsilon = self.mu - tf.sqrt(new_step) / self.gamma * new_h_bar
        rate = tf.pow(new_step, -self.kappa)
        new_log_epsilon_bar = tf.assign(
            self.log_epsilon_bar,
            rate * log_epsilon + (1 - fresh_start) * (1 - rate) *
            self.log_epsilon_bar)
        with tf.control_dependencies([new_log_epsilon_bar]):
            new_log_epsilon = tf.identity(log_epsilon)
        return tf.exp(new_log_epsilon)

    c = tf.cond(self.adapt_step_size,
                adapt_stepsize,
                lambda: tf.exp(self.log_epsilon_bar))
    return c
def update(self, x):
    # x: (chain_dims data_dims)
    new_t = tf.assign(self.t, self.t + 1)
    weight = (1 - self.decay) / (1 - tf.pow(self.decay, new_t))
    # incr: (chain_dims data_dims)
    incr = [weight * (q - mean) for q, mean in zip(x, self.mean)]
    # mean: (1,...,1 data_dims)
    update_mean = [mean.assign_add(
        tf.reduce_mean(i, axis=self.chain_axes, keep_dims=True))
        for mean, i in zip(self.mean, incr)]
    # var: (1,...,1 data_dims)
    new_var = [
        (1 - weight) * var +
        tf.reduce_mean(i * (q - mean), axis=self.chain_axes,
                       keep_dims=True)
        for var, i, q, mean in zip(self.var, incr, x, update_mean)]
    update_var = [tf.assign(var, n_var)
                  for var, n_var in zip(self.var, new_var)]
    return update_var
def batch_normalization(x, scope, decay=0.999, eps=1e-6, training=True):
    ndim = len(x.get_shape().as_list())
    fdim = x.get_shape().as_list()[-1]
    with tf.variable_scope(scope):
        gamma = tf.get_variable("scale", [fdim], tf.float32, tf.constant_initializer(1.0))
        beta = tf.get_variable("offset", [fdim], tf.float32, tf.constant_initializer(0.0))
        mean = tf.get_variable("mean", [fdim], tf.float32, tf.constant_initializer(0.0), trainable=False)
        var = tf.get_variable("variance", [fdim], tf.float32, tf.constant_initializer(1.0), trainable=False)
        if training:
            x_mean, x_var = tf.nn.moments(x, list(range(ndim - 1)))
            avg_mean = tf.assign(mean, mean * decay + x_mean * (1.0 - decay))
            avg_var = tf.assign(var, var * decay + x_var * (1.0 - decay))
            with tf.control_dependencies([avg_mean, avg_var]):
                return tf.nn.batch_normalization(x, x_mean, x_var, beta, gamma, eps)
        else:
            return tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
def batch_normalization_with_mask(x, mask, scope, decay=0.999, eps=1e-6, training=True):
    ndim = len(x.get_shape().as_list())
    fdim = x.get_shape().as_list()[-1]
    with tf.variable_scope(scope):
        gamma = tf.get_variable("scale", [fdim], tf.float32, tf.constant_initializer(1.0))
        beta = tf.get_variable("offset", [fdim], tf.float32, tf.constant_initializer(0.0))
        mean = tf.get_variable("mean", [fdim], tf.float32, tf.constant_initializer(0.0), trainable=False)
        var = tf.get_variable("variance", [fdim], tf.float32, tf.constant_initializer(1.0), trainable=False)
        if training:
            x_mean, x_var = tf.nn.weighted_moments(x, list(range(ndim - 1)), mask)
            avg_mean = tf.assign(mean, mean * decay + x_mean * (1.0 - decay))
            avg_var = tf.assign(var, var * decay + x_var * (1.0 - decay))
            with tf.control_dependencies([avg_mean, avg_var]):
                return tf.nn.batch_normalization(x, x_mean, x_var, beta, gamma, eps)
        else:
            return tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
def test_ref_assign(self):
    # Currently ngraph and tf have different assign semantics:
    # eval(ng.assign(a, 1)) returns None, but eval(tf.assign(a, 1)) returns
    # a, which is 1.
    # TODO: fix this test after assign op / user_deps are fixed in ngraph
    # TODO: double assignments fail

    # tf placeholder
    a = tf.Variable(tf.constant(np.random.randn(2, 3), name="a"))
    b = tf.Variable(tf.constant(np.random.randn(2, 3), name="b"))
    init_op = tf.global_variables_initializer()
    a_update = tf.assign(a, b)

    # test
    tf_result = self.tf_run(a_update, tf_init_op=init_op)
    ng_result = self.ng_run(a)
    ng.testing.assert_allclose(tf_result, ng_result)
def test_ref_assign_add(self):
    # Currently ngraph and tf have different assign semantics:
    # eval(ng.assign(a, 1)) returns None, but eval(tf.assign(a, 1)) returns
    # a, which is 1.
    # TODO: fix this test after assign op / user_deps are fixed in ngraph
    # TODO: double assignments fail

    # tf placeholder
    a = tf.Variable(tf.constant(np.random.randn(2, 3), name="a"))
    b = tf.Variable(tf.constant(np.random.randn(2, 3), name="b"))
    init_op = tf.global_variables_initializer()
    a_update = tf.assign_add(a, b)

    # test
    tf_result = self.tf_run(a_update, tf_init_op=init_op)
    ng_result = self.ng_run(a)
    ng.testing.assert_allclose(tf_result, ng_result)
def _compute_global_pc(self, dataset, session, limit=None):
    """ Compute the Principal Component. """
    _dataset = dataset
    if isinstance(limit, int):
        _dataset = _dataset[:limit]
    d = _dataset
    s0, s1, s2, s3 = d.shape[0], d.shape[1], d.shape[2], d.shape[3]
    flat = np.reshape(d, (s0, s1 * s2 * s3))
    sigma = np.dot(flat.T, flat) / flat.shape[1]
    U, S, V = np.linalg.svd(sigma)
    pc = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + _EPSILON))), U.T)
    self.global_pc.assign(pc, session)
    return pc
# -----------------------
# Persistent Parameters
# -----------------------