def _compute_global_std(self, dataset, session, limit=None):
""" Compute std of a dataset. A limit can be specified for faster
computation, considering only 'limit' first elements. """
_dataset = dataset
std = 0.
if isinstance(limit, int):
_dataset = _dataset[:limit]
if isinstance(_dataset, np.ndarray) and not self.global_std_pc:
std = np.std(_dataset)
else:
for i in range(len(dataset)):
if not self.global_std_pc:
std += np.std(dataset[i]) / len(dataset)
else:
std += (np.std(dataset[i], axis=(0, 1),
keepdims=True) / len(dataset))[0][0]
self.global_std.assign(std, session)
return std
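# A minimal sketch (not part of the original class) contrasting the two
# estimates computed above: np.std over the whole array (the vectorized branch)
# versus the mean of per-sample standard deviations (the fallback loop).
import numpy as np

data = np.random.rand(100, 32, 32, 3)                    # hypothetical image batch
global_std = np.std(data)                                # vectorized branch
looped_std = sum(np.std(x) for x in data) / len(data)    # fallback branch
print(global_std, looped_std)                            # close, but not identical in general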
def set_value(var, value, session=None):
""" set_value.
Set a variable's value. If no session is provided, the default one is used.
Arguments:
var: `Variable`. The variable to assign a value.
value: The value to assign. Must be compatible with variable dtype.
session: `Session`. The session in which to perform the assignment.
Default: the default session.
"""
op = tf.assign(var, value=value)
if not session:
session = tf.get_default_session()
return op.eval(session=session)
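# Hedged usage sketch for set_value above (TF 1.x graph mode); the variable
# name and values are illustrative. Note that each call builds a new assign op,
# so it is meant for occasional updates rather than per-step use.
import tensorflow as tf

lr = tf.Variable(0.1, trainable=False, name="lr")
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    set_value(lr, 0.01, session=sess)
    print(sess.run(lr))  # 0.01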
def init_training_mode():
""" init_training_mode.
Creates the `is_training` variable and its ops if they haven't been created
yet. This op is required if you are using layers such as dropout or
batch normalization independently of TFLearn models (DNN or Trainer class).
"""
# 'is_training' collection stores the training mode variable
coll = tf.get_collection('is_training')
if len(coll) == 0:
tr_var = variable(
"is_training", dtype=tf.bool, shape=[],
initializer=tf.constant_initializer(False),
trainable=False)
tf.add_to_collection('is_training', tr_var)
# 'is_training_ops' stores the ops to update training mode variable
a = tf.assign(tr_var, True)
b = tf.assign(tr_var, False)
tf.add_to_collection('is_training_ops', a)
tf.add_to_collection('is_training_ops', b)
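# A sketch of toggling the flag created by init_training_mode above, using the
# two collections it registers (TF 1.x graph mode; ordering follows the code:
# the "set to True" op is added to 'is_training_ops' first).
import tensorflow as tf

init_training_mode()
is_training = tf.get_collection('is_training')[0]
set_training, set_inference = tf.get_collection('is_training_ops')

with tf.Session() as sess:
    sess.run(set_training)        # tf.assign(tr_var, True)
    print(sess.run(is_training))  # True
    sess.run(set_inference)       # tf.assign(tr_var, False)
    print(sess.run(is_training))  # False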
def set_default_value(self, sess, caffe_mat, layer_id_map):
for layer_name, idxs in layer_id_map.items():
idx, bias_term = idxs
weight = caffe_mat[idx][1][0].transpose((2, 3, 1, 0))
if bias_term:
bias = caffe_mat[idx][1][1]
if layer_name.startswith('upscore'):
weight = weight[:, :, :self.output_dim, :self.output_dim]
bias = bias[:self.output_dim]
if layer_name.startswith('score'):
weight = weight[:, :, :, :self.output_dim]
bias = bias[:self.output_dim]
name = layer_name + '_weight'
sess.run(tf.assign(self.net[name], weight))
if bias_term:
name = layer_name + '_bias'
sess.run(tf.assign(self.net[name], bias))
def accumulate_strings(values, name="strings"):
"""Accumulates strings into a vector.
Args:
values: A 1-d string tensor that contains values to add to the accumulator.
name: Name of the string accumulator variable.
Returns:
A tuple (value_tensor, update_op).
"""
tf.assert_type(values, tf.string)
strings = tf.Variable(
name=name,
initial_value=[],
dtype=tf.string,
trainable=False,
collections=[],
validate_shape=True)
value_tensor = tf.identity(strings)
update_op = tf.assign(
ref=strings, value=tf.concat([strings, values], 0), validate_shape=False)
return value_tensor, update_op
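# A minimal, self-contained sketch of the growing-accumulator pattern used by
# accumulate_strings above: the variable starts empty, and each update must use
# validate_shape=False because the stored shape grows on every assignment.
# Variable and placeholder names here are illustrative.
import tensorflow as tf

acc = tf.Variable([], dtype=tf.string, trainable=False)
new_values = tf.placeholder(tf.string, [None])
update = tf.assign(acc, tf.concat([acc, new_values], 0), validate_shape=False)

with tf.Session() as sess:
    sess.run(acc.initializer)
    sess.run(update, {new_values: ["foo", "bar"]})
    print(sess.run(update, {new_values: ["baz"]}))  # [b'foo' b'bar' b'baz']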
def update_hyper_param(self):
assign_hyper_ops = []
self._mu = tf.identity(tf.cond(
self._do_tune, lambda: self.get_mu_tensor(),
lambda: self._mu_var))
with tf.control_dependencies([self._mu]):
self._lr = tf.identity(tf.cond(
self._do_tune, lambda: self.get_lr_tensor(),
lambda: self._lr_var))
with tf.control_dependencies([self._mu, self._lr]):
if self._use_unsmoothed_lr_mu:
assign_hyper_ops.append(tf.assign(self._mu_var, self._mu))
assign_hyper_ops.append(tf.assign(self._lr_var, self._lr))
else:
self._mu = self._beta * self._mu_var + (1 - self._beta) * self._mu
self._lr = self._beta * self._lr_var + (1 - self._beta) * self._lr
with tf.control_dependencies([self._mu, self._lr]):
assign_hyper_ops.append(tf.assign(self._mu_var, self._mu))
assign_hyper_ops.append(tf.assign(self._lr_var, self._lr))
assign_hyper_op = tf.group(*assign_hyper_ops)
return assign_hyper_op
def set_param_values(self, flattened_params, sess=None, **tags):
debug = tags.pop("debug", False)
param_values = unflatten_tensors(
flattened_params, self.get_param_shapes(**tags))
ops = []
feed_dict = dict()
for param, dtype, value in zip(
self.get_params(**tags),
self.get_param_dtypes(**tags),
param_values):
if param not in self._cached_assign_ops:
assign_placeholder = tf.placeholder(dtype=param.dtype.base_dtype, shape=param.get_shape())
assign_op = tf.assign(param, assign_placeholder)
self._cached_assign_ops[param] = assign_op
self._cached_assign_placeholders[param] = assign_placeholder
ops.append(self._cached_assign_ops[param])
feed_dict[self._cached_assign_placeholders[param]] = value.astype(dtype)
if debug:
print("setting value of %s" % param.name)
tf.get_default_session().run(ops, feed_dict=feed_dict)
# deterministic_mlp_regressor.py (project: rllabplusplus, author: shaneshixiang)
def predict_sym(self, xs):
return L.get_output(self.l_out, xs)
# def fit(self, xs, ys):
# if self._normalize_inputs:
# # recompute normalizing constants for inputs
# new_mean = np.mean(xs, axis=0, keepdims=True)
# new_std = np.std(xs, axis=0, keepdims=True) + 1e-8
# tf.get_default_session().run(tf.group(
# tf.assign(self._x_mean_var, new_mean),
# tf.assign(self._x_std_var, new_std),
# ))
# inputs = [xs, ys]
# loss_before = self._optimizer.loss(inputs)
# if self._name:
# prefix = self._name + "_"
# else:
# prefix = ""
# logger.record_tabular(prefix + 'LossBefore', loss_before)
# self._optimizer.optimize(inputs)
# loss_after = self._optimizer.loss(inputs)
# logger.record_tabular(prefix + 'LossAfter', loss_after)
# logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)
def clear_fn(self):
color = tf.placeholder(tf.float32, [3], name="ph_color")
depth = tf.placeholder(tf.float32, [], name="ph_depth")
packed_color = utils.pack_colors(color, 0)
tiled_color = tf.fill([self.height, self.width], packed_color)
tiled_depth = tf.fill([self.height, self.width], depth)
assign_color = tf.assign(self.color, tiled_color)
assign_depth = tf.assign(self.depth, tiled_depth)
self.commands.append(assign_color)
self.commands.append(assign_depth)
def _clear(color_val=[0., 0., 0.], depth_val=FLT_MIN):
self.args[color] = color_val
self.args[depth] = depth_val
return _clear
def __init__(self, num_units, num_cores, forget_bias=1.0, timestep=0):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
num_cores: int, The number of partitions (cores) in the LSTM state.
forget_bias: float, The bias added to forget gates (see above).
"""
self._num_units = num_units
self._forget_bias = forget_bias
# additional variables
self._cores = tf.constant(num_cores)
self._timestep = tf.Variable(timestep)  # assigned back to 0 at terminal states (or each epoch)
self.reset_timestep = tf.assign(self._timestep, 0)
# auxiliary operators
dilated_mask, hold_mask = self._get_mask(num_cores)
self._dilated_mask = tf.constant(dilated_mask, dtype=tf.float32)
self._hold_mask = tf.constant(hold_mask, dtype=tf.float32)
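# Hypothetical usage of the cell defined above; the class name DilatedLSTMCell
# is an assumption (only __init__ is shown in the snippet). reset_timestep
# re-zeros the internal step counter, e.g. at an episode or epoch boundary.
cell = DilatedLSTMCell(num_units=256, num_cores=4)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # ... run the recurrent computation, which advances the timestep ...
    sess.run(cell.reset_timestep)  # counter back to 0 for the next episode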
def gnr_graph_checkpoint():
"""
Generate the computation graph and a checkpoint.
:return:
"""
I = tf.placeholder(tf.float32, shape=[None, 3], name='I') # input
W = tf.Variable(tf.zeros(shape=[3, 2]), dtype=tf.float32, name='W') # weights
b = tf.Variable(tf.zeros(shape=[2]), dtype=tf.float32, name='b') # biases
O = tf.nn.relu(tf.matmul(I, W) + b, name='O') # activation / output
saver = tf.train.Saver()
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
tf.train.write_graph(sess.graph_def, MODEL_FOLDER, 'tfdroid.pbtxt')  # save the TensorFlow graph definition
# assign values to the variables
sess.run(tf.assign(W, [[1, 2], [4, 5], [7, 8]]))
sess.run(tf.assign(b, [1, 1]))
# save a checkpoint containing the variable values
saver.save(sess, MODEL_FOLDER + 'tfdroid.ckpt')
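# A hedged companion sketch: rebuilding the same graph and restoring the
# checkpoint written by gnr_graph_checkpoint above (MODEL_FOLDER is the same
# constant assumed by that function).
def restore_graph_checkpoint():
    with tf.Graph().as_default():
        I = tf.placeholder(tf.float32, shape=[None, 3], name='I')
        W = tf.Variable(tf.zeros(shape=[3, 2]), dtype=tf.float32, name='W')
        b = tf.Variable(tf.zeros(shape=[2]), dtype=tf.float32, name='b')
        O = tf.nn.relu(tf.matmul(I, W) + b, name='O')
        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, MODEL_FOLDER + 'tfdroid.ckpt')
            print(sess.run(W))  # [[1. 2.] [4. 5.] [7. 8.]]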
def pull_params(self):
pull_actor_params = [tf.assign(l_p, g_p) for g_p, l_p in zip(self.globalAC.actor_params, self.actor_params)]
pull_critic_params = [tf.assign(l_p, g_p) for g_p, l_p in zip(self.globalAC.critic_params, self.critic_params)]
return [pull_actor_params, pull_critic_params]
def push_params(self):  # note: push does not use assign; it applies the local gradients to the globalAC parameters
push_actor_params = self.optimizer_actor.apply_gradients(zip(self.actor_grads, self.globalAC.actor_params))
push_critic_params = self.optimizer_critic.apply_gradients(zip(self.critic_grads, self.globalAC.critic_params))
return [push_actor_params, push_critic_params]
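# A self-contained sketch of the pull pattern used above: copying one list of
# parameters onto another with tf.assign (toy variables, names illustrative).
# push_params, by contrast, goes through apply_gradients rather than assign.
import tensorflow as tf

global_params = [tf.Variable([1.0, 2.0]), tf.Variable([3.0])]
local_params = [tf.Variable([0.0, 0.0]), tf.Variable([0.0])]
pull = [tf.assign(l_p, g_p) for g_p, l_p in zip(global_params, local_params)]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(pull)
    print(sess.run(local_params))  # local copies now equal the global values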
def set_value(v, val):
get_session().run(v.assign(val))
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype,[total_size])
start=0
assigns = []
for (shape,v) in zip(shapes,var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start+size],shape)))
start+=size
self.op = tf.group(*assigns)
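# The snippet above only shows the constructor. A typical companion method
# (a sketch consistent with how self.theta and self.op are built, not taken
# from the original file) feeds a flat parameter vector and runs the assigns:
def __call__(self, theta):
    tf.get_default_session().run(self.op, feed_dict={self.theta: theta})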
def test_sampling(self):
hook = hooks.TrainSampleHook(
params={"every_n_steps": 10}, model_dir=self.model_dir,
run_config=tf.contrib.learn.RunConfig())
global_step = tf.contrib.framework.get_or_create_global_step()
no_op = tf.no_op()
hook.begin()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
#pylint: disable=W0212
mon_sess = monitored_session._HookedSession(sess, [hook])
# Should trigger for step 0
sess.run(tf.assign(global_step, 0))
mon_sess.run(no_op)
outfile = os.path.join(self.sample_dir, "samples_000000.txt")
with open(outfile, "rb") as readfile:
self.assertIn("Prediction followed by Target @ Step 0",
readfile.read().decode("utf-8"))
# Should not trigger for step 9
sess.run(tf.assign(global_step, 9))
mon_sess.run(no_op)
outfile = os.path.join(self.sample_dir, "samples_000009.txt")
self.assertFalse(os.path.exists(outfile))
# Should trigger for step 10
sess.run(tf.assign(global_step, 10))
mon_sess.run(no_op)
outfile = os.path.join(self.sample_dir, "samples_000010.txt")
with open(outfile, "rb") as readfile:
self.assertIn("Prediction followed by Target @ Step 10",
readfile.read().decode("utf-8"))
def test_capture(self):
global_step = tf.contrib.framework.get_or_create_global_step()
# Some test computation
some_weights = tf.get_variable("weights", [2, 128])
computation = tf.nn.softmax(some_weights)
hook = hooks.MetadataCaptureHook(
params={"step": 5}, model_dir=self.model_dir,
run_config=tf.contrib.learn.RunConfig())
hook.begin()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
#pylint: disable=W0212
mon_sess = monitored_session._HookedSession(sess, [hook])
# Should not trigger for step 0
sess.run(tf.assign(global_step, 0))
mon_sess.run(computation)
self.assertEqual(gfile.ListDirectory(self.model_dir), [])
# Should trigger *after* step 5
sess.run(tf.assign(global_step, 5))
mon_sess.run(computation)
self.assertEqual(gfile.ListDirectory(self.model_dir), [])
mon_sess.run(computation)
self.assertEqual(
set(gfile.ListDirectory(self.model_dir)),
set(["run_meta", "tfprof_log", "timeline.json"]))
def get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n):
"""Returns the update op for loss scaling variables.
We maintain the counter `loss_scale_normal_steps` to count the number of steps
we have been using the current `loss_scale`. In most cases, this function
increments `loss_scale_normal_steps`. However, once `loss_scale_normal_steps`
reaches the threshold `inc_loss_scale_every_n`, we double `loss_scale` and
reset `loss_scale_normal_steps` to zero.
This op is only called if the gradients don't have any infs or nans. Instead,
if infs or nans occur in the gradients, we immediately halve `loss_scale` and
reset `loss_scale_normal_steps` to zero.
Args:
loss_scale: a tf.Variable representing the loss_scale value.
loss_scale_normal_steps: a tf.Variable representing the number of training
steps that have run since the loss_scale last changed.
inc_loss_scale_every_n: a Python integer threshold. `loss_scale` is
increased every `inc_loss_scale_every_n` steps, unless the gradients have
infs or nans.
Returns:
An op for updating `loss_scale` and `loss_scale_normal_steps`.
"""
def increment_loss_scale_normal_steps_func():
return tf.group(loss_scale_normal_steps.assign_add(1))
def increase_loss_scale_func():
return tf.group(
tf.assign(loss_scale_normal_steps, 0),
tf.assign(loss_scale, loss_scale * 2))
# true_fn and false_fn must have the same type.
return tf.cond(loss_scale_normal_steps < inc_loss_scale_every_n,
increment_loss_scale_normal_steps_func,
increase_loss_scale_func)
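# A hedged sketch of how the update op above is typically wired in: apply it
# only when the gradients are finite, otherwise halve the scale and reset the
# counter. Variable names and the placeholder are illustrative assumptions.
import tensorflow as tf

loss_scale = tf.Variable(2.0 ** 15, trainable=False, dtype=tf.float32)
loss_scale_normal_steps = tf.Variable(0, trainable=False, dtype=tf.int32)
grads_are_finite = tf.placeholder(tf.bool, [])  # e.g. tf.reduce_all of tf.is_finite over all gradients

def if_finite():
    return get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
                                    inc_loss_scale_every_n=2000)

def if_overflow():
    return tf.group(tf.assign(loss_scale, loss_scale / 2),
                    tf.assign(loss_scale_normal_steps, 0))

loss_scale_update = tf.cond(grads_are_finite, if_finite, if_overflow)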
def fix_variables(self, sess, pretrained_model):
print('Fix Resnet V1 layers..')
with tf.variable_scope('Fix_Resnet_V1') as scope:
with tf.device("/cpu:0"):
# fix RGB to BGR
conv1_rgb = tf.get_variable("conv1_rgb", [7, 7, 3, 64], trainable=False)
restorer_fc = tf.train.Saver({self._scope + "/conv1/weights": conv1_rgb})
restorer_fc.restore(sess, pretrained_model)
sess.run(tf.assign(self._variables_to_fix[self._scope + '/conv1/weights:0'],
tf.reverse(conv1_rgb, [2])))
def fix_variables(self, sess, pretrained_model):
print('Fix MobileNet V1 layers..')
with tf.variable_scope('Fix_MobileNet_V1') as scope:
with tf.device("/cpu:0"):
# fix RGB to BGR, and match the scale by (255.0 / 2.0)
Conv2d_0_rgb = tf.get_variable("Conv2d_0_rgb",
[3, 3, 3, max(int(32 * self._depth_multiplier), 8)],
trainable=False)
restorer_fc = tf.train.Saver({self._scope + "/Conv2d_0/weights": Conv2d_0_rgb})
restorer_fc.restore(sess, pretrained_model)
sess.run(tf.assign(self._variables_to_fix[self._scope + "/Conv2d_0/weights:0"],
tf.reverse(Conv2d_0_rgb / (255.0 / 2.0), [2])))
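# A minimal sketch of the channel flip both fix_variables methods perform:
# tf.reverse(kernel, [2]) reverses the input-channel axis of an HWIO conv
# kernel, so filters trained on BGR-ordered inputs line up with RGB inputs
# (or vice versa). Shapes here are illustrative.
import numpy as np
import tensorflow as tf

kernel = tf.constant(np.arange(7 * 7 * 3 * 64, dtype=np.float32).reshape(7, 7, 3, 64))
bgr_kernel = tf.reverse(kernel, [2])  # input-channel axis: (R, G, B) -> (B, G, R)

with tf.Session() as sess:
    rgb, bgr = sess.run([kernel, bgr_kernel])
    assert np.array_equal(bgr[:, :, 0, :], rgb[:, :, 2, :])  # channel order reversed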