def _default_local_init_op():
  return control_flow_ops.group(variables.initialize_local_variables(),
                                data_flow_ops.initialize_all_tables())
Example source code for Python's control_flow_ops.group()
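The snippets below are collected from TensorFlow 1.x internals, so they assume the framework's private import style. For reference, most of them rely on imports along these lines (private module paths; exact locations can shift between 1.x releases):

# Typical TF 1.x internal imports assumed by the snippets below; these are
# private module paths and may differ slightly between releases.
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables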
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
  """Accumulate histograms in new variables."""
  with variable_scope.variable_scope(
      None, 'hist_accumulate', [hist_true, hist_false]):
    # Holds running total histogram of scores for records labeled True.
    hist_true_acc = variable_scope.get_variable(
        'hist_true_acc',
        initializer=init_ops.zeros_initializer(
            [nbins],
            dtype=hist_true.dtype),
        collections=collections,
        trainable=False)
    # Holds running total histogram of scores for records labeled False.
    hist_false_acc = variable_scope.get_variable(
        'hist_false_acc',
        initializer=init_ops.zeros_initializer(
            [nbins],
            dtype=hist_false.dtype),
        collections=collections,
        trainable=False)
    update_op = control_flow_ops.group(
        hist_true_acc.assign_add(hist_true),
        hist_false_acc.assign_add(hist_false),
        name='update_op')
    return hist_true_acc, hist_false_acc, update_op
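The accumulator pair plus a grouped `update_op` is the standard TF1 streaming-metric idiom. A hypothetical driver loop for it; `batch_hist_true`/`batch_hist_false` stand in for per-batch histogram tensors of shape `[nbins]`, and `num_batches` is an assumed loop bound:

# Hypothetical driver for the accumulator pattern above (TF 1.x style).
hist_true_acc, hist_false_acc, update_op = _auc_hist_accumulate(
    batch_hist_true, batch_hist_false, nbins=100,
    collections=[ops.GraphKeys.LOCAL_VARIABLES])
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  for _ in range(num_batches):
    sess.run(update_op)          # add this batch's histograms to the totals
  totals = sess.run([hist_true_acc, hist_false_acc])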
def update_weights(self, train_op):
  """Updates the model weights.
  This function must be called on at least one worker after `minimize`.
  In distributed training this call can be omitted on non-chief workers to
  speed up training.
  Args:
    train_op: The operation returned by the `minimize` call.
  Returns:
    An Operation that updates the model weights.
  """
  with ops.control_dependencies([train_op]):
    update_ops = []
    # Copy the un-shrunk weights over to the user-provided variables.
    # (The slot name keeps the historical 'unshrinked' spelling.)
    for name in ['sparse_features_weights', 'dense_features_weights']:
      for var, slot_var in zip(self._variables[name],
                               self._slots['unshrinked_' + name]):
        update_ops.append(var.assign(slot_var))
    # Apply proximal step.
    with ops.control_dependencies(update_ops):
      update_ops = []
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for var in self._variables[name]:
          with ops.device(var.device):
            # pylint: disable=protected-access
            update_ops.append(
                gen_sdca_ops._sdca_shrink_l1(
                    self._convert_n_to_tensor(
                        [var], as_ref=True),
                    l1=self._symmetric_l1_regularization(),
                    l2=self._symmetric_l2_regularization()))
      return control_flow_ops.group(*update_ops)
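`group` returns a single Operation with no outputs that completes only once all of its input ops have run, which is why it is used above to fuse the copy phase and the shrink phase into one runnable op. A minimal self-contained sketch using the public `tf.group` alias (TF 1.x):

import tensorflow as tf

a = tf.Variable(1.0)
b = tf.Variable(2.0)
step = tf.group(a.assign_add(1.0), b.assign_sub(0.5))  # one op, no outputs
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(step)                 # runs both assignments together
  print(sess.run([a, b]))        # [2.0, 1.5]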
def _train_op(self, features, labels, train_op_fn, logits):
  """Returns op for the training step."""
  loss = self._training_loss(features, labels, logits)
  train_op = train_op_fn(loss)
  if self._enable_centered_bias:
    centered_bias_step = [_centered_bias_step(
        self.logits_dimension,
        self._centered_bias_weight_collection,
        labels,
        self._train_loss_fn)]
    train_op = control_flow_ops.group(train_op, *centered_bias_step)
  return train_op
def _AddShardedRestoreOps(self, filename_tensor, per_device,
                          restore_sequentially, reshape):
  """Add Ops to restore variables from multiple devices.
  Args:
    filename_tensor: Tensor for the path of the file to load.
    per_device: A list of (device, _VarToSave) pairs, as
      returned by _GroupByDevices().
    restore_sequentially: True if we want to restore variables sequentially
      within a shard.
    reshape: True if we want to reshape loaded tensors to the shape of
      the corresponding variable.
  Returns:
    An Operation that restores the variables.
  """
  sharded_restores = []
  for shard, (device, vars_to_save) in enumerate(per_device):
    with ops.device(device):
      sharded_restores.append(self._AddRestoreOps(
          filename_tensor,
          vars_to_save,
          restore_sequentially,
          reshape,
          preferred_shard=shard,
          name="restore_shard"))
  return control_flow_ops.group(*sharded_restores, name="restore_all")
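Each shard's restore subgraph is pinned to its own device, and the per-shard ops are then fused into a single `restore_all` op. The same pin-then-group pattern in miniature, with `no_op` standing in for the real restore subgraphs and hypothetical device strings:

# Sketch of the pin-then-group pattern; devices and ops are placeholders.
shard_ops = []
for shard, device in enumerate(["/cpu:0", "/gpu:0"]):
  with ops.device(device):
    shard_ops.append(control_flow_ops.no_op(name="restore_shard_%d" % shard))
restore_all = control_flow_ops.group(*shard_ops, name="restore_all")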
def _apply_sparse(self, grad, var):
  lr = (self._lr_t *
        math_ops.sqrt(1 - self._beta2_power)
        / (1 - self._beta1_power))
  # m_t = beta1 * m + (1 - beta1) * g_t
  m = self.get_slot(var, "m")
  m_scaled_g_values = grad.values * (1 - self._beta1_t)
  m_t = state_ops.assign(m, m * self._beta1_t,
                         use_locking=self._use_locking)
  m_t = state_ops.scatter_add(m_t, grad.indices, m_scaled_g_values,
                              use_locking=self._use_locking)
  # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
  v = self.get_slot(var, "v")
  v_scaled_g_values = (grad.values * grad.values) * (1 - self._beta2_t)
  v_t = state_ops.assign(v, v * self._beta2_t, use_locking=self._use_locking)
  v_t = state_ops.scatter_add(v_t, grad.indices, v_scaled_g_values,
                              use_locking=self._use_locking)
  # Generalized exponent: pow_t = 0.5 recovers Adam's square root.
  v_sqrt = math_ops.pow(v_t, self._pow_t)
  var_update = state_ops.assign_sub(var,
                                    lr * m_t / (v_sqrt + self._epsilon_t),
                                    use_locking=self._use_locking)
  # regularization
  var_update = state_ops.assign_sub(var_update,
                                    self._sparse_regularization * var,
                                    use_locking=self._use_locking)
  return control_flow_ops.group(*[var_update, m_t, v_t])
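The sparse path above decays the entire slot with `assign`, then touches only the gradient's rows with `scatter_add`. A minimal sketch of that decay-then-scatter idiom using the public TF 1.x aliases (shapes and constants are illustrative):

m = tf.Variable(tf.zeros([4, 3]))            # slot variable
indices = tf.constant([0, 2])                # rows touched this step
values = tf.ones([2, 3]) * 0.1               # scaled gradient rows
decayed = tf.assign(m, m * 0.9)              # decay every row of the slot
m_t = tf.scatter_add(decayed, indices, values)  # add only the touched rows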
def _finish(self, update_ops, name_scope):
  # Update the power accumulators.
  with ops.control_dependencies(update_ops):
    with ops.device(self._beta1_power.device):
      update_beta1 = self._beta1_power.assign(
          self._beta1_power * self._beta1_t,
          use_locking=self._use_locking)
      update_beta2 = self._beta2_power.assign(
          self._beta2_power * self._beta2_t,
          use_locking=self._use_locking)
  return control_flow_ops.group(*update_ops + [update_beta1, update_beta2],
                                name=name_scope)
def _apply_dense(self, grad, var):
  lr = (self._lr_t *
        math_ops.sqrt(1 - self._beta2_power)
        / (1 - self._beta1_power))
  # m_t = beta1 * m + (1 - beta1) * g_t
  m = self.get_slot(var, "m")
  m_scaled_g_values = grad * (1 - self._beta1_t)
  m_t = state_ops.assign(m, m * self._beta1_t + m_scaled_g_values,
                         use_locking=self._use_locking)
  v = self.get_slot(var, "v")
  # NOTE: unlike vanilla Adam, the second moment tracks (g_t - m_t)^2 -- a
  # running variance estimate of the gradient rather than its raw second
  # moment.
  v_scaled_g_values = math_ops.pow(grad - m_t, 2) * (1 - self._beta2_t)
  v_t = state_ops.assign(v, v * self._beta2_t + v_scaled_g_values,
                         use_locking=self._use_locking)
  # Generalized exponent: pow_t = 0.5 recovers Adam's square root.
  v_sqrt = math_ops.pow(v_t, self._pow_t)
  var_update = state_ops.assign_sub(var,
                                    lr * m_t / (v_sqrt + self._epsilon_t),
                                    use_locking=self._use_locking)
  # regularization
  var_update = state_ops.assign_sub(var_update,
                                    self._dense_regularization * var,
                                    use_locking=self._use_locking)
  return control_flow_ops.group(*[var_update, m_t, v_t])
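In update-rule form, the dense step above computes (α is the learning rate `_lr_t`, p is `_pow_t`, λ is `_dense_regularization`; p = 1/2 and λ = 0 recover vanilla Adam):

\begin{aligned}
m_t &= \beta_1\, m_{t-1} + (1-\beta_1)\, g_t \\
v_t &= \beta_2\, v_{t-1} + (1-\beta_2)\,(g_t - m_t)^2 \\
\theta_t &= \theta_{t-1}
  - \frac{\alpha\,\sqrt{1-\beta_2^{\,t}}}{1-\beta_1^{\,t}}
    \cdot \frac{m_t}{v_t^{\,p} + \epsilon}
  - \lambda\,\theta_{t-1}
\end{aligned}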
def export(self, last_checkpoint, output_dir):
  """Builds a prediction graph and exports the model.
  Args:
    last_checkpoint: Path to the latest checkpoint file from training.
    output_dir: Path to the folder to be used to output the model.
  """
  logging.info('Exporting prediction graph to %s', output_dir)
  with tf.Session(graph=tf.Graph()) as sess:
    # Build and save prediction meta graph and trained variable values.
    inputs, outputs = self.build_prediction_graph()
    signature_def_map = {
        'serving_default':
            signature_def_utils.predict_signature_def(inputs, outputs)
    }
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    self.restore_from_checkpoint(sess, self.inception_checkpoint_file,
                                 last_checkpoint)
    init_op_serving = control_flow_ops.group(
        variables.local_variables_initializer(),
        tf.tables_initializer())
    builder = saved_model_builder.SavedModelBuilder(output_dir)
    builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map=signature_def_map,
        legacy_init_op=init_op_serving)
    builder.save(as_text=False)
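Because the grouped `init_op_serving` is saved as the SavedModel's `legacy_init_op`, table and local-variable initialization reruns automatically at load time. A minimal load-side sketch (TF 1.x `loader` API; `output_dir` as above):

from tensorflow.python.saved_model import loader, tag_constants

with tf.Session(graph=tf.Graph()) as sess:
  # Loading the SavedModel also runs the stored legacy_init_op.
  loader.load(sess, [tag_constants.SERVING], output_dir)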
def _extract_metric_update_ops(eval_dict):
  """Separate update operations from metric value operations."""
  update_ops = []
  value_ops = {}
  # Sort metrics lexicographically so graph is identical every time.
  for name, metric_ops in sorted(six.iteritems(eval_dict)):
    value_ops[name] = metric_ops[0]
    update_ops.append(metric_ops[1])
  if update_ops:
    update_op = control_flow_ops.group(*update_ops)
  else:
    update_op = None
  return update_op, value_ops
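A hypothetical `eval_dict` for the helper above: each value is the `(value_op, update_op)` pair that `tf.metrics.*` functions return, so index 0 is the value and index 1 the update:

labels = tf.constant([1, 0, 1])
predictions = tf.constant([1, 0, 0])
scores = tf.constant([0.9, 0.2, 0.4])
eval_dict = {
    'accuracy': tf.metrics.accuracy(labels, predictions),  # (value, update)
    'auc': tf.metrics.auc(labels, scores),
}
update_op, value_ops = _extract_metric_update_ops(eval_dict)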
def _finish(self, update_ops, steps_and_params, name_scope):
  """Groups the per-variable update ops into a single training op."""
  return control_flow_ops.group(*update_ops, name=name_scope)
#=============================================================
Source: graph_actions.py (project DeepLearning_VirtualReality_BigData_Project, author rashmitripathi)
def _get_local_init_op():
  local_init_op = _get_first_op_from_collection(
      ops.GraphKeys.LOCAL_INIT_OP)
  if local_init_op is None:
    op_list = [variables.local_variables_initializer(),
               data_flow_ops.tables_initializer()]
    if op_list:
      local_init_op = control_flow_ops.group(*op_list)
      ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
  return local_init_op
Source: cudnn_rnn_ops.py (project DeepLearning_VirtualReality_BigData_Project, author rashmitripathi)
def restore(self, restored_tensors, restored_shapes):
  # The canonical tensor list is weights followed by biases, so split halfway.
  weights = restored_tensors[:len(restored_tensors) // 2]
  biases = restored_tensors[len(restored_tensors) // 2:]
  params = self._canonical_to_params(weights, biases)
  if not isinstance(params, tuple):
    params = (params,)
  assign_ops = [
      state_ops.assign(
          variable, param, validate_shape=False)
      for variable, param in zip(self._variables, params)
  ]
  return control_flow_ops.group(*assign_ops)
Source: cudnn_rnn_ops_benchmark.py (project DeepLearning_VirtualReality_BigData_Project, author rashmitripathi)
def benchmarkTfRNNLSTMTraining(self):
  test_configs = self._GetTestConfig()
  for config_name, config in test_configs.items():
    num_layers = config["num_layers"]
    num_units = config["num_units"]
    batch_size = config["batch_size"]
    seq_length = config["seq_length"]
    with ops.Graph().as_default(), ops.device("/gpu:0"):
      inputs = seq_length * [
          array_ops.zeros([batch_size, num_units], dtypes.float32)
      ]
      initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
      # Use a factory so each layer gets its own (non-shared) LSTMCell.
      cell = lambda: core_rnn_cell_impl.LSTMCell(
          num_units=num_units, initializer=initializer, state_is_tuple=True)
      multi_cell = core_rnn_cell_impl.MultiRNNCell(
          [cell() for _ in range(num_layers)])
      outputs, final_state = core_rnn.static_rnn(
          multi_cell, inputs, dtype=dtypes.float32)
      trainable_variables = ops.get_collection(
          ops.GraphKeys.TRAINABLE_VARIABLES)
      gradients = gradients_impl.gradients([outputs, final_state],
                                           trainable_variables)
      # Grouping the gradient tensors makes the benchmark run the full
      # backward pass without applying any weight update.
      training_op = control_flow_ops.group(*gradients)
      self._BenchmarkOp(training_op, "tf_rnn_lstm %s %s" %
                        (config_name, self._GetConfigDesc(config)))
def training_graph(self, input_data, input_labels, data_spec=None,
                   epoch=None, **tree_kwargs):
  """Constructs a TF graph for training a random forest.
  Args:
    input_data: A tensor or SparseTensor or placeholder for input data.
    input_labels: A tensor or placeholder for labels associated with
      input_data.
    data_spec: A list of tf.dtype values specifying the original types of
      each column.
    epoch: A tensor or placeholder for the epoch the training data comes
      from.
    **tree_kwargs: Keyword arguments passed to each tree's training_graph.
  Returns:
    The last op in the random forest training graph.
  """
  data_spec = [constants.DATA_FLOAT] if data_spec is None else data_spec
  tree_graphs = []
  for i in range(self.params.num_trees):
    with ops.device(self.device_assigner.get_device(i)):
      seed = self.params.base_random_seed
      if seed != 0:
        seed += i
      # If using bagging, randomly select some of the input.
      tree_data = input_data
      tree_labels = input_labels
      if self.params.bagging_fraction < 1.0:
        # TODO(thomaswc): This does sampling without replacement. Consider
        # also allowing sampling with replacement as an option.
        batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
        r = random_ops.random_uniform(batch_size, seed=seed)
        mask = math_ops.less(
            r, array_ops.ones_like(r) * self.params.bagging_fraction)
        gather_indices = array_ops.squeeze(
            array_ops.where(mask), squeeze_dims=[1])
        # TODO(thomaswc): Calculate out-of-bag data and labels, and store
        # them for use in calculating statistics later.
        tree_data = array_ops.gather(input_data, gather_indices)
        tree_labels = array_ops.gather(input_labels, gather_indices)
      if self.params.bagged_features:
        tree_data = self._bag_features(i, tree_data)
      initialization = self.trees[i].tree_initialization()
      with ops.control_dependencies([initialization]):
        tree_graphs.append(
            self.trees[i].training_graph(
                tree_data, tree_labels, seed, data_spec=data_spec,
                epoch=([0] if epoch is None else epoch),
                **tree_kwargs))
  return control_flow_ops.group(*tree_graphs, name='train')
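Since the returned op is a single `group` over all per-tree graphs, one `sess.run` advances every tree. A hypothetical driver; `forest`, `x`, and `y` are assumed to exist:

train_op = forest.training_graph(x, y)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)  # one call trains all trees in the forest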