def get_label_costs(coder, dataset, labels, batch_size=100):
"""
Return average cross entropy loss and class error rate on
dataset by coder object with its current weights.
"""
n_batches = dataset.shape[0] // batch_size
error = 0.
cost = 0.
for index in range(n_batches):
batch = dataset[index * batch_size : (index+1) * batch_size]
labels_batch = labels[index * batch_size : (index+1) * batch_size]
predicted = coder.get_hidden_values(batch)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=predicted,
labels=labels_batch)
cost += tf.reduce_mean(loss).eval()
        bad_prediction = tf.not_equal(tf.argmax(predicted, 1), labels_batch)
error += tf.reduce_mean(tf.cast(bad_prediction, tf.float32)).eval()
return (cost / n_batches, error / n_batches)
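A minimal way to exercise this helper: the coder below is a hypothetical stub whose get_hidden_values returns class logits, and the .eval() calls inside get_label_costs require a default session, which the with tf.Session() block provides.

import numpy as np
import tensorflow as tf

class DummyCoder(object):
    """Hypothetical stand-in that projects inputs onto 10 class logits."""
    def __init__(self, n_in, n_classes=10):
        self.w = tf.Variable(tf.random_normal([n_in, n_classes]))

    def get_hidden_values(self, batch):
        return tf.matmul(tf.constant(batch, dtype=tf.float32), self.w)

dataset = np.random.rand(200, 50).astype(np.float32)
labels = np.random.randint(0, 10, size=200).astype(np.int64)

with tf.Session() as sess:
    coder = DummyCoder(n_in=50)
    sess.run(tf.global_variables_initializer())
    # The with-block installs sess as the default session, so .eval() works.
    cost, error = get_label_costs(coder, dataset, labels, batch_size=100)
    print(cost, error)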
Python not_equal() example source code
def bboxes_filter_labels(labels, bboxes,
                         out_labels=[], num_classes=np.inf,
                         scope=None):
    """Filter out labels from a collection. Typically used to get rid
    of DontCare elements. Also remove elements based on the number of classes.

    Return:
      labels, bboxes: Filtered elements.
    """
    with tf.name_scope(scope, 'bboxes_filter_labels', [labels, bboxes]):
        # Keep only entries whose label is a valid class index...
        mask = tf.less(labels, num_classes)
        # ...and which do not carry one of the excluded labels (e.g. DontCare).
        for l in out_labels:
            mask = tf.logical_and(mask, tf.not_equal(labels, l))
        labels = tf.boolean_mask(labels, mask)
        bboxes = tf.boolean_mask(bboxes, mask)
        return labels, bboxes
# =========================================================================== #
# Standard boxes computation.
# =========================================================================== #
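A usage sketch for the filter above, assuming integer class labels, [N, 4] boxes, and (as an example) label 0 playing the role of DontCare:

import numpy as np
import tensorflow as tf

labels = tf.constant([0, 3, 7, 2], dtype=tf.int64)             # 0 stands in for DontCare
bboxes = tf.constant(np.random.rand(4, 4), dtype=tf.float32)   # [N, 4] boxes

kept_labels, kept_bboxes = bboxes_filter_labels(labels, bboxes,
                                                out_labels=[0], num_classes=5)
with tf.Session() as sess:
    print(sess.run(kept_labels))   # -> [3 2]; label 0 and label 7 (>= 5) are dropped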
def testCplxNotEqualGPU(self):
shapes1 = [(5,4,3), (5,4), (1,), (5,)]
shapes2 = [(5,4,3), (1,), (5,4), (5,)]
for [sh0, sh1] in zip(shapes1, shapes2):
x = (np.random.randn(np.prod(sh0)) +
1j*np.random.randn(np.prod(sh0))).astype(np.complex64)
y = (np.random.randn(np.prod(sh1)) +
1j*np.random.randn(np.prod(sh1))).astype(np.complex64)
if len(sh0) == 1:
ix = np.random.permutation(
np.arange(np.prod(sh1)))[:np.prod(sh1)//2]
y[ix] = x[0]
elif len(sh1) == 1:
ix = np.random.permutation(
np.arange(np.prod(sh0)))[:np.prod(sh0)//2]
x[ix] = y[0]
else:
ix = np.random.permutation(
np.arange(np.prod(sh0)))[:np.prod(sh0)//2]
x[ix] = y[ix]
x = np.reshape(x, sh0)
y = np.reshape(y, sh1)
self._compareGpu(x, y, np.not_equal, tf.not_equal)
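Outside the test harness, the same NumPy/TensorFlow parity can be checked in a few lines; this assumes a TensorFlow build whose not_equal kernel accepts complex64 (the test above targets a fork that adds complex GPU kernels):

import numpy as np
import tensorflow as tf

x = (np.random.randn(5, 4) + 1j * np.random.randn(5, 4)).astype(np.complex64)
y = x.copy()
y[0, :] += (1 + 1j)                      # force the first row to differ

expected = np.not_equal(x, y)            # NumPy reference result
with tf.Session() as sess:
    actual = sess.run(tf.not_equal(tf.constant(x), tf.constant(y)))
np.testing.assert_array_equal(expected, actual)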
def errors(logits, labels, name=None):
"""Compute error mean and whether each unlabeled example is erroneous
Assume unlabeled examples have label == -1.
Compute the mean error over unlabeled examples.
Mean error is NaN if there are no unlabeled examples.
Note that unlabeled examples are treated differently in cost calculation.
"""
with tf.name_scope(name, "errors") as scope:
applicable = tf.not_equal(labels, -1)
labels = tf.boolean_mask(labels, applicable)
logits = tf.boolean_mask(logits, applicable)
predictions = tf.argmax(logits, -1)
labels = tf.cast(labels, tf.int64)
per_sample = tf.to_float(tf.not_equal(predictions, labels))
mean = tf.reduce_mean(per_sample, name=scope)
return mean, per_sample
def classification_costs(logits, labels, name=None):
"""Compute classification cost mean and classification cost per sample
Assume unlabeled examples have label == -1. For unlabeled examples, cost == 0.
Compute the mean over all examples.
Note that unlabeled examples are treated differently in error calculation.
"""
with tf.name_scope(name, "classification_costs") as scope:
applicable = tf.not_equal(labels, -1)
# Change -1s to zeros to make cross-entropy computable
labels = tf.where(applicable, labels, tf.zeros_like(labels))
# This will now have incorrect values for unlabeled examples
per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
# Retain costs only for labeled
per_sample = tf.where(applicable, per_sample, tf.zeros_like(per_sample))
# Take mean over all examples, not just labeled examples.
labeled_sum = tf.reduce_sum(per_sample)
total_count = tf.to_float(tf.shape(per_sample)[0])
mean = tf.div(labeled_sum, total_count, name=scope)
return mean, per_sample
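A toy run of the two helpers above, assuming label -1 marks unlabeled examples as described in the docstrings:

import tensorflow as tf

logits = tf.constant([[2.0, 0.1], [0.2, 1.5], [3.0, 0.0]])
labels = tf.constant([0, -1, 1])          # the second example is unlabeled

err_mean, err_per_sample = errors(logits, labels)
cost_mean, cost_per_sample = classification_costs(logits, labels)

with tf.Session() as sess:
    print(sess.run([err_mean, cost_mean]))
    # err_mean averages over the 2 labeled examples only;
    # cost_mean divides the summed labeled cost by all 3 examples.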
def unwrap_output_sparse(self, final_state, include_stop_tokens=True):
"""
        Retrieve the beam search output from the final state.
Returns a sparse tensor with underlying dimensions of [batch_size, max_len]
"""
output_dense = final_state[0]
mask = tf.not_equal(output_dense, self.stop_token)
if include_stop_tokens:
output_dense = tf.concat(1, [output_dense[:, 1:],
tf.ones_like(output_dense[:, 0:1]) *
self.stop_token])
mask = tf.concat(1, [mask[:, 1:], tf.cast(tf.ones_like(mask[:, 0:1],
dtype=tf.int8),
tf.bool)])
return sparse_boolean_mask(output_dense, mask)
def mle_loss(self, outputs, targets):
'''Maximum likelihood estimation loss.'''
present_mask = tf.greater(targets, 0, name='present_mask')
        # don't enforce loss on true <unk>'s
unk_mask = tf.not_equal(targets, self.vocab.unk_index, name='unk_mask')
mask = tf.cast(tf.logical_and(present_mask, unk_mask), tf.float32)
output = tf.reshape(tf.concat(1, outputs), [-1, cfg.hidden_size])
if self.training and cfg.softmax_samples < len(self.vocab.vocab):
targets = tf.reshape(targets, [-1, 1])
mask = tf.reshape(mask, [-1])
loss = tf.nn.sampled_softmax_loss(self.softmax_w, self.softmax_b, output, targets,
cfg.softmax_samples, len(self.vocab.vocab))
loss *= mask
else:
logits = tf.nn.bias_add(tf.matmul(output, tf.transpose(self.softmax_w),
name='softmax_transform_mle'), self.softmax_b)
loss = tf.nn.seq2seq.sequence_loss_by_example([logits],
[tf.reshape(targets, [-1])],
[tf.reshape(mask, [-1])])
return tf.reshape(loss, [cfg.batch_size, -1])
def confidence_cnn13(image_with_alpha, input_size=512):
image = tf.slice(image_with_alpha,[0,0,0,0],[-1,-1,-1,3])
alpha = tf.slice(image_with_alpha,[0,0,0,3],[-1,-1,-1,1])
#print ('image', image)
#print ('alpha', alpha)
visable = tf.not_equal(alpha, tf.zeros_like(alpha))
confidence = confidence_cnn3(image, input_size)
final_confidence = tf.where(visable, confidence, tf.zeros_like(confidence))
#print ('final conf', final_confidence)
return final_confidence
def confidence_cnn14(image_with_alpha, input_size=512):
image = tf.slice(image_with_alpha,[0,0,0,0],[-1,-1,-1,3])
alpha = tf.slice(image_with_alpha,[0,0,0,3],[-1,-1,-1,1])
#print ('image', image)
#print ('alpha', alpha)
visable = tf.not_equal(alpha, tf.zeros_like(alpha))
confidence = confidence_cnn4(image, input_size)
final_confidence = tf.where(visable, confidence, tf.zeros_like(confidence))
#print ('final conf', final_confidence)
return final_confidence
def confidence_cnn23(image_with_alpha, input_size=512):
image = tf.slice(image_with_alpha,[0,0,0,0],[-1,-1,-1,3])
alpha = tf.slice(image_with_alpha,[0,0,0,3],[-1,-1,-1,1])
#print ('image', image)
#print ('alpha', alpha)
visable = tf.not_equal(alpha, tf.zeros_like(alpha))
confidence = confidence_cnn3(image, input_size)
negative_confidence = tf.multiply(tf.ones_like(confidence),tf.constant(-1.0))
final_confidence = tf.where(visable, confidence, negative_confidence)
#print ('final conf', final_confidence)
return final_confidence
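The three wrappers above share one pattern: split off the alpha channel, build a visibility mask with tf.not_equal, and overwrite invisible positions with tf.where. A self-contained sketch of that pattern, with the confidence network replaced by a hypothetical callable:

import tensorflow as tf

def mask_confidence(image_with_alpha, confidence_fn, fill_value=0.0):
    """Keep confidence only where the alpha channel is non-zero."""
    image = tf.slice(image_with_alpha, [0, 0, 0, 0], [-1, -1, -1, 3])
    alpha = tf.slice(image_with_alpha, [0, 0, 0, 3], [-1, -1, -1, 1])
    visible = tf.not_equal(alpha, tf.zeros_like(alpha))
    confidence = confidence_fn(image)                    # hypothetical confidence net
    fill = tf.ones_like(confidence) * fill_value
    return tf.where(visible, confidence, fill)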
def _build_metric(self, model: 'code.model.abstract.Model') -> tf.Tensor:
with tf.name_scope(None, self.metric_name,
values=[self.dataset.source, self.dataset.target]):
x = self.dataset.source
y = self.dataset.target
length = self.dataset.length
# build mask
mask = tf.cast(tf.not_equal(y, tf.zeros_like(y)), tf.float32)
# create masked error tensor
errors = tf.not_equal(
model.inference_model(x, length, reuse=True), y
)
errors = tf.cast(errors, tf.float32) * mask # mask errors
            # tf.reduce_sum(mask) is the number of unmasked elements
return tf.reduce_sum(errors) / tf.reduce_sum(mask)
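A standalone version of the masked misclassification rate computed above, assuming 0 is the padding id:

import tensorflow as tf

targets = tf.constant([[3, 1, 0, 0],
                       [2, 2, 4, 0]], dtype=tf.int64)
predictions = tf.constant([[3, 2, 9, 9],
                           [2, 2, 4, 9]], dtype=tf.int64)

mask = tf.cast(tf.not_equal(targets, 0), tf.float32)        # ignore padded positions
wrong = tf.cast(tf.not_equal(predictions, targets), tf.float32) * mask
error_rate = tf.reduce_sum(wrong) / tf.reduce_sum(mask)     # 1 error / 5 real tokens = 0.2

with tf.Session() as sess:
    print(sess.run(error_rate))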
def chi2(exp, obs):
"""
Compute CHI^2 statistics of non-zero expected elements
"""
zero = tf.constant(0, dtype=tf.float32)
mask = tf.not_equal(exp, zero)
def masking(tensor, mask):
return tf.boolean_mask(tensor, mask)
stat = tf.reduce_sum(
tf.div(
tf.pow(
tf.subtract(masking(obs, mask), masking(exp, mask)),
2),
masking(exp, mask)),
name="chi2_statistics")
return stat
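For example, zero-valued expected cells are excluded from the statistic:

import tensorflow as tf

exp = tf.constant([4.0, 0.0, 6.0, 0.0])
obs = tf.constant([5.0, 2.0, 3.0, 0.0])

with tf.Session() as sess:
    # Only the non-zero expected cells contribute:
    # (5-4)^2/4 + (3-6)^2/6 = 0.25 + 1.5 = 1.75
    print(sess.run(chi2(exp, obs)))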
def add_embedding(self, embeddings):
#embed=np.load('glove{0}_uniform.npy'.format(self.emb_dim))
if embeddings is not None:
initializer = embeddings
else:
initializer = tf.random_uniform_initializer(-0.05,0.05)
with tf.variable_scope("Embed",regularizer=None):
embedding=tf.Variable(initial_value = initializer, trainable=True, name = 'embedding', dtype='float32')
ix=tf.to_int32(tf.not_equal(self.input,-1))*self.input
emb_tree=tf.nn.embedding_lookup(embedding,ix)
emb_tree=emb_tree*(tf.expand_dims(
tf.to_float(tf.not_equal(self.input,-1)),2))
return emb_tree
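The -1 handling above, in isolation: padding indices are clamped to 0 before the lookup and their embeddings are zeroed afterwards. A minimal sketch with a hypothetical embedding matrix:

import tensorflow as tf

embedding = tf.random_uniform([100, 8], -0.05, 0.05)    # hypothetical 100-word vocabulary
ids = tf.constant([[5, 17, -1, -1]])                    # -1 marks padded tree nodes

valid = tf.not_equal(ids, -1)
safe_ids = tf.to_int32(valid) * ids                     # -1 -> 0, real indices unchanged
vectors = tf.nn.embedding_lookup(embedding, safe_ids)
vectors *= tf.expand_dims(tf.to_float(valid), 2)        # zero out the padded positions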
def build_graph(all_readers,
input_reader,
input_data_pattern,
all_eval_data_patterns,
batch_size=256):
original_video_id, original_input, unused_labels_batch, unused_num_frames = (
get_input_evaluation_tensors(
input_reader,
input_data_pattern,
batch_size=batch_size))
video_id_notequal_tensors = []
model_input_tensor = None
input_distance_tensors = []
for reader, data_pattern in zip(all_readers, all_eval_data_patterns):
video_id, model_input_raw, labels_batch, unused_num_frames = (
get_input_evaluation_tensors(
reader,
data_pattern,
batch_size=batch_size))
video_id_notequal_tensors.append(tf.reduce_sum(tf.cast(tf.not_equal(original_video_id, video_id), dtype=tf.float32)))
if model_input_tensor is None:
model_input_tensor = model_input_raw
input_distance_tensors.append(tf.reduce_mean(tf.reduce_sum(tf.square(model_input_tensor - model_input_raw), axis=1)))
video_id_mismatch_tensor = tf.stack(video_id_notequal_tensors)
input_distance_tensor = tf.stack(input_distance_tensors)
actual_batch_size = tf.shape(original_video_id)[0]
tf.add_to_collection("video_id_mismatch", video_id_mismatch_tensor)
tf.add_to_collection("input_distance", input_distance_tensor)
tf.add_to_collection("actual_batch_size", actual_batch_size)
def build_graph(all_readers,
input_reader,
input_data_pattern,
all_eval_data_patterns,
batch_size=256):
original_video_id, original_input, unused_labels_batch, unused_num_frames = (
get_input_evaluation_tensors(
input_reader,
input_data_pattern,
batch_size=batch_size))
video_id_equal_tensors = []
model_input_tensor = None
input_distance_tensors = []
for reader, data_pattern in zip(all_readers, all_eval_data_patterns):
video_id, model_input_raw, labels_batch, unused_num_frames = (
get_input_evaluation_tensors(
reader,
data_pattern,
batch_size=batch_size))
video_id_equal_tensors.append(tf.reduce_sum(tf.cast(tf.not_equal(original_video_id, video_id), dtype=tf.float32)))
if model_input_tensor is None:
model_input_tensor = model_input_raw
input_distance_tensors.append(tf.reduce_mean(tf.reduce_sum(tf.square(model_input_tensor - model_input_raw), axis=1)))
video_id_equal_tensor = tf.stack(video_id_equal_tensors)
input_distance_tensor = tf.stack(input_distance_tensors)
tf.add_to_collection("video_id_equal", video_id_equal_tensor)
tf.add_to_collection("input_distance", input_distance_tensor)
def bp_mll_loss(y_true, y_pred):
# get true and false labels
shape = tf.shape(y_true)
y_i = tf.equal(y_true, tf.ones(shape))
y_i_bar = tf.not_equal(y_true, tf.ones(shape))
# get indices to check
truth_matrix = tf.to_float(pairwise_and(y_i, y_i_bar))
# calculate all exp'd differences
sub_matrix = pairwise_sub(y_pred, y_pred)
exp_matrix = tf.exp(tf.negative(sub_matrix))
# check which differences to consider and sum them
sparse_matrix = tf.multiply(exp_matrix, truth_matrix)
sums = tf.reduce_sum(sparse_matrix, axis=[1,2])
# get normalizing terms and apply them
y_i_sizes = tf.reduce_sum(tf.to_float(y_i), axis=1)
y_i_bar_sizes = tf.reduce_sum(tf.to_float(y_i_bar), axis=1)
normalizers = tf.multiply(y_i_sizes, y_i_bar_sizes)
results = tf.divide(sums, normalizers)
# sum over samples
return tf.reduce_sum(results)
# compute pairwise differences between elements of the tensors a and b
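bp_mll_loss relies on two helpers that were cut off from this snippet; below is a plausible reconstruction (an assumption, not the project's verbatim code), where pairwise_sub forms all pairwise differences along the label axis and pairwise_and forms all pairwise conjunctions:

def pairwise_sub(a, b):
    # a, b: [batch, n_labels]; result[i, j, k] = a[i, j] - b[i, k]
    column = tf.expand_dims(a, 2)
    row = tf.expand_dims(b, 1)
    return tf.subtract(column, row)

def pairwise_and(a, b):
    # a, b: [batch, n_labels] booleans; result[i, j, k] = a[i, j] and b[i, k]
    column = tf.expand_dims(a, 2)
    row = tf.expand_dims(b, 1)
    return tf.logical_and(column, row)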
def apply_loss(labels, net_out, loss_fn, weight_decay, is_training,
return_mean_loss=False, mask_voids=True):
'''Applies the user-specified loss function and returns the loss
Note:
SoftmaxCrossEntropyWithLogits expects labels NOT to be one-hot
and net_out to be one-hot.
'''
cfg = gflags.cfg
if mask_voids and len(cfg.void_labels):
# TODO Check this
print('Masking the void labels')
mask = tf.not_equal(labels, cfg.void_labels)
labels *= tf.cast(mask, 'int32') # void_class --> 0 (random class)
# Train loss
loss = loss_fn(labels=labels,
logits=tf.reshape(net_out, [-1, cfg.nclasses]))
mask = tf.cast(mask, 'float32')
loss *= mask
else:
# Train loss
loss = loss_fn(labels=labels,
logits=tf.reshape(net_out, [-1, cfg.nclasses]))
if is_training:
loss = apply_l2_penalty(loss, weight_decay)
# Return the mean loss (over pixels *and* batches)
if return_mean_loss:
if mask_voids and len(cfg.void_labels):
return tf.reduce_sum(loss) / tf.reduce_sum(mask)
else:
return tf.reduce_mean(loss)
else:
return loss
def _instantiate_subnet(self, batch, block_idx, seq_prefix):
def zeros_fn():
return tf.zeros_like(batch)
def base_case_fn():
return self._children[block_idx, seq_prefix](batch)
def recursive_case_fn():
first_subnet = self._instantiate_subnet(
batch, block_idx, seq_prefix + (0,))
return self._instantiate_subnet(
first_subnet, block_idx, seq_prefix + (1,))
if len(seq_prefix) == self._fractal_block_depth:
return base_case_fn()
else:
choice = self._drop_path_choices[self._choice_id[(block_idx, seq_prefix)]]
base_case = tf.cond(
tf.not_equal(choice, self._JUST_RECURSE), base_case_fn, zeros_fn)
base_case.set_shape(batch.get_shape())
recursive_case = tf.cond(
tf.not_equal(choice, self._JUST_BASE), recursive_case_fn, zeros_fn)
recursive_case.set_shape(batch.get_shape())
cases = [
(tf.equal(choice, self._BOTH),
lambda: self._mixer(base_case, recursive_case)),
(tf.equal(choice, self._JUST_BASE), lambda: base_case),
(tf.equal(choice, self._JUST_RECURSE), lambda: recursive_case)]
result = tf.case(cases, lambda: base_case)
result.set_shape(batch.get_shape())
return result
def not_equal(x, y):
'''Element-wise inequality between two tensors.
Returns a bool tensor.
'''
return tf.not_equal(x, y)
def get_weights(sequence, eos_id, include_first_eos=True):
cumsum = tf.cumsum(tf.to_float(tf.not_equal(sequence, eos_id)), axis=1)
range_ = tf.range(start=1, limit=tf.shape(sequence)[1] + 1)
range_ = tf.tile(tf.expand_dims(range_, axis=0), [tf.shape(sequence)[0], 1])
weights = tf.to_float(tf.equal(cumsum, tf.to_float(range_)))
if include_first_eos:
weights = weights[:,:-1]
shape = [tf.shape(weights)[0], 1]
weights = tf.concat([tf.ones(tf.stack(shape)), weights], axis=1)
return tf.stop_gradient(weights)
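A quick check of the resulting weight pattern, using eos_id=2 and 0 as padding:

import tensorflow as tf

sequence = tf.constant([[3, 5, 2, 0, 0]])
with tf.Session() as sess:
    print(sess.run(get_weights(sequence, eos_id=2)))
    # -> [[1. 1. 1. 0. 0.]]  (tokens up to and including the first EOS)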
def print_mask_parameter_counts():
print("# Mask Parameter Counts")
print(" - Mask1: {0}".format(
sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix1, tf.zeros_like(indicator_matrix1)))))))
print(" - Mask2: {0}".format(
sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix2, tf.zeros_like(indicator_matrix2)))))))
print(" - Mask3: {0}".format(
sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix3, tf.zeros_like(indicator_matrix3)))))))
def not_equal(self, x, y):
'''Element-wise inequality between two tensors.
Returns a bool tensor.
'''
return tf.not_equal(x, y)
def preprocess(data):
# PaddingFIFOQueue pads to the max size seen in the data (instead of the minibatch)
# by chopping off the ends, this limits redundant computations in the output layer
sequence_length = tf.reduce_sum(tf.cast(tf.not_equal(data, 0), dtype=tf.int32), axis=1)
maximum_sequence_length = tf.reduce_max(sequence_length)
data = data[:, :maximum_sequence_length]
source = data[:, :-1]
target = data[:, 1:]
sequence_length -= 1
return source, target, sequence_length
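For example, a zero-padded batch is trimmed to its longest sequence and split into source/target pairs:

import tensorflow as tf

data = tf.constant([[4, 7, 9, 0, 0, 0],
                    [3, 5, 0, 0, 0, 0]])
source, target, length = preprocess(data)
with tf.Session() as sess:
    print(sess.run([source, target, length]))
    # source: [[4 7] [3 5]], target: [[7 9] [5 0]], lengths: [2 1]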
From tensorflow_backend.py in project deep-learning-keras-projects (author: jasmeetsb):
def not_equal(x, y):
"""Element-wise inequality between two tensors.
# Returns
A bool tensor.
"""
return tf.not_equal(x, y)
def error(self):
mistakes = tf.not_equal(
tf.argmax(self._target, 2), tf.argmax(self.prediction, 2))
mistakes = tf.cast(mistakes, tf.float32)
mask = tf.sign(tf.reduce_max(self._target, reduction_indices=2))
mistakes *= mask
# Average over actual sequence lengths.
mistakes = tf.reduce_sum(mistakes, reduction_indices=1)
mistakes /= tf.cast(self._length, tf.float32)
return tf.reduce_mean(mistakes)
def num_of_error(self):
mistakes = tf.not_equal(
tf.argmax(self._target, 2), tf.argmax(self.prediction, 2))
mistakes = tf.cast(mistakes, tf.float32)
mask = tf.sign(tf.reduce_max(self._target, reduction_indices=2))
mistakes *= mask
        # Sum the errors over each actual sequence length (no averaging here).
mistakes = tf.reduce_sum(mistakes, reduction_indices=1)
return mistakes