def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
"""Filters out groundtruth with no bounding boxes.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
Returns:
a dictionary of tensors containing only the groundtruth that have bounding
boxes.
"""
groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
valid_indicator_vector = tf.logical_not(nan_indicator_vector)
valid_indices = tf.where(valid_indicator_vector)
return retain_groundtruth(tensor_dict, valid_indices)
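A minimal, self-contained sketch of the same NaN-filtering idea, using tf.boolean_mask in place of the retain_groundtruth helper (which is not shown in this listing); the toy tensors and TF 1.x graph mode are assumptions:
import numpy as np
import tensorflow as tf

# Toy groundtruth: the second box has a NaN coordinate.
boxes = tf.constant([[0.1, 0.1, 0.5, 0.5],
                     [np.nan, 0.2, 0.6, 0.6],
                     [0.3, 0.3, 0.9, 0.9]])
classes = tf.constant([1, 2, 3])

# A row is invalid if any of its coordinates is NaN.
nan_rows = tf.greater(tf.reduce_sum(tf.to_int32(tf.is_nan(boxes)), axis=1), 0)
valid = tf.logical_not(nan_rows)

with tf.Session() as sess:
    print(sess.run(tf.boolean_mask(boxes, valid)))    # rows 0 and 2 survive
    print(sess.run(tf.boolean_mask(classes, valid)))  # [1 3]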
Python tf.logical_not() usage examples (source code)
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single tower, and the number of pairs
equals the number of towers.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. The has_nan_or_inf flag indicates whether any gradient contains a NaN or Inf.
"""
grads = [g for g, _ in grad_and_vars]
grad = tf.add_n(grads)
if use_mean and len(grads) > 1:
grad = tf.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
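A hedged usage sketch for the aggregator above; the two "towers" are fabricated single-variable gradients (TF 1.x assumed):
import tensorflow as tf

v = tf.Variable([1.0, 2.0])
# Pretend these two gradients came from two towers for the same variable.
grad_and_vars = [(tf.constant([0.5, float('nan')]), v),
                 (tf.constant([1.5, 1.0]), v)]
(avg_grad, var), has_nan_or_inf = aggregate_single_gradient_using_copy(
    grad_and_vars, use_mean=True, check_inf_nan=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(avg_grad))        # [1.0, nan]
    print(sess.run(has_nan_or_inf))  # True -- tf.is_finite catches the NaN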
def call(self, inputs, mask=None):
inputs_tensor = inputs
mask_inputs = K.expand_dims(mask)
inputs_shape = K.int_shape(inputs)
channel_axis = len(inputs_shape) - 1
if self.pool_mode == 'max':
mask_inv = tf.logical_not(mask_inputs)
negative_mask = K.cast(mask_inv, K.floatx()) * -1e20
negative_mask = K.repeat_elements(
negative_mask,
inputs_shape[channel_axis],
channel_axis
)
inputs_tensor = inputs + negative_mask
output = self.layer._pooling_function(
inputs_tensor,
self.layer.pool_size,
self.layer.strides,
self.layer.padding,
self.layer.data_format,
)
mask_inputs = K.cast(mask_inputs, K.floatx())
mask_output = self.layer._pooling_function(
mask_inputs,
self.layer.pool_size,
self.layer.strides,
self.layer.padding,
self.layer.data_format,
)
mask_output = K.repeat_elements(
mask_output,
inputs_shape[channel_axis],
channel_axis
)
return output * mask_output
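The -1e20 trick above can be shown in isolation: masked positions are pushed to a huge negative value so max pooling can never select them. A minimal plain-TF sketch (toy values, TF 1.x):
import tensorflow as tf

values = tf.constant([[3.0, 7.0, 5.0, 9.0]])
mask = tf.constant([[True, True, False, False]])  # last two steps are padding

# Fill masked slots with a huge negative number, then take the max.
neg_fill = tf.cast(tf.logical_not(mask), tf.float32) * -1e20
masked_max = tf.reduce_max(values + neg_fill, axis=1)

with tf.Session() as sess:
    print(sess.run(masked_max))  # [7.0] -- the masked 9.0 is ignored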
def accuracy(logits, labels):
"""Calculates aggregated accuracy."""
is_correct = tf.nn.in_top_k(logits, labels, 1)
correct = tf.reduce_sum(tf.cast(is_correct, tf.int32))
incorrect = tf.reduce_sum(tf.cast(tf.logical_not(is_correct), tf.int32))
correct_count = tf.Variable(0, False)
incorrect_count = tf.Variable(0, False)
correct_count_update = tf.assign_add(correct_count, correct)
incorrect_count_update = tf.assign_add(incorrect_count, incorrect)
accuracy_op = tf.cast(correct_count, tf.float32) / tf.cast(
correct_count + incorrect_count, tf.float32)
return [correct_count_update, incorrect_count_update], accuracy_op
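A usage sketch for the streaming counters above (toy logits and labels; TF 1.x):
import tensorflow as tf

logits = tf.constant([[2.0, 0.1], [0.1, 2.0], [2.0, 0.1]])
labels = tf.constant([0, 0, 0])  # the second example will be misclassified

updates, accuracy_op = accuracy(logits, labels)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(updates)             # fold this batch into the counters
    print(sess.run(accuracy_op))  # 2 correct / 3 total ~= 0.667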
def _init_step_size(self, q, p, mass, get_gradient, get_log_posterior):
factor = 1.5
def loop_cond(step_size, last_acceptance_rate, cond):
return cond
def loop_body(step_size, last_acceptance_rate, cond):
# Calculate acceptance_rate
new_q, new_p = leapfrog_integrator(
q, p, tf.constant(0.0), step_size / 2,
get_gradient, mass)
new_q, new_p = leapfrog_integrator(
new_q, new_p, step_size, step_size / 2,
get_gradient, mass)
__, _, _, _, acceptance_rate = get_acceptance_rate(
q, p, new_q, new_p,
get_log_posterior, mass, self.data_axes)
acceptance_rate = tf.reduce_mean(acceptance_rate)
# Change step size and stopping criteria
new_step_size = tf.cond(
tf.less(acceptance_rate,
self.target_acceptance_rate),
lambda: step_size * (1.0 / factor),
lambda: step_size * factor)
cond = tf.logical_not(tf.logical_xor(
tf.less(last_acceptance_rate, self.target_acceptance_rate),
tf.less(acceptance_rate, self.target_acceptance_rate)))
return [new_step_size, acceptance_rate, cond]
new_step_size, _, _ = tf.while_loop(
loop_cond,
loop_body,
[self.step_size, tf.constant(1.0), tf.constant(True)]
)
return new_step_size
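The stopping rule above keeps searching while successive acceptance rates stay on the same side of the target; tf.logical_xor flips exactly when the target is crossed, and tf.logical_not turns that into "stop". A stripped-down sketch of the same pattern, with a made-up acceptance function (TF 1.x):
import tensorflow as tf

target = 0.8

def fake_acceptance(step_size):
    # Stand-in for the real acceptance rate: shrinks as step size grows.
    return 1.0 / (1.0 + step_size)

def cond(step_size, last_rate, keep_going):
    return keep_going

def body(step_size, last_rate, keep_going):
    rate = fake_acceptance(step_size)
    new_step = tf.cond(rate < target,
                       lambda: step_size / 1.5,
                       lambda: step_size * 1.5)
    # Stop once this rate and the previous one straddle the target.
    keep_going = tf.logical_not(
        tf.logical_xor(last_rate < target, rate < target))
    return [new_step, rate, keep_going]

step = tf.while_loop(
    cond, body, [tf.constant(1.0), tf.constant(1.0), tf.constant(True)])[0]
with tf.Session() as sess:
    print(sess.run(step))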
def __invert__(self):
return tf.logical_not(self)
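Defining __invert__ this way lets Python's ~ operator dispatch to tf.logical_not. Plain tf.Tensor overloads the operator the same way for boolean dtypes (TF 1.x):
import tensorflow as tf

flags = tf.constant([True, False, True])
with tf.Session() as sess:
    print(sess.run(~flags))                 # [False  True False]
    print(sess.run(tf.logical_not(flags)))  # identical result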
def cond(self, time, inp, state, finished, output_ta):
"""Logical contidion for termination."""
continuation = tf.logical_not(tf.reduce_all(finished))
if self._pad_to is None:
return continuation
padding = time < self._pad_to
return tf.logical_or(continuation, padding)
# pylint: disable=W0221,I0011
# disable the changed signature of the method.
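The criterion above keeps the decoding loop alive while any sequence is unfinished or, when _pad_to is set, until the padded length is reached. A self-contained tf.while_loop sketch of the same idea (toy values assumed; TF 1.x):
import tensorflow as tf

pad_to = 5  # keep looping for at least this many steps

def cond(time, finished):
    continuation = tf.logical_not(tf.reduce_all(finished))
    return tf.logical_or(continuation, time < pad_to)

def body(time, finished):
    # Pretend both sequences finish after step 2.
    return time + 1, tf.fill([2], time >= 2)

steps = tf.while_loop(
    cond, body, [tf.constant(0), tf.constant([False, False])])[0]
with tf.Session() as sess:
    print(sess.run(steps))  # 5 -- padding extends the loop past completion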
def Construct_Accuracy_op(self):
with tf.name_scope('accuracy'):
if self.model_dict['Model_Type'] == 'Classification':
correct_prediction = tf.equal(tf.argmax(self.model_dict['Output'], 1), tf.argmax(self.model_dict['Output_ph'], 1))
false_images = tf.boolean_mask(self.model_dict['Reshaped_input'], tf.logical_not(correct_prediction))
tf.summary.image(name='False images', tensor=false_images)
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', self.accuracy)
self.accuracy_op = True
elif self.model_dict['Model_Type'] == 'Segmentation':
probs = tf.reshape((tf.sigmoid(self.model_dict['Output'])), shape=[ self.kwargs['Batch_size'], -1])
lab = tf.reshape(self.model_dict['Output_ph'], shape=[self.kwargs['Batch_size'], -1])
probs = tf.ceil(probs - 0.5 + 1e-10)
intersection = tf.reduce_sum(probs * lab, axis=1)
union = tf.reduce_sum(probs, 1) + tf.reduce_sum(lab, 1)
tf.summary.image(name='Input images',tensor = self.model_dict['Reshaped_input'])
tf.summary.image(name='Mask',tensor = tf.reshape(self.model_dict['Output_ph'], [-1, self.kwargs['Image_width'], self.kwargs['Image_height'], 1]))
tf.summary.image(name='Weight',tensor = tf.reshape(self.model_dict['Weight_ph'], [-1, self.kwargs['Image_width'], self.kwargs['Image_height'], 1]))
tf.summary.image(name='Output',tensor = (tf.sigmoid(self.model_dict['Output'])))
self.accuracy = tf.reduce_mean(2 * intersection / (union))
tf.summary.scalar('accuracy', self.accuracy)
self.accuracy_op = True
elif self.model_dict['Model_Type'] == 'Sequence':
correct_prediction = tf.equal(tf.argmax(self.model_dict['Output'], 1), tf.reshape(tf.cast(tf.reshape(self.model_dict['Output_ph'], shape=[-1]), tf.int64), [-1]))
pre_acc = tf.to_float(correct_prediction) * tf.to_float(tf.reshape(self.model_dict['Mask'], [-1]))
pre_acc = tf.reduce_sum(pre_acc)
self.accuracy = tf.div(pre_acc, tf.maximum(1.0,tf.reduce_sum(tf.to_float(tf.reshape(self.model_dict['Mask'], [-1])))))
self.accuracy_op = True
tf.summary.scalar('accuracy', self.accuracy)
self.out_op = tf.argmax(self.model_dict['Output'], 1)
#tf.cond(self.accuracy > 0.92, lambda: tf.summary.image(name='False images', tensor=false_images), lambda: tf.summary.tensor_summary(name='correct_predictions', tensor=correct_prediction))
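The segmentation branch above scores with the Dice coefficient, 2*|P∩L| / (|P|+|L|), after hard-thresholding the sigmoid output at 0.5. A toy check of that metric alone (invented numbers; TF 1.x):
import tensorflow as tf

probs = tf.constant([[0.9, 0.2, 0.7, 0.4]])   # sigmoid outputs, batch of 1
labels = tf.constant([[1.0, 0.0, 1.0, 1.0]])

hard = tf.ceil(probs - 0.5 + 1e-10)           # threshold at 0.5 -> {0, 1}
intersection = tf.reduce_sum(hard * labels, axis=1)
union = tf.reduce_sum(hard, axis=1) + tf.reduce_sum(labels, axis=1)
dice = tf.reduce_mean(2.0 * intersection / union)

with tf.Session() as sess:
    print(sess.run(dice))  # 2*2 / (2+3) = 0.8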
def _get_cubic_root(self):
"""Get the cubic root."""
# We minimize the scalar function x^2 D^2 + (1-x)^4 * C / h_min^2
# over x = sqrt(mu). Setting its derivative to zero and substituting
# x = y + 1 gives the depressed cubic y^3 + p*y = q,
# where p = (D^2 h_min^2)/(2*C) and q = -p.
# We use Vieta's substitution to compute the root.
# There is only one real solution y (which is in [0, 1]).
# http://mathworld.wolfram.com/VietasSubstitution.html
assert_array = [
tf.Assert(
tf.logical_not(tf.is_nan(self._dist_to_opt_avg)),
[self._dist_to_opt_avg, ]),
tf.Assert(
tf.logical_not(tf.is_nan(self._h_min)),
[self._h_min, ]),
tf.Assert(
tf.logical_not(tf.is_nan(self._grad_var)),
[self._grad_var, ]),
tf.Assert(
tf.logical_not(tf.is_inf(self._dist_to_opt_avg)),
[self._dist_to_opt_avg, ]),
tf.Assert(
tf.logical_not(tf.is_inf(self._h_min)),
[self._h_min, ]),
tf.Assert(
tf.logical_not(tf.is_inf(self._grad_var)),
[self._grad_var, ])
]
with tf.control_dependencies(assert_array):
p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0 / 3.0)
y = w - p / 3.0 / w
x = y + 1
return x
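The closed form above solves the depressed cubic y^3 + p*y = -p via Vieta's substitution y = w - p/(3w). A quick NumPy sanity check that the returned x = y + 1 really satisfies the cubic (the constants are made up):
import numpy as np

D, h_min, C = 2.0, 0.5, 1.0  # fabricated stand-ins for the running averages
p = (D ** 2) * (h_min ** 2) / (2.0 * C)

w3 = (-np.sqrt(p ** 2 + 4.0 / 27.0 * p ** 3) - p) / 2.0
w = np.sign(w3) * np.abs(w3) ** (1.0 / 3.0)
y = w - p / (3.0 * w)

print(abs(y ** 3 + p * y + p))  # ~0: y solves y^3 + p*y = -p
print(0.0 <= y + 1.0 <= 1.0)    # True: x = sqrt(mu) lies in [0, 1]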
def tf_format_mnist_images(X, Y, Y_, n=100, lines=10):
correct_prediction = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))
correctly_recognised_indices = tf.squeeze(tf.where(correct_prediction), [1]) # indices of correctly recognised images
incorrectly_recognised_indices = tf.squeeze(tf.where(tf.logical_not(correct_prediction)), [1]) # indices of incorrectly recognised images
everything_incorrect_first = tf.concat(0, [incorrectly_recognised_indices, correctly_recognised_indices]) # images reordered with indices of unrecognised images first
everything_incorrect_first = tf.slice(everything_incorrect_first, [0], [n]) # compute first 100 only - no space to display more anyway
# compute n=100 digits to display only
Xs = tf.gather(X, everything_incorrect_first)
Ys = tf.gather(Y, everything_incorrect_first)
Ys_ = tf.gather(Y_, everything_incorrect_first)
correct_prediction_s = tf.gather(correct_prediction, everything_incorrect_first)
digits_left = tf.image.grayscale_to_rgb(tensorflowvisu_digits.digits_left())
correct_tags = tf.gather(digits_left, tf.argmax(Ys_, 1)) # correct digits to be printed on the images
digits_right = tf.image.grayscale_to_rgb(tensorflowvisu_digits.digits_right())
computed_tags = tf.gather(digits_right, tf.argmax(Ys, 1)) # computed digits to be printed on the images
#superimposed_digits = correct_tags+computed_tags
superimposed_digits = tf.select(correct_prediction_s, tf.zeros_like(correct_tags), correct_tags+computed_tags) # only print the correct and computed digits on unrecognised images
correct_bkg = tf.reshape(tf.tile([1.3,1.3,1.3], [28*28]), [1, 28,28,3]) # white background
incorrect_bkg = tf.reshape(tf.tile([1.3,1.0,1.0], [28*28]), [1, 28,28,3]) # red background
recognised_bkg = tf.gather(tf.concat(0, [incorrect_bkg, correct_bkg]), tf.cast(correct_prediction_s, tf.int32)) # pick either the red or the white background depending on recognised status
I = tf.image.grayscale_to_rgb(Xs)
I = ((1-(I+superimposed_digits))*recognised_bkg)/1.3 # stencil extra data on top of images and reorder them unrecognised first
I = tf.image.convert_image_dtype(I, tf.uint8, saturate=True)
Islices = [] # 100 images => 10x10 image block
for imslice in range(lines):
Islices.append(tf.concat(1, tf.unpack(tf.slice(I, [imslice*n//lines,0,0,0], [n//lines,28,28,3]))))
I = tf.concat(0, Islices)
return I
# n = HISTOGRAM_BUCKETS (global)
# Buckets the data into n buckets so that there are an equal number of data points in
# each bucket. Returns n+1 bucket boundaries. Spreads the remainder data.size % n more
# or less evenly among the central buckets.
# data: 1-D ndarray containing float data, MUST BE SORTED in ascending order
# n: integer, the number of desired output buckets
# return value: ndarray, 1-D vector of size n+1 containing the bucket boundaries
# the first value is the min of the data, the last value is the max
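The comment block above describes a bucketing helper whose body is not included in this listing. A hypothetical NumPy sketch matching the description (equal-count buckets, remainder spread over the central buckets; not the original implementation):
import numpy as np

def equal_count_bucket_boundaries(data, n):
    # data: sorted 1-D float array; n: desired number of buckets.
    base, remainder = divmod(data.size, n)
    counts = np.full(n, base, dtype=int)
    # Spread the data.size % n leftover points over the central buckets.
    start = (n - remainder) // 2
    counts[start:start + remainder] += 1
    edges = np.concatenate([[0], np.cumsum(counts)])  # indices into data
    boundaries = data[np.minimum(edges, data.size - 1)].astype(float)
    boundaries[0], boundaries[-1] = data[0], data[-1]  # min and max
    return boundaries

data = np.sort(np.random.rand(103))
print(len(equal_count_bucket_boundaries(data, 10)))  # 11 boundaries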
def setUp(self):
super(LogicalNotTest, self).setUp()
self.ops = [
('logical_not', operator.invert, tf.logical_not, core.logical_not),
]
self.test_lt = self.original_lt < 10
def predict(self, answer, start_logits, end_logits, mask) -> Prediction:
masked_start_logits = exp_mask(start_logits, mask)
masked_end_logits = exp_mask(end_logits, mask)
batch_dim = tf.shape(start_logits)[0]
if len(answer) == 2 and all(x.dtype == tf.bool for x in answer):
none_logit = tf.get_variable("none-logit", initializer=self.non_init, dtype=tf.float32)
none_logit = tf.tile(tf.expand_dims(none_logit, 0), [batch_dim])
all_logits = tf.reshape(tf.expand_dims(masked_start_logits, 1) +
tf.expand_dims(masked_end_logits, 2),
(batch_dim, -1))
# (batch, (l * l) + 1) logits including the none option
all_logits = tf.concat([all_logits, tf.expand_dims(none_logit, 1)], axis=1)
log_norms = tf.reduce_logsumexp(all_logits, axis=1)
# Now build a "correctness" mask in the same format
correct_mask = tf.logical_and(tf.expand_dims(answer[0], 1), tf.expand_dims(answer[1], 2))
correct_mask = tf.reshape(correct_mask, (batch_dim, -1))
correct_mask = tf.concat([correct_mask, tf.logical_not(tf.reduce_any(answer[0], axis=1, keep_dims=True))],
axis=1)
log_correct = tf.reduce_logsumexp(
all_logits + VERY_NEGATIVE_NUMBER * (1 - tf.cast(correct_mask, tf.float32)), axis=1)
loss = tf.reduce_mean(-(log_correct - log_norms))
probs = tf.nn.softmax(all_logits)
tf.add_to_collection(tf.GraphKeys.LOSSES, loss)
return ConfidencePrediction(probs[:, :-1], masked_start_logits, masked_end_logits,
probs[:, -1], none_logit)
else:
raise NotImplementedError()
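exp_mask and VERY_NEGATIVE_NUMBER come from the surrounding project and are not shown in this listing; a hypothetical stand-in illustrating the convention (masked logits are pushed toward -inf so softmax assigns them ~0 probability):
import tensorflow as tf

VERY_NEGATIVE_NUMBER = -1e30  # assumed magnitude, not the project's constant

def exp_mask(logits, mask):
    # Hypothetical sketch: add a huge negative value where mask is False.
    return logits + VERY_NEGATIVE_NUMBER * (1.0 - tf.cast(mask, tf.float32))

logits = tf.constant([[1.0, 2.0, 3.0]])
mask = tf.constant([[True, True, False]])
with tf.Session() as sess:
    print(sess.run(tf.nn.softmax(exp_mask(logits, mask))))  # last prob ~ 0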
def loop_continue_criterion(self, *args) -> tf.Tensor:
"""Decide whether to break out of the while loop.
Arguments:
loop_state: ``LoopState`` instance (see the docs for this module).
Represents current decoder loop state.
"""
loop_state = LoopState(*args)
finished = loop_state.feedables.finished
not_all_done = tf.logical_not(tf.reduce_all(finished))
before_max_len = tf.less(loop_state.feedables.step,
self.max_output_len)
return tf.logical_and(not_all_done, before_max_len)
def tf_format_mnist_images(X, Y, Y_, n=100, lines=10):
correct_prediction = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))
correctly_recognised_indices = tf.squeeze(tf.where(correct_prediction), [1]) # indices of correctly recognised images
incorrectly_recognised_indices = tf.squeeze(tf.where(tf.logical_not(correct_prediction)), [1]) # indices of incorrectly recognised images
everything_incorrect_first = tf.concat([incorrectly_recognised_indices, correctly_recognised_indices], 0) # images reordered with indices of unrecognised images first
everything_incorrect_first = tf.slice(everything_incorrect_first, [0], [n]) # compute first 100 only - no space to display more anyway
# compute n=100 digits to display only
Xs = tf.gather(X, everything_incorrect_first)
Ys = tf.gather(Y, everything_incorrect_first)
Ys_ = tf.gather(Y_, everything_incorrect_first)
correct_prediction_s = tf.gather(correct_prediction, everything_incorrect_first)
digits_left = tf.image.grayscale_to_rgb(tensorflowvisu_digits.digits_left())
correct_tags = tf.gather(digits_left, tf.argmax(Ys_, 1)) # correct digits to be printed on the images
digits_right = tf.image.grayscale_to_rgb(tensorflowvisu_digits.digits_right())
computed_tags = tf.gather(digits_right, tf.argmax(Ys, 1)) # computed digits to be printed on the images
#superimposed_digits = correct_tags+computed_tags
superimposed_digits = tf.where(correct_prediction_s, tf.zeros_like(correct_tags), correct_tags+computed_tags) # only print the correct and computed digits on unrecognised images
correct_bkg = tf.reshape(tf.tile([1.3,1.3,1.3], [28*28]), [1, 28,28,3]) # white background
incorrect_bkg = tf.reshape(tf.tile([1.3,1.0,1.0], [28*28]), [1, 28,28,3]) # red background
recognised_bkg = tf.gather(tf.concat([incorrect_bkg, correct_bkg], 0), tf.cast(correct_prediction_s, tf.int32)) # pick either the red or the white background depending on recognised status
I = tf.image.grayscale_to_rgb(Xs)
I = ((1-(I+superimposed_digits))*recognised_bkg)/1.3 # stencil extra data on top of images and reorder them unrecognised first
I = tf.image.convert_image_dtype(I, tf.uint8, saturate=True)
Islices = [] # 100 images => 10x10 image block
for imslice in range(lines):
Islices.append(tf.concat(tf.unstack(tf.slice(I, [imslice*n//lines,0,0,0], [n//lines,28,28,3])), 1))
I = tf.concat(Islices, 0)
return I
# n = HISTOGRAM_BUCKETS (global)
# Buckets the data into n buckets so that there are an equal number of data points in
# each bucket. Returns n+1 bucket boundaries. Spreads the remainder data.size % n more
# or less evenly among the central buckets.
# data: 1-D ndarray containing float data, MUST BE SORTED in ascending order
# n: integer, the number of desired output buckets
# return value: ndarray, 1-D vector of size n+1 containing the bucket boundaries
# the first value is the min of the data, the last value is the max
def init_cov_matrix_tf(predictions, correct_predictions):
true_pred = tf.boolean_mask(predictions, correct_predictions)
false_pred = tf.boolean_mask(predictions, tf.logical_not(correct_predictions))
truePos = tf.reduce_sum(tf.cast(tf.equal(true_pred, 1), tf.float32))
falsePos = tf.reduce_sum(tf.cast(tf.equal(false_pred, 1), tf.float32))
trueNeg = tf.reduce_sum(tf.cast(tf.equal(true_pred, 0), tf.float32))
falseNeg = tf.reduce_sum(tf.cast(tf.equal(false_pred, 0), tf.float32))
return (truePos, falsePos, trueNeg, falseNeg)
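With the four counts above, standard derived metrics follow directly. A short follow-up sketch (toy inputs; epsilon guards against division by zero; TF 1.x):
import tensorflow as tf

predictions = tf.constant([1, 1, 0, 0, 1])
correct = tf.constant([True, False, True, True, False])

tp, fp, tn, fn = init_cov_matrix_tf(predictions, correct)
eps = 1e-8
precision = tp / (tp + fp + eps)
recall = tp / (tp + fn + eps)

with tf.Session() as sess:
    print(sess.run([tp, fp, tn, fn]))     # [1.0, 2.0, 2.0, 0.0]
    print(sess.run([precision, recall]))  # [~0.333, ~1.0]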
def time_gate_fast_2(phase, r_on, leak_rate, training_phase):
if not training_phase:
leak_rate = 1.0
is_up = tf.less(phase, (r_on * 0.5))
is_down = tf.logical_and(tf.less(phase, r_on), tf.logical_not(is_up))
time_gate = tf.where(is_up, 2 * phase / r_on, tf.where(is_down, 2. - 2. * (phase / r_on), leak_rate * phase))
return time_gate
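The gate above ramps linearly from 0 to 1 over the first half of r_on, back down over the second half, and otherwise leaks. Evaluating it on a grid of phases makes the triangular shape visible (toy values; training_phase=True so the leak applies; TF 1.x):
import tensorflow as tf

phase = tf.constant([0.05, 0.1, 0.15, 0.2, 0.5])
gate = time_gate_fast_2(phase, r_on=0.2, leak_rate=0.01, training_phase=True)

with tf.Session() as sess:
    # Rising edge, peak, falling edge, then leak outside the open window:
    print(sess.run(gate))  # ~[0.5, 1.0, 0.5, 0.002, 0.005]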
def test_decode_one_step(self):
"""Default test for the DynamicDecoder.decode() method."""
init_value = [[.1, .1], [.2, .2], [.3, .3]]
init_input = tf.constant(init_value)
init_state = 2 * init_input
next_input = 3 * init_input
next_state = 4 * init_input
output = 10 * init_input
finished = tf.constant([False, False, False], dtype=tf.bool)
zero_output = tf.zeros_like(output)
decoder = mock.Mock()
decoder.init_input.side_effect = [init_input]
decoder.init_state.side_effect = [init_state]
decoder.zero_output.side_effect = [zero_output]
decoder.step.side_effect = [(output, next_input, next_state, finished)]
helper = mock.Mock()
helper.finished.side_effect = [tf.logical_not(finished)] # exit from the loop!
dyndec = layers.DynamicDecoder(decoder, helper)
output_t, state_t = dyndec.decode()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
output_act, state_act = sess.run([output_t, state_t])
# assertions on output.
output_exp = 10 * np.transpose(np.asarray([init_value]), (1, 0, 2))
self.assertAllClose(output_exp, output_act)
state_exp = 4 * np.asarray(init_value)
self.assertAllClose(state_exp, state_act)
# mock assertions.
# we cannot assert more than this since the while
# loop makes all the ops non-fetchable.
decoder.init_input.assert_called_once()
decoder.init_state.assert_called_once()
decoder.zero_output.assert_called_once()
decoder.step.assert_called_once()
helper.finished.assert_called_once()
def build_graph(self, nn_im_w, nn_im_h, num_colour_channels=3, weights=None, biases=None):
num_outputs = 1  # single scalar output
self.nn_im_w = nn_im_w
self.nn_im_h = nn_im_h
if weights is None:
weights = [None, None, None, None, None]
if biases is None:
biases = [None, None, None, None, None]
with tf.device('/cpu:0'):
# Placeholder variables for the input image and output images
self.x = tf.placeholder(tf.float32, shape=[None, nn_im_w*nn_im_h*3])
self.y_ = tf.placeholder(tf.float32, shape=[None, num_outputs])
self.threshold = tf.placeholder(tf.float32)
# Build the convolutional and pooling layers
conv1_output_channels = 32
conv2_output_channels = 16
conv3_output_channels = 8
conv_layer_1_input = tf.reshape(self.x, [-1, nn_im_h, nn_im_w, num_colour_channels]) #The resized input image
self.build_conv_layer(conv_layer_1_input, num_colour_channels, conv1_output_channels, initial_weights=weights[0], initial_biases=biases[0]) # layer 1
self.build_conv_layer(self.layers[0][0], conv1_output_channels, conv2_output_channels, initial_weights=weights[1], initial_biases=biases[1])# layer 2
self.build_conv_layer(self.layers[1][0], conv2_output_channels, conv3_output_channels, initial_weights=weights[2], initial_biases=biases[2])# layer 3
# Build the fully connected layer
convnet_output_w = nn_im_w//8
convnet_output_h = nn_im_h//8
fully_connected_layer_input = tf.reshape(self.layers[2][0], [-1, convnet_output_w * convnet_output_h * conv3_output_channels])
self.build_fully_connected_layer(fully_connected_layer_input, convnet_output_w, convnet_output_h, conv3_output_channels, initial_weights=weights[3], initial_biases=biases[3])
# The dropout stage and readout layer
self.keep_prob, self.h_drop = self.dropout(self.layers[3][0])
self.y_conv,_,_ = self.build_readout_layer(self.h_drop, num_outputs, initial_weights=weights[4], initial_biases=biases[4])
self.mean_error = tf.sqrt(tf.reduce_mean(tf.square(self.y_ - self.y_conv)))
self.train_step = tf.train.AdamOptimizer(1e-4).minimize(self.mean_error)
self.accuracy = (1.0 - tf.reduce_mean(tf.abs(self.y_ - tf.round(self.y_conv))))
positive_examples = tf.greater_equal(self.y_, 0.5)
negative_examples = tf.logical_not(positive_examples)
positive_classifications = tf.greater_equal(self.y_conv, self.threshold)
negative_classifications = tf.logical_not(positive_classifications)
self.true_positive = tf.reduce_sum(tf.cast(tf.logical_and(positive_examples, positive_classifications),tf.int32)) # count the examples that are positive and classified as positive
self.false_positive = tf.reduce_sum(tf.cast(tf.logical_and(negative_examples, positive_classifications),tf.int32)) # count the examples that are negative but classified as positive
self.true_negative = tf.reduce_sum(tf.cast(tf.logical_and(negative_examples, negative_classifications),tf.int32)) # count the examples that are negative and classified as negative
self.false_negative = tf.reduce_sum(tf.cast(tf.logical_and(positive_examples, negative_classifications),tf.int32)) # count the examples that are positive but classified as negative
self.positive_count = tf.reduce_sum(tf.cast(positive_examples, tf.int32)) # count the examples that are positive
self.negative_count = tf.reduce_sum(tf.cast(negative_examples, tf.int32)) # count the examples that are negative
self.confusion_matrix = tf.reshape(tf.pack([self.true_positive, self.false_positive, self.false_negative, self.true_negative]), [2,2])
self.sess.run(tf.initialize_all_variables())
def main():
""" Test an RNN trained for TIMIT phoneme recognition. """
args, params_str, layer_kwargs = parse_args()
_, _, test_inputs, test_labels = timitphonemerec.load_split(args.data_dir, val=False,
mfcc=True, normalize=True)
# Input seqs have shape [length, INPUT_SIZE]. Label seqs are int8 arrays with shape [length],
# but need to have shape [length, 1] for the batch generator.
test_labels = [seq[:, np.newaxis] for seq in test_labels]
test_batches = utils.full_bptt_batch_generator(test_inputs, test_labels, TEST_BATCH_SIZE,
num_epochs=1, shuffle=False)
model = models.RNNClassificationModel(args.layer_type, INPUT_SIZE, TARGET_SIZE, args.num_hidden_units,
args.activation_type, **layer_kwargs)
def _error_rate(valid_predictions, valid_targets):
incorrect_mask = tf.logical_not(tf.equal(tf.argmax(valid_predictions, 1), tf.argmax(valid_targets, 1)))
return tf.reduce_mean(tf.to_float(incorrect_mask))
model.error_rate = _error_rate(model.valid_predictions, model.valid_targets)
config = tf.ConfigProto()
config.gpu_options.allow_growth = False
sess = tf.Session(config=config)
saver = tf.train.Saver()
saver.restore(sess, os.path.join(args.results_dir, 'model.ckpt'))
batch_inputs, batch_labels = next(test_batches)
batch_targets = utils.one_hot(np.squeeze(batch_labels, 2), TARGET_SIZE)
valid_predictions, valid_targets, error_rate = sess.run(
[model.valid_predictions, model.valid_targets, model.error_rate],
feed_dict={model.inputs: batch_inputs,
model.targets: batch_targets}
)
print('%f' % error_rate)
with open(os.path.join(args.results_dir, 'test_result.txt'), 'w') as f:
print('%f' % error_rate, file=f)
def main():
""" Test an RNN for sequential (possibly permuted) MNIST recognition. """
args, params_str, layer_kwargs = parse_args()
outs = mnist.load_split(args.data_dir, val=False, permute=args.permute, normalize=True, seed=0)
_, _, test_images, test_labels = outs
# Flatten the images.
test_inputs = test_images.reshape([len(test_images), -1, INPUT_SIZE])
# Align sequence-level labels with the appropriate time steps by padding with NaNs,
# and to do so, first convert the labels to floats.
length = test_inputs.shape[1]
pad = lambda x: np.pad(x, [[0, 0], [length - 1, 0], [0, 0]], mode='constant', constant_values=np.nan)
test_labels = pad(test_labels.reshape([-1, 1, 1]).astype(np.float))
test_batches = utils.full_bptt_batch_generator(test_inputs, test_labels, TEST_BATCH_SIZE, num_epochs=1,
shuffle=False)
model = models.RNNClassificationModel(args.layer_type, INPUT_SIZE, TARGET_SIZE, args.num_hidden_units,
args.activation_type, **layer_kwargs)
def _error_rate(valid_predictions, valid_targets):
incorrect_mask = tf.logical_not(tf.equal(tf.argmax(valid_predictions, 1), tf.argmax(valid_targets, 1)))
return tf.reduce_mean(tf.to_float(incorrect_mask))
model.error_rate = _error_rate(model.valid_predictions, model.valid_targets)
config = tf.ConfigProto()
config.gpu_options.allow_growth = False
sess = tf.Session(config=config)
saver = tf.train.Saver()
saver.restore(sess, os.path.join(args.results_dir, 'model.ckpt'))
error_rates = []
for batch_inputs, batch_labels in test_batches:
batch_targets = utils.one_hot(np.squeeze(batch_labels, 2), TARGET_SIZE)
valid_predictions, valid_targets, batch_error_rates = sess.run(
[model.valid_predictions, model.valid_targets, model.error_rate],
feed_dict={model.inputs: batch_inputs,
model.targets: batch_targets}
)
error_rates.append(batch_error_rates)
error_rate = np.mean(error_rates, dtype=np.float)
print('%f' % error_rate)
with open(os.path.join(args.results_dir, 'test_result.txt'), 'w') as f:
print('%f' % error_rate, file=f)