import tensorflow as tf


def greater(x, y):
    '''Element-wise truth value of (x > y).

    Returns a bool tensor.
    '''
    return tf.greater(x, y)
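# Minimal usage sketch (an illustration, not part of the original source):
# tf.greater broadcasts like NumPy, so a scalar threshold works directly.
def _demo_greater():
    x = tf.constant([1.0, 5.0, 3.0])
    with tf.Session() as sess:
        print(sess.run(greater(x, 2.0)))  # [False  True  True]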
def nms(self, localization, confidence, tiling):
    # decode_bboxes, args and self.loader come from the surrounding project.
    good_bboxes = decode_bboxes(localization, tiling)

    # Keep only boxes whose best non-background score clears the threshold.
    not_crap_mask = tf.reduce_max(confidence[:, 1:], axis=-1) >= args.conf_thresh
    good_bboxes = tf.boolean_mask(good_bboxes, not_crap_mask)
    confidence = tf.boolean_mask(confidence, not_crap_mask)

    self.detection_list = []
    self.score_list = []
    # Per-class thresholding, top-k selection and non-max suppression.
    for i in range(1, self.loader.num_classes):
        class_mask = tf.greater(confidence[:, i], args.conf_thresh)
        class_scores = tf.boolean_mask(confidence[:, i], class_mask)
        class_bboxes = tf.boolean_mask(good_bboxes, class_mask)

        K = tf.minimum(tf.size(class_scores), args.top_k_nms)
        _, top_k_inds = tf.nn.top_k(class_scores, K)
        top_class_scores = tf.gather(class_scores, top_k_inds)
        top_class_bboxes = tf.gather(class_bboxes, top_k_inds)

        final_inds = tf.image.non_max_suppression(
            top_class_bboxes, top_class_scores,
            max_output_size=args.top_k_after_nms,
            iou_threshold=args.nms_thresh)
        final_class_bboxes = tf.gather(top_class_bboxes, final_inds)
        final_scores = tf.gather(top_class_scores, final_inds)
        self.detection_list.append(final_class_bboxes)
        self.score_list.append(final_scores)
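# Self-contained sketch of the same threshold -> top-k -> NMS pattern on
# dummy data, without the decode_bboxes/args/loader context of the method
# above. Boxes are [y1, x1, y2, x2]; all values are illustrative assumptions.
def _demo_class_nms():
    boxes = tf.constant([[0.0, 0.0, 1.0, 1.0],
                         [0.0, 0.0, 0.9, 0.9],
                         [0.5, 0.5, 1.0, 1.0]])
    scores = tf.constant([0.9, 0.8, 0.7])
    mask = tf.greater(scores, 0.75)           # confidence threshold
    kept_scores = tf.boolean_mask(scores, mask)
    kept_boxes = tf.boolean_mask(boxes, mask)
    k = tf.minimum(tf.size(kept_scores), 2)   # top-k before NMS
    top_scores, top_inds = tf.nn.top_k(kept_scores, k)
    top_boxes = tf.gather(kept_boxes, top_inds)
    keep = tf.image.non_max_suppression(top_boxes, top_scores,
                                        max_output_size=2, iou_threshold=0.5)
    with tf.Session() as sess:
        # Second box is suppressed (IoU 0.81 with the first).
        print(sess.run(tf.gather(top_boxes, keep)))  # [[0. 0. 1. 1.]]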
def binary(config, gan, net):
    # Threshold activations at zero, then cast the boolean mask back to float.
    net = tf.greater(net, 0)
    net = tf.cast(net, tf.float32)
    return net
def detectMinVal(input_mat, var, threshold=1e-6, name='', debug=False):
    eigen_min = tf.reduce_min(input_mat)
    eigen_max = tf.reduce_max(input_mat)
    eigen_ratio = eigen_max / eigen_min
    input_mat_clipped = clipoutNeg(input_mat, threshold)

    if debug:
        input_mat_clipped = tf.cond(
            tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)),
            lambda: input_mat_clipped,
            lambda: tf.Print(input_mat_clipped, [
                tf.convert_to_tensor('screwed ratio ' + name + ' eigen values!!!'),
                tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))

    return input_mat_clipped
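# clipoutNeg is not shown in this snippet. A plausible sketch of it, hedged
# as an assumption: it zeroes out entries at or below the threshold so the
# clipped matrix stays positive.
def clipoutNeg(vec, threshold=1e-6):
    mask = tf.cast(vec > threshold, tf.float32)
    return mask * vec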
def f1_score(precision, recall):
    """Creates an op for calculating the F1 score.

    Args:
      precision: A tensor representing precision.
      recall: A tensor representing recall.

    Returns:
      A tensor with the result of the F1 calculation.
    """
    return tf.where(
        tf.greater(precision + recall, 0),
        2 * ((precision * recall) / (precision + recall)),
        0)
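# Illustrative check of the division-by-zero guard (values are assumptions):
# tf.where picks the 0 branch when precision + recall == 0, avoiding NaN.
def _demo_f1():
    with tf.Session() as sess:
        print(sess.run(f1_score(tf.constant(0.5), tf.constant(0.5))))  # 0.5
        print(sess.run(f1_score(tf.constant(0.0), tf.constant(0.0))))  # 0.0, no NaN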
def accuracy_without_true_negatives(true_positives, false_positives,
                                    false_negatives):
    """Creates an op for calculating accuracy without true negatives.

    Args:
      true_positives: A tensor representing true_positives.
      false_positives: A tensor representing false_positives.
      false_negatives: A tensor representing false_negatives.

    Returns:
      A tensor with the result of the calculation.
    """
    return tf.where(
        tf.greater(true_positives + false_positives + false_negatives, 0),
        true_positives / (true_positives + false_positives + false_negatives),
        0)
def _frame_metrics(frame_labels, frame_predictions):
    """Calculate frame-based metrics."""
    frame_labels_bool = tf.cast(frame_labels, tf.bool)
    frame_predictions_bool = tf.cast(frame_predictions, tf.bool)

    frame_true_positives = tf.reduce_sum(tf.to_float(tf.logical_and(
        tf.equal(frame_labels_bool, True),
        tf.equal(frame_predictions_bool, True))))
    frame_false_positives = tf.reduce_sum(tf.to_float(tf.logical_and(
        tf.equal(frame_labels_bool, False),
        tf.equal(frame_predictions_bool, True))))
    frame_false_negatives = tf.reduce_sum(tf.to_float(tf.logical_and(
        tf.equal(frame_labels_bool, True),
        tf.equal(frame_predictions_bool, False))))
    frame_accuracy = tf.reduce_sum(tf.to_float(
        tf.equal(frame_labels_bool, frame_predictions_bool)))

    frame_precision = tf.where(
        tf.greater(frame_true_positives + frame_false_positives, 0),
        tf.div(frame_true_positives,
               frame_true_positives + frame_false_positives),
        0)
    frame_recall = tf.where(
        tf.greater(frame_true_positives + frame_false_negatives, 0),
        tf.div(frame_true_positives,
               frame_true_positives + frame_false_negatives),
        0)
    frame_f1_score = f1_score(frame_precision, frame_recall)
    frame_accuracy_without_true_negatives = accuracy_without_true_negatives(
        frame_true_positives, frame_false_positives, frame_false_negatives)

    return {
        'true_positives': frame_true_positives,
        'false_positives': frame_false_positives,
        'false_negatives': frame_false_negatives,
        'accuracy': frame_accuracy,
        'accuracy_without_true_negatives': frame_accuracy_without_true_negatives,
        'precision': frame_precision,
        'recall': frame_recall,
        'f1_score': frame_f1_score,
    }
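# Illustrative smoke test (assumes TF 1.x, where tf.to_float/tf.div exist):
def _demo_frame_metrics():
    labels = tf.constant([1, 1, 0, 0])
    predictions = tf.constant([1, 0, 1, 0])
    metrics = _frame_metrics(labels, predictions)
    with tf.Session() as sess:
        # TP=1, FP=1, FN=1, so precision = recall = f1 = 0.5.
        print(sess.run(metrics))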
def calc_errors(self, output):
    # Absolute error normalized by atom count; atomic number 0 marks padding.
    Z = output['numbers']
    N = tf.reduce_sum(tf.cast(tf.greater(Z, 0), tf.float32), 1)
    tgt = output[self.target]
    pred = output[self.prediction]
    if self.idx is not None:
        tgt = tgt[:, self.idx]
        pred = pred[:, self.idx]
    return tf.abs(tgt - pred) / N
def calc_errors(self, output):
    # Squared error per atom; same padding convention as above.
    Z = output['numbers']
    N = tf.reduce_sum(tf.cast(tf.greater(Z, 0), tf.float32), 1)
    tgt = output[self.target]
    pred = output[self.prediction]
    if self.idx is not None:
        tgt = tgt[:, self.idx]
        pred = pred[:, self.idx]
    return ((tgt - pred) / N) ** 2
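# Standalone sketch of the tf.greater(Z, 0) idiom used above: counting the
# non-padding atoms in a zero-padded batch of atomic numbers (illustrative):
def _demo_atom_count():
    Z = tf.constant([[6, 1, 1, 0, 0],    # 3 atoms, 2 padding slots
                     [8, 1, 1, 1, 0]])   # 4 atoms, 1 padding slot
    N = tf.reduce_sum(tf.cast(tf.greater(Z, 0), tf.float32), 1)
    with tf.Session() as sess:
        print(sess.run(N))  # [3. 4.]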
def __gt__(self, other):
    return tf.greater(self, other)
# Source: text_classification_model_han.py, project kaggle_redefining_cancer_treatment (author: jorgemf)
def _embed_sequence_with_length(self, embeddings, input_text):
    # Calculate the real lengths of input_text: True for words, False for padding.
    mask_words = tf.greater(input_text, 0)
    words_length = tf.reduce_sum(tf.cast(mask_words, tf.int32), -1)
    mask_sentences = tf.greater(words_length, 0)
    sentences_length = tf.reduce_sum(tf.cast(mask_sentences, tf.int32), 1)
    # Word ids are offset by one before the embedding lookup.
    input_text = tf.add(input_text, 1)
    embedded_sequence = tf.nn.embedding_lookup(embeddings, input_text)
    return embedded_sequence, sentences_length, words_length
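# Standalone sketch of the same length computation (assumes word id 0 is
# padding, as in the method above): a batch of 2 documents, each with
# 2 sentences of up to 3 words.
def _demo_lengths():
    input_text = tf.constant([[[4, 7, 0], [2, 0, 0]],
                              [[9, 3, 5], [0, 0, 0]]])
    mask_words = tf.greater(input_text, 0)
    words_length = tf.reduce_sum(tf.cast(mask_words, tf.int32), -1)
    sentences_length = tf.reduce_sum(
        tf.cast(tf.greater(words_length, 0), tf.int32), 1)
    with tf.Session() as sess:
        print(sess.run(words_length))      # [[2 1] [3 0]]
        print(sess.run(sentences_length))  # [2 1]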
def __call__(self, inputs, state, scope=None):
    with tf.variable_scope(scope or type(self).__name__, initializer=self._initializer):
        # Split the hidden state into blocks (each U, V, W is shared across blocks).
        U = tf.get_variable('U', [self._num_units_per_block, self._num_units_per_block])
        V = tf.get_variable('V', [self._num_units_per_block, self._num_units_per_block])
        W = tf.get_variable('W', [self._num_units_per_block, self._num_units_per_block])
        b = tf.get_variable('biasU', [self._num_units_per_block])

        state = tf.split(state, self._num_blocks, 1)
        next_states = []
        for j, state_j in enumerate(state):  # hidden state of block j
            key_j = self._keys[j]
            gate_j = self.get_gate(state_j, key_j, inputs)
            candidate_j = self.get_candidate(state_j, key_j, inputs, U, V, W, b)

            # Equation 4: h_j <- h_j + g_j * h_j^~
            # Perform an update of the hidden state (memory).
            state_j_next = state_j + tf.expand_dims(gate_j, -1) * candidate_j

            # Equation 5: h_j <- h_j / \norm{h_j}
            # Forget previous memories by normalization.
            state_j_next = tf.nn.l2_normalize(state_j_next, -1)  # TODO: Is epsilon necessary?
            # An explicit alternative that guards against a zero norm:
            # state_j_next_norm = tf.norm(tensor=state_j_next, ord='euclidean',
            #                             axis=-1, keep_dims=True)
            # state_j_next_norm = tf.where(tf.greater(state_j_next_norm, 0.0),
            #                              state_j_next_norm,
            #                              tf.ones_like(state_j_next_norm))
            # state_j_next = state_j_next / state_j_next_norm

            next_states.append(state_j_next)

        state_next = tf.concat(next_states, 1)
        return state_next, state_next
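# The commented-out alternative above guards against a zero norm explicitly
# with tf.greater. A standalone sketch of that guard (note tf.nn.l2_normalize
# already avoids division by zero via its epsilon argument):
def _demo_safe_normalize(state):
    norm = tf.norm(tensor=state, ord='euclidean', axis=-1, keep_dims=True)
    safe_norm = tf.where(tf.greater(norm, 0.0), norm, tf.ones_like(norm))
    return state / safe_norm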
def get_eval_ops(logits, labels, one_hot=False, scope='', calc_accuracy=True):
    """Evaluate the quality of the logits at predicting the label.

    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size], with values in the
        range [0, NUM_CLASSES).

    Returns:
      A scalar int32 tensor with the number of examples (out of batch_size)
      that were predicted correctly.
    """
    print('Evaluation Ops..')
    with tf.name_scope(scope):
        # For a classifier model, we can use the in_top_k op.
        # It returns a bool tensor with shape [batch_size] that is true for
        # the examples where the label was in the top k (here k=1)
        # of all logits for that example.
        if one_hot:
            labels = tf.argmax(labels, 1)
        top_1_op = tf.nn.in_top_k(logits, labels, 1)
        num_correct = tf.reduce_sum(tf.cast(top_1_op, tf.float32))

        if calc_accuracy:
            acc_percent = tf.divide(num_correct, labels.shape[0].value)
        else:
            acc_percent = tf.constant(0.0)

        # Count the labels that are valid, i.e. greater than the -1 sentinel.
        y_const = tf.constant(-1, dtype=labels.dtype)
        y_greater = tf.greater(labels, y_const)
        n_all = tf.reduce_sum(tf.cast(y_greater, tf.float32))

        return top_1_op, acc_percent * 100.0, num_correct, n_all, labels
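# Small illustration of tf.nn.in_top_k, the core of the evaluation above
# (logits and labels here are arbitrary assumptions):
def _demo_in_top_k():
    logits = tf.constant([[0.1, 0.9], [0.8, 0.2]])
    labels = tf.constant([1, 1])
    with tf.Session() as sess:
        print(sess.run(tf.nn.in_top_k(logits, labels, 1)))  # [ True False]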
########################################################################
def huber_loss(x, delta=1):
    coef = 0.5
    l2_mask = tf.less_equal(tf.abs(x), delta)  # quadratic region
    l1_mask = tf.greater(tf.abs(x), delta)     # linear region
    term_1 = tf.reduce_sum(coef * tf.square(tf.boolean_mask(x, l2_mask)))
    term_2 = tf.reduce_sum(delta * (tf.abs(tf.boolean_mask(x, l1_mask)) - coef * delta))
    return term_1 + term_2
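# Numeric sanity check (illustrative): with delta=1, an element with
# |x| <= 1 contributes 0.5 * x^2 and an element with |x| > 1 contributes
# |x| - 0.5, so the total below is 0.5*0.25 + (2.0 - 0.5) = 1.625.
def _demo_huber():
    x = tf.constant([0.5, 2.0])
    with tf.Session() as sess:
        print(sess.run(huber_loss(x)))  # 1.625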
def _box_params_loss(self, ground_truth, ground_truth_num,
                     anchor_centers, offsets, proposals_num):
    # ground_truth shape is M x 4, where M is count and 4 are y, x, h, w
    ground_truth = tf.expand_dims(ground_truth, axis=0)
    ground_truth = tf.tile(ground_truth, [proposals_num, 1, 1])
    # anchor_centers shape is N x 4, where N is count and 4 are ya, xa, ha, wa
    anchor_centers = tf.expand_dims(anchor_centers, axis=1)
    anchor_centers = tf.tile(anchor_centers, [1, ground_truth_num, 1])
    # pos_sample_mask shape is N x M; True marks positive proposals and,
    # hence, their anchor centers
    pos_sample_mask = tf.greater(self.iou_metric, 0.7)
    # convert mask shape from N to N x 1 to make it broadcastable with pos_sample_mask
    mask = tf.expand_dims(self.cross_boundary_mask, axis=1)
    # convert the resulting shape to align it with offsets
    mask = tf.expand_dims(tf.cast(pos_sample_mask & mask, tf.float32), axis=2)

    y_anchor, x_anchor, height_anchor, width_anchor = tf.unstack(anchor_centers, axis=2)
    y_ground_truth, x_ground_truth, height_ground_truth, width_ground_truth = tf.unstack(
        ground_truth, axis=2)

    # The idea is to calculate N x M tx, ty, tw, th for the ground truth boxes
    # for every proposal, then compute the loss, multiply it by the mask to
    # filter out non-positive samples, and sum the result.
    # each shape is N x M
    tx_ground_truth = (x_ground_truth - x_anchor) / width_anchor
    ty_ground_truth = (y_ground_truth - y_anchor) / height_anchor
    tw_ground_truth = tf.log(width_ground_truth / width_anchor)
    th_ground_truth = tf.log(height_ground_truth / height_anchor)
    gt_params = tf.stack(
        [ty_ground_truth, tx_ground_truth, th_ground_truth, tw_ground_truth], axis=2)

    offsets = tf.expand_dims(offsets, axis=1)
    offsets = tf.tile(offsets, [1, ground_truth_num, 1])
    return huber_loss((offsets - gt_params) * mask)
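# Worked example of the box encoding above (the Faster R-CNN style
# parameterization), with made-up numbers: anchor (y, x, h, w) = (0, 0, 10, 10)
# and ground truth (5, 5, 20, 10) give ty = tx = 0.5, th = log 2, tw = 0.
def _demo_box_encoding():
    import math
    y_a, x_a, h_a, w_a = 0.0, 0.0, 10.0, 10.0
    y_gt, x_gt, h_gt, w_gt = 5.0, 5.0, 20.0, 10.0
    ty, tx = (y_gt - y_a) / h_a, (x_gt - x_a) / w_a
    th, tw = math.log(h_gt / h_a), math.log(w_gt / w_a)
    print(ty, tx, th, tw)  # 0.5 0.5 0.693... 0.0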
def _safe_div(numerator, denominator, name):
    """Divides two values, returning 0 if the denominator is <= 0.

    Args:
      numerator: A real `Tensor`.
      denominator: A real `Tensor`, with dtype matching `numerator`.
      name: Name for the returned op.

    Returns:
      0 if `denominator` <= 0, else `numerator` / `denominator`
    """
    # The original used math_ops.greater/math_ops.divide, which require an
    # internal TF import; the public tf.* aliases behave identically.
    return tf.where(
        tf.greater(denominator, 0),
        tf.divide(numerator, denominator),
        tf.zeros_like(numerator),
        name=name)
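# Illustrative check of the zero-denominator guard:
def _demo_safe_div():
    with tf.Session() as sess:
        print(sess.run(_safe_div(tf.constant(1.0), tf.constant(2.0), 'd1')))  # 0.5
        print(sess.run(_safe_div(tf.constant(1.0), tf.constant(0.0), 'd2')))  # 0.0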
def tf_ssd_bboxes_select_layer_all_classes(predictions_layer, localizations_layer,
                                           select_threshold=None):
    """Extract classes, scores and bounding boxes from features in one layer.

    Batch-compatible: inputs are supposed to have batch-type shapes.

    Args:
      predictions_layer: A SSD prediction layer;
      localizations_layer: A SSD localization layer;
      select_threshold: Classification threshold for selecting a box. If None,
        select boxes whose classification score is higher than 'no class'.

    Returns:
      classes, scores, bboxes: Output Tensors.
    """
    # Reshape features: Batches x N x N_labels | 4.
    # (tfe is the project's tf_extended utility module.)
    p_shape = tfe.get_shape(predictions_layer)
    predictions_layer = tf.reshape(predictions_layer,
                                   tf.stack([p_shape[0], -1, p_shape[-1]]))
    l_shape = tfe.get_shape(localizations_layer)
    localizations_layer = tf.reshape(localizations_layer,
                                     tf.stack([l_shape[0], -1, l_shape[-1]]))

    # Boxes selection: use threshold or score > no-label criteria.
    if select_threshold is None or select_threshold == 0:
        # Class prediction and scores: assign 0. to the 0-class (background).
        classes = tf.argmax(predictions_layer, axis=2)
        scores = tf.reduce_max(predictions_layer, axis=2)
        scores = scores * tf.cast(classes > 0, scores.dtype)
    else:
        sub_predictions = predictions_layer[:, :, 1:]
        classes = tf.argmax(sub_predictions, axis=2) + 1
        scores = tf.reduce_max(sub_predictions, axis=2)
        # Only keep predictions higher than the threshold.
        mask = tf.greater(scores, select_threshold)
        classes = classes * tf.cast(mask, classes.dtype)
        scores = scores * tf.cast(mask, scores.dtype)

    # Assume the localization layer is already decoded.
    bboxes = localizations_layer
    return classes, scores, bboxes
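# Standalone sketch of the no-threshold selection branch above, on a dummy
# batch of 1 image x 2 anchors x 3 classes (class 0 = background); values
# are illustrative assumptions, and tfe is not needed here:
def _demo_select():
    predictions = tf.constant([[[0.7, 0.2, 0.1],    # background wins -> score zeroed
                                [0.1, 0.3, 0.6]]])  # class 2 wins -> score 0.6
    classes = tf.argmax(predictions, axis=2)
    scores = tf.reduce_max(predictions, axis=2)
    scores = scores * tf.cast(classes > 0, scores.dtype)
    with tf.Session() as sess:
        print(sess.run(classes))  # [[0 2]]
        print(sess.run(scores))   # [[0.  0.6]]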
def _fit(tensor, width):
    actual = tf.shape(tensor)[-1]
    result = tf.cond(tf.greater(actual, width),
                     lambda: _trim(tensor, width),
                     lambda: _pad(tensor, width))
    return result
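# _trim and _pad are not shown in this snippet. A hedged sketch of what they
# might look like for a rank-1 tensor (an assumption, not the original code;
# _pad here only handles the 1-D case):
def _trim(tensor, width):
    return tensor[..., :width]

def _pad(tensor, width):
    missing = width - tf.shape(tensor)[-1]
    return tf.pad(tensor, [[0, missing]])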
def gt(self, x, y):
    return tf.greater(x, y)