    def _leapfrog(self, q, p, step_size, get_gradient, mass):
        def loop_cond(i, q, p):
            # n_leapfrogs + 1 iterations: the extra one accounts for the
            # boundary half-steps of the leapfrog scheme.
            return i < self.n_leapfrogs + 1

        def loop_body(i, q, p):
            # The first iteration (i == 0) performs no position update.
            step_size1 = tf.cond(i > 0,
                                 lambda: step_size,
                                 lambda: tf.constant(0.0, dtype=tf.float32))
            # Interior iterations take a full momentum step; the first and
            # last take half steps.
            step_size2 = tf.cond(tf.logical_and(tf.less(i, self.n_leapfrogs),
                                                tf.less(0, i)),
                                 lambda: step_size,
                                 lambda: step_size / 2)
            q, p = leapfrog_integrator(q, p, step_size1, step_size2,
                                       lambda q: get_gradient(q), mass)
            return [i + 1, q, p]

        i = tf.constant(0)
        _, q, p = tf.while_loop(loop_cond,
                                loop_body,
                                [i, q, p],
                                back_prop=False,
                                parallel_iterations=1)
        return q, p
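# A minimal sketch (an assumption, not from the original source) of a
# leapfrog_integrator compatible with the loop above. step_size1 scales the
# position update and step_size2 the momentum update, so the i == 0 call with
# (0, step_size / 2) performs only the initial half momentum step. grad_fn is
# assumed to return the gradient of the log posterior.
def leapfrog_integrator(q, p, step_size1, step_size2, grad_fn, mass):
    q = q + step_size1 * p / mass    # position update
    p = p + step_size2 * grad_fn(q)  # momentum update at the new position
    return q, p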
    def value_transition(self, curr_state, next_symbols, batch_size):
        first_value_token = self.num_functions + self.num_begin_tokens + self.num_control_tokens
        num_value_tokens = self.output_size - first_value_token
        with tf.name_scope('grammar_transition'):
            adjusted_next_symbols = tf.where(next_symbols >= self.num_control_tokens,
                                             next_symbols + (first_value_token - self.num_control_tokens),
                                             next_symbols)

            assert1 = tf.Assert(
                tf.reduce_all(tf.logical_and(next_symbols < num_value_tokens, next_symbols >= 0)),
                [curr_state, next_symbols])
            with tf.control_dependencies([assert1]):
                transitions = tf.gather(tf.constant(self.transition_matrix), curr_state)
            assert transitions.get_shape()[1:] == (self.output_size,)

            indices = tf.stack((tf.range(0, batch_size), adjusted_next_symbols), axis=1)
            next_state = tf.gather_nd(transitions, indices)

            assert2 = tf.Assert(tf.reduce_all(next_state >= 0),
                                [curr_state, adjusted_next_symbols, next_state])
            with tf.control_dependencies([assert2]):
                return tf.identity(next_state)
def OHNM_single_image(scores, n_pos, neg_mask):
    """Online Hard Negative Mining.
    scores: the scores of being predicted as the negative class
    n_pos: the number of positive samples
    neg_mask: mask of negative samples
    Return:
        the mask of selected negative samples.
        If n_pos == 0, no negative samples will be selected.
    """
    def has_pos():
        # Keep at most 3 * n_pos negatives, capped by how many exist.
        n_neg = n_pos * 3
        max_neg_entries = tf.reduce_sum(tf.cast(neg_mask, tf.int32))
        n_neg = tf.minimum(n_neg, max_neg_entries)
        n_neg = tf.cast(n_neg, tf.int32)
        neg_conf = tf.boolean_mask(scores, neg_mask)
        # The hardest negatives are those with the lowest negative-class
        # scores, hence top_k on the negated scores.
        vals, _ = tf.nn.top_k(-neg_conf, k=n_neg)
        threshold = vals[-1]  # a negative value
        selected_neg_mask = tf.logical_and(neg_mask, scores <= -threshold)
        return tf.cast(selected_neg_mask, tf.float32)

    def no_pos():
        return tf.zeros_like(neg_mask, tf.float32)

    return tf.cond(n_pos > 0, has_pos, no_pos)
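# Usage sketch (toy data, assuming TF 1.x): with one positive, at most three
# of the lowest-scoring negatives are kept — indices 0, 2 and 4 here.
scores = tf.constant([0.1, 0.9, 0.2, 0.8, 0.3, 0.7])
neg_mask = tf.constant([True, False, True, False, True, True])
selected = OHNM_single_image(scores, tf.constant(1), neg_mask)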
def get_acceptance_rate(q, p, new_q, new_p, log_posterior, mass, data_axes):
    old_hamiltonian, old_log_prob = hamiltonian(
        q, p, log_posterior, mass, data_axes)
    new_hamiltonian, new_log_prob = hamiltonian(
        new_q, new_p, log_posterior, mass, data_axes)
    old_log_prob = tf.check_numerics(
        old_log_prob,
        'HMC: old_log_prob has numeric errors! Try better initialization.')
    # Metropolis ratio: min(1, exp(H_old - H_new)).
    acceptance_rate = tf.exp(
        tf.minimum(-new_hamiltonian + old_hamiltonian, 0.0))
    # Force rejection (rate 0) whenever the proposal produced non-finite
    # energies or log-probabilities.
    is_finite = tf.logical_and(tf.is_finite(acceptance_rate),
                               tf.is_finite(new_log_prob))
    acceptance_rate = tf.where(is_finite, acceptance_rate,
                               tf.zeros_like(acceptance_rate))
    return old_hamiltonian, new_hamiltonian, old_log_prob, new_log_prob, \
        acceptance_rate
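# Sketch (an assumption, not from the original source): how the returned
# acceptance_rate would typically drive a Metropolis accept/reject step.
# Shapes are assumed to line up; in practice the rate may need broadcasting
# to q's shape.
_, _, _, _, acceptance_rate = get_acceptance_rate(
    q, p, new_q, new_p, log_posterior, mass, data_axes)
u = tf.random_uniform(tf.shape(acceptance_rate))
accept = tf.less(u, acceptance_rate)
next_q = tf.where(accept, new_q, q)  # keep the old sample on rejection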
def _deepfool2(model, x, epochs, eta, clip_min, clip_max, min_prob):
    y0 = tf.stop_gradient(tf.reshape(model(x), [-1])[0])
    y0 = tf.to_int32(tf.greater(y0, 0.5))

    def _cond(i, z):
        xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
        y = tf.stop_gradient(tf.reshape(model(xadv), [-1])[0])
        y = tf.to_int32(tf.greater(y, 0.5))
        # Keep iterating until the label flips or the epoch budget runs out.
        return tf.logical_and(tf.less(i, epochs), tf.equal(y0, y))

    def _body(i, z):
        xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
        y = tf.reshape(model(xadv), [-1])[0]
        g = tf.gradients(y, xadv)[0]
        # Step towards the decision boundary of the binary classifier.
        dx = - y * g / tf.norm(g)
        return i+1, z+dx

    _, noise = tf.while_loop(_cond, _body, [0, tf.zeros_like(x)],
                             name='_deepfool2_impl', back_prop=False)
    return noise
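# Usage sketch (an assumption): a toy binary classifier mapping a [1, 2]
# input to a sigmoid probability; the returned perturbation, scaled by
# (1 + eta) as inside the loop, is what flips the decision.
model = lambda v: tf.sigmoid(tf.reduce_sum(v, axis=1))
x = tf.constant([[0.2, 0.3]])
noise = _deepfool2(model, x, epochs=10, eta=0.01,
                   clip_min=0.0, clip_max=1.0, min_prob=0.0)
xadv = tf.clip_by_value(x + noise * (1 + 0.01), 0.0, 1.0)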
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    original_shape = tf.shape(image)

    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3),
        ['Rank of image must be equal to 3.'])
    cropped_shape = control_flow_ops.with_dependencies(
        [rank_assertion],
        tf.stack([crop_height, crop_width, original_shape[2]]))

    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_height),
            tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])

    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

    # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
    # define the crop size.
    image = control_flow_ops.with_dependencies(
        [size_assertion],
        tf.slice(image, offsets, cropped_shape))
    return tf.reshape(image, cropped_shape)
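# Usage sketch (TF 1.x): a random 224x224 crop of an HxWx3 image. The
# control_flow_ops import is what _crop itself relies on.
from tensorflow.python.ops import control_flow_ops
image = tf.random_uniform([256, 256, 3])
max_h = tf.shape(image)[0] - 224
max_w = tf.shape(image)[1] - 224
off_h = tf.random_uniform([], 0, max_h + 1, dtype=tf.int32)
off_w = tf.random_uniform([], 0, max_w + 1, dtype=tf.int32)
patch = _crop(image, off_h, off_w, 224, 224)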
def bboxes_filter_center(labels, bboxes, margins=[0., 0., 0., 0.],
                         scope=None):
    """Filter out bounding boxes whose centers are not in
    the rectangle [0, 0, 1, 1] + margins. The margin Tensor
    can be used to enforce or loosen this condition.
    Return:
      labels, bboxes: Filtered elements.
    """
    with tf.name_scope(scope, 'bboxes_filter', [labels, bboxes]):
        cy = (bboxes[:, 0] + bboxes[:, 2]) / 2.
        cx = (bboxes[:, 1] + bboxes[:, 3]) / 2.
        mask = tf.greater(cy, margins[0])
        mask = tf.logical_and(mask, tf.greater(cx, margins[1]))
        mask = tf.logical_and(mask, tf.less(cy, 1. + margins[2]))
        mask = tf.logical_and(mask, tf.less(cx, 1. + margins[3]))
        # Boolean masking...
        labels = tf.boolean_mask(labels, mask)
        bboxes = tf.boolean_mask(bboxes, mask)
        return labels, bboxes
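# Usage sketch: boxes in normalized [ymin, xmin, ymax, xmax] order; the
# second box's center (cy = 1.2) lies outside [0, 1] and is filtered out.
labels = tf.constant([1, 2], dtype=tf.int64)
bboxes = tf.constant([[0.1, 0.1, 0.4, 0.4],
                      [1.1, 0.1, 1.3, 0.4]])
labels, bboxes = bboxes_filter_center(labels, bboxes)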
def bboxes_filter_labels(labels, bboxes,
                         out_labels=[], num_classes=np.inf,
                         scope=None):
    """Filter out labels from a collection. Typically used to get rid
    of DontCare elements. Also removes elements based on the number of classes.
    Return:
      labels, bboxes: Filtered elements.
    """
    with tf.name_scope(scope, 'bboxes_filter_labels', [labels, bboxes]):
        # Keep elements whose label is below num_classes and not listed
        # in out_labels.
        mask = tf.less(labels, num_classes)
        for l in out_labels:
            mask = tf.logical_and(mask, tf.not_equal(labels, l))
        labels = tf.boolean_mask(labels, mask)
        bboxes = tf.boolean_mask(bboxes, mask)
        return labels, bboxes
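# Usage sketch (assumption: label 0 marks DontCare boxes): drop DontCare
# entries and any label at or above num_classes, continuing from the
# labels/bboxes tensors above.
labels, bboxes = bboxes_filter_labels(labels, bboxes, out_labels=[0], num_classes=21)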
# =========================================================================== #
# Standard boxes computation.
# =========================================================================== #
    def get_mu_tensor(self):
        const_fact = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
        coef = tf.Variable([-1.0, 3.0, 0.0, 1.0], dtype=tf.float32, name="cubic_solver_coef")
        coef = tf.scatter_update(coef, tf.constant(2), -(3 + const_fact))
        roots = tf.py_func(np.roots, [coef], Tout=tf.complex64, stateful=False)

        # Filter out the correct root: real part in (0, 1), negligible
        # imaginary part.
        root_idx = tf.logical_and(
            tf.logical_and(tf.greater(tf.real(roots), tf.constant(0.0)),
                           tf.less(tf.real(roots), tf.constant(1.0))),
            tf.less(tf.abs(tf.imag(roots)), 1e-5))
        # In case there are two duplicated roots satisfying the above condition.
        root = tf.reshape(tf.gather(tf.gather(roots, tf.where(root_idx)), tf.constant(0)), shape=[])
        tf.assert_equal(tf.size(root), tf.constant(1))

        dr = self._h_max / self._h_min
        mu = tf.maximum(tf.real(root)**2, ((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1))**2)
        return mu
def atan2(x, y, epsilon=1.0e-12):
    """
    A hack until the TensorFlow developers implement a function that can
    find the angle from an x and y coordinate.
    :param x: x coordinate tensor
    :param y: y coordinate tensor
    :param epsilon: small offset added to zeros to avoid division by zero
    :return: the angle, handled quadrant by quadrant
    """
    # Add a small number to all zeros, to avoid division by zero:
    x = tf.where(tf.equal(x, 0.0), x + epsilon, x)
    y = tf.where(tf.equal(y, 0.0), y + epsilon, y)

    angle = tf.where(tf.greater(x, 0.0), tf.atan(y / x), tf.zeros_like(x))
    angle = tf.where(tf.logical_and(tf.less(x, 0.0), tf.greater_equal(y, 0.0)), tf.atan(y / x) + np.pi, angle)
    angle = tf.where(tf.logical_and(tf.less(x, 0.0), tf.less(y, 0.0)), tf.atan(y / x) - np.pi, angle)
    angle = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.greater(y, 0.0)), 0.5 * np.pi * tf.ones_like(x), angle)
    angle = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.less(y, 0.0)), -0.5 * np.pi * tf.ones_like(x), angle)
    angle = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.equal(y, 0.0)), tf.zeros_like(x), angle)
    return angle
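# Sanity-check sketch: points in all four quadrants. Note the argument
# order (x, y) here corresponds to np.arctan2(y, x).
xs = tf.constant([1.0, -1.0, -1.0, 1.0])
ys = tf.constant([1.0, 1.0, -1.0, -1.0])
angles = atan2(xs, ys)  # approx [pi/4, 3*pi/4, -3*pi/4, -pi/4]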
    def tf_next_step(self, x, iteration, conjugate, residual, squared_residual):
        """
        Termination condition: max number of iterations, or residual sufficiently small.

        Args:
            x: Current solution estimate $x_t$.
            iteration: Current iteration counter $t$.
            conjugate: Current conjugate $c_t$.
            residual: Current residual $r_t$.
            squared_residual: Current squared residual $r_t^2$.

        Returns:
            True if another iteration should be performed.
        """
        next_step = super(ConjugateGradient, self).tf_next_step(x, iteration, conjugate, residual, squared_residual)
        return tf.logical_and(x=next_step, y=(squared_residual >= util.epsilon))
    def decodesIntoAccuracy(self, labels, perSymbol=True):
        # labels has dimensions None x L
        accuracyMatrix = tf.equal(self.hardOutputs, labels)

        # Zero out anything past the labeled length
        accuracyMatrix = tf.logical_and(accuracyMatrix,
                                        tf.sequence_mask(self.lengthPlaceholder, maxlen=self.maximumLength))

        # Sum across all of the time steps to get the total number of
        # predictions correct in each batch entry
        accuracyVector = tf.reduce_sum(tf.cast(accuracyMatrix, tf.int32), axis=1)
        if perSymbol:
            # Normalize by the sequence length and take the average
            accuracyVector = tf.divide(tf.cast(accuracyVector, tf.float32),
                                       tf.cast(self.lengthPlaceholder, tf.float32))
        if not perSymbol:
            # Accuracy is measured per sequence
            accuracyVector = tf.cast(tf.equal(accuracyVector, self.lengthPlaceholder), tf.float32)
        return tf.reduce_mean(accuracyVector)
    def getBoxes(self, proposals, proposal_scores, maxOutputs=30, nmsThreshold=0.3, scoreThreshold=0.8):
        if scoreThreshold is None:
            scoreThreshold = 0

        with tf.name_scope("getBoxes"):
            scores = tf.nn.softmax(self.getBoxScores(proposals))

            classes = tf.argmax(scores, 1)
            scores = tf.reduce_max(scores, axis=1)
            posIndices = tf.cast(tf.where(tf.logical_and(classes > 0, scores > scoreThreshold)), tf.int32)

            positives, scores, classes = MultiGather.gather([proposals, scores, classes], posIndices)
            positives = self.refineBoxes(positives, False)

            # Final NMS
            posIndices = tf.image.non_max_suppression(positives, scores, iou_threshold=nmsThreshold, max_output_size=maxOutputs)
            posIndices = tf.expand_dims(posIndices, axis=-1)
            positives, scores, classes = MultiGather.gather([positives, scores, classes], posIndices)

            classes = tf.cast(tf.cast(classes, tf.int32) - 1, tf.uint8)
            return positives, scores, classes
def prune_small_boxes(boxlist, min_side, scope=None):
    """Prunes small boxes in the boxlist which have a side smaller than min_side.

    Args:
        boxlist: BoxList holding N boxes.
        min_side: Minimum width AND height of box to survive pruning.
        scope: name scope.

    Returns:
        A pruned boxlist.
    """
    with tf.name_scope(scope, 'PruneSmallBoxes'):
        height, width = height_width(boxlist)
        is_valid = tf.logical_and(tf.greater_equal(width, min_side),
                                  tf.greater_equal(height, min_side))
        return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
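# Usage sketch (an assumption: BoxList, height_width and gather come from
# the TF Object Detection API's box_list / box_list_ops modules, which this
# snippet appears to be drawn from):
boxes = box_list.BoxList(tf.constant([[0.0, 0.0, 0.5, 0.5],
                                      [0.0, 0.0, 0.01, 0.9]]))
pruned = prune_small_boxes(boxes, min_side=0.05)  # keeps only the first box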
    def compute_mask(self, inputs, mask=None):
        dimension = K.ndim(inputs)
        # True wherever at least one feature along the last axis differs
        # from mask_value.
        mask_tensor = K.any(K.not_equal(inputs, self.mask_value), axis=-1)
        # Reduce over each intermediate axis and AND the results together.
        mask_base = K.any(mask_tensor, axis=1, keepdims=True)
        for axis in range(2, dimension - 1):
            mask_axis = K.any(mask_tensor, axis=axis, keepdims=True)
            mask_base = tf.logical_and(mask_base, mask_axis)
        return mask_base
def pairwise_and(a, b):
    # Outer AND: broadcast a along columns and b along rows over the
    # trailing dimension.
    column = tf.expand_dims(a, 2)
    row = tf.expand_dims(b, 1)
    return tf.logical_and(column, row)
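# Usage sketch: for batched boolean tensors of shapes [B, N] and [B, M],
# the result has shape [B, N, M] with entry (b, i, j) = u[b, i] AND v[b, j].
u = tf.constant([[True, False, True]])
v = tf.constant([[True, True]])
outer = pairwise_and(u, v)  # shape [1, 3, 2]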
def _get_input_filter(width, width_threshold, length, length_threshold):
    """Boolean op for discarding input data based on string or image size
    Input:
        width            : Tensor representing the image width
        width_threshold  : Python numerical value (or None) representing the
                           maximum allowable input image width
        length           : Tensor representing the ground truth string length
        length_threshold : Python numerical value (or None) representing the
                           maximum allowable input string length
    Returns:
        keep_input : Boolean Tensor indicating whether to keep a given input
                     with the specified image width and string length
    """
    keep_input = None

    if width_threshold is not None:
        keep_input = tf.less_equal(width, width_threshold)

    if length_threshold is not None:
        length_filter = tf.less_equal(length, length_threshold)
        if keep_input is None:
            keep_input = length_filter
        else:
            keep_input = tf.logical_and(keep_input, length_filter)

    if keep_input is None:
        keep_input = True
    else:
        keep_input = tf.reshape(keep_input, [])  # explicitly make a scalar

    return keep_input
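# Usage sketch: keep only examples whose image is at most 512 pixels wide
# and whose transcription has at most 32 characters.
width = tf.constant(480)
length = tf.constant(20)
keep = _get_input_filter(width, 512, length, 32)  # scalar boolean Tensor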
    def __init__(self, clips, labels, class_num=24, height=128, width=128, seq_length=16, c_dim=3,
                 batch_size=32, keep_prob=1.0, is_training=True, encoder_gradient_ratio=1.0,
                 use_pretrained_encoder=False):
        self.seq = clips
        self.labels = labels
        self.class_num = class_num
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.seq_length = seq_length
        self.c_dim = c_dim
        self.dropout = keep_prob
        self.encoder_gradient_ratio = encoder_gradient_ratio
        self.use_pretrained_encoder = use_pretrained_encoder
        self.seq_shape = [seq_length, height, width, c_dim]

        self.batch_norm_params = {
            'is_training': is_training,
            'decay': 0.9,
            'epsilon': 1e-5,
            'scale': True,
            'center': True,
            'updates_collections': tf.GraphKeys.UPDATE_OPS
        }

        pred_logits = self.build_model()
        self.ac_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=pred_logits))

        # Accuracy: fraction of batch entries whose top-1 prediction matches
        # the one-hot label.
        prob = tf.nn.softmax(pred_logits)
        pred = tf.one_hot(tf.nn.top_k(prob).indices, self.class_num)
        pred = tf.squeeze(pred, axis=1)
        pred = tf.cast(pred, tf.bool)
        labels = tf.cast(labels, tf.bool)
        self.ac = tf.reduce_sum(tf.cast(tf.logical_and(labels, pred), tf.float32)) / self.batch_size
def segment_sample_select(probs, segment_ids):
    num_segments = tf.reduce_max(segment_ids) + 1
    sampled = tf.random_uniform([num_segments])

    def scan_fn(acc, x):
        p, i = x[0], x[1]
        # Running cumulative probability within segment i, offset by the
        # sampled threshold: an element is selected at the step where the
        # running sum crosses zero.
        prev_v = tf.gather(acc[0], i)
        new_probs = acc[0] + tf.one_hot(i, num_segments, p)
        select = tf.logical_and(tf.less(prev_v, 0.0), tf.greater_equal(prev_v + p, 0.0))
        return new_probs, select

    _, selection = tf.scan(scan_fn, (probs, segment_ids), initializer=(-sampled, False))
    return selection
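# Usage sketch: draws one element per segment with probability proportional
# to probs; probs are assumed to sum to 1 within each segment.
probs = tf.constant([0.3, 0.7, 0.5, 0.5])
segment_ids = tf.constant([0, 0, 1, 1])
selection = segment_sample_select(probs, segment_ids)  # one True per segment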
def filter_small_gt(gt_bboxes, gt_cats, min_size):
    # Keep ground-truth boxes whose third and fourth columns (the box width
    # and height here) are at least min_size.
    mask = tf.logical_and(gt_bboxes[:, 2] >= min_size,
                          gt_bboxes[:, 3] >= min_size)
    return tf.boolean_mask(gt_bboxes, mask), tf.boolean_mask(gt_cats, mask)
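# Usage sketch (assuming an [x, y, w, h] box layout, as the width/height
# checks suggest): drops the second box, whose height is below min_size.
gt_bboxes = tf.constant([[0., 0., 10., 12.], [0., 0., 10., 2.]])
gt_cats = tf.constant([1, 2])
boxes, cats = filter_small_gt(gt_bboxes, gt_cats, min_size=4)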