def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections."""
    import matplotlib.pyplot as plt
    import numpy as np
    # Convert BGR (OpenCV channel order) to RGB for matplotlib.
    im = im[:, :, (2, 1, 0)]
    # Draw at most the first 10 detections that score above the threshold.
    for i in range(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
            )
            plt.title('{} {:.3f}'.format(class_name, score))
            plt.show()
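A minimal, hypothetical call to the visualizer above; the image and detection rows ([x1, y1, x2, y2, score]) are made up for illustration and stand in for a BGR image such as one loaded with OpenCV.

import numpy as np

im = np.zeros((300, 300, 3), dtype=np.uint8)        # stand-in BGR image
dets = np.array([[50.0, 60.0, 200.0, 220.0, 0.95],  # [x1, y1, x2, y2, score]
                 [10.0, 10.0, 40.0, 40.0, 0.12]])   # below thresh, not drawn
vis_detections(im, 'person', dets, thresh=0.3)      # shows one figure with the first box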
def batch_iou(boxes, box):
    """Compute the Intersection-Over-Union of a batch of boxes with another
    box.
    Args:
        boxes: 2D array of [cx, cy, width, height] boxes.
        box: a single array of [cx, cy, width, height].
    Returns:
        ious: array of floats in the range [0, 1].
    """
    # Overlap along x: rightmost left edge to leftmost right edge, clipped at 0.
    lr = np.maximum(
        np.minimum(boxes[:, 0] + 0.5 * boxes[:, 2], box[0] + 0.5 * box[2]) -
        np.maximum(boxes[:, 0] - 0.5 * boxes[:, 2], box[0] - 0.5 * box[2]),
        0
    )
    # Overlap along y, computed the same way.
    tb = np.maximum(
        np.minimum(boxes[:, 1] + 0.5 * boxes[:, 3], box[1] + 0.5 * box[3]) -
        np.maximum(boxes[:, 1] - 0.5 * boxes[:, 3], box[1] - 0.5 * box[3]),
        0
    )
    inter = lr * tb
    union = boxes[:, 2] * boxes[:, 3] + box[2] * box[3] - inter
    return inter / union
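A small usage sketch for the center-format batch_iou above; the box values are invented and numpy is assumed to be imported as np.

import numpy as np

boxes = np.array([[10.0, 10.0, 4.0, 4.0],   # identical to the query box -> IoU 1.0
                  [12.0, 10.0, 4.0, 4.0],   # overlaps half its width    -> IoU 1/3
                  [30.0, 30.0, 4.0, 4.0]])  # disjoint                   -> IoU 0.0
box = np.array([10.0, 10.0, 4.0, 4.0])
print(batch_iou(boxes, box))                # approximately [1.0, 0.3333, 0.0]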
def imin(arrays, axis, ignore_nan = False):
"""
Minimum of a stream of arrays along an axis.
Parameters
----------
arrays : iterable
Arrays to be reduced.
    axis : int or None, optional
        Axis along which the minimum is found. The default
        is to find the minimum along the 'stream axis', as if all arrays in ``arrays``
        were stacked along a new dimension. If ``axis = None``, arrays in ``arrays``
        are flattened before reduction.
ignore_nan : bool, optional
If True, NaNs are ignored. Default is propagation of NaNs.
Yields
------
online_min : ndarray
Cumulative minimum.
"""
ufunc = np.fmin if ignore_nan else np.minimum
yield from ireduce_ufunc(arrays, ufunc, axis)
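The ``ignore_nan`` switch above only swaps the reducing ufunc; a quick illustration of the difference between the two ufuncs, independent of the ``ireduce_ufunc`` helper (which is defined elsewhere):

import numpy as np

a = np.array([1.0, np.nan, 3.0])
b = np.array([2.0, 2.0, 2.0])
print(np.minimum(a, b))  # [ 1. nan  2.] -- NaNs propagate
print(np.fmin(a, b))     # [ 1.  2.  2.] -- NaNs are ignored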
def test_train(self):
model, fetches_ = self._test_pipeline(tf.contrib.learn.ModeKeys.TRAIN)
predictions_, loss_, _ = fetches_
target_len = self.sequence_length + 10 + 2
max_decode_length = model.params["target.max_seq_len"]
expected_decode_len = np.minimum(target_len, max_decode_length)
np.testing.assert_array_equal(predictions_["logits"].shape, [
self.batch_size, expected_decode_len - 1,
model.target_vocab_info.total_size
])
np.testing.assert_array_equal(predictions_["losses"].shape,
[self.batch_size, expected_decode_len - 1])
np.testing.assert_array_equal(predictions_["predicted_ids"].shape,
[self.batch_size, expected_decode_len - 1])
self.assertFalse(np.isnan(loss_))
def eval_one_dataset(self, sess, dataset, save_dir, subset='train'):
count = 0
print('num_examples:', dataset._num_examples)
while count < dataset._num_examples:
start = count % dataset._num_examples
images, embeddings_batchs, filenames, _ =\
dataset.next_batch_test(self.batch_size, start, 1)
print('count = ', count, 'start = ', start)
for i in range(len(embeddings_batchs)):
samples_batchs = []
# Generate up to 16 images for each sentence,
# with randomness from noise z and conditioning augmentation.
for j in range(np.minimum(16, cfg.TRAIN.NUM_COPY)):
samples = sess.run(self.fake_images,
{self.embeddings: embeddings_batchs[i]})
samples_batchs.append(samples)
self.save_super_images(images, samples_batchs,
filenames, i, save_dir,
subset)
count += self.batch_size
def custom_crop(img, bbox):
# bbox = [x-left, y-top, width, height]
imsiz = img.shape # [height, width, channel]
# if box[0] + box[2] >= imsiz[1] or\
# box[1] + box[3] >= imsiz[0] or\
# box[0] <= 0 or\
# box[1] <= 0:
# box[0] = np.maximum(0, box[0])
# box[1] = np.maximum(0, box[1])
# box[2] = np.minimum(imsiz[1] - box[0] - 1, box[2])
# box[3] = np.minimum(imsiz[0] - box[1] - 1, box[3])
center_x = int((2 * bbox[0] + bbox[2]) / 2)
center_y = int((2 * bbox[1] + bbox[3]) / 2)
R = int(np.maximum(bbox[2], bbox[3]) * 0.75)
y1 = np.maximum(0, center_y - R)
y2 = np.minimum(imsiz[0], center_y + R)
x1 = np.maximum(0, center_x - R)
x2 = np.minimum(imsiz[1], center_x + R)
img_cropped = img[y1:y2, x1:x2, :]
return img_cropped
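A quick sketch of how the crop above is clamped near an image border; the sizes are made up and the image is a dummy numpy array.

import numpy as np

img = np.zeros((100, 200, 3), dtype=np.uint8)  # height 100, width 200
bbox = [5, 5, 40, 60]                          # [x-left, y-top, width, height]
# R = int(max(40, 60) * 0.75) = 45 and the center is (25, 35),
# so the crop window is clipped to y in [0, 80) and x in [0, 70).
print(custom_crop(img, bbox).shape)            # (80, 70, 3)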
def append(self, x):
self._count += 1
if self._count == 1:
self.m = x
self.last_m = x
self.last_s = 0.0
self.min = x
self.max = x
else:
self.m = self.last_m + (x - self.last_m) / self._count
self.s = self.last_s + (x - self.last_m) * (x - self.m)
self.last_m = self.m
self.last_s = self.s
self.min = numpy.minimum(self.min, x)
self.max = numpy.maximum(self.max, x)
def __init__(self, card, skill_up=0):
skill = card.skill
if skill is None:
self.trigger_type = None
return
# Skill type
self.trigger_type = skill.trigger_type
self.effect_type = skill.effect_type
# Skill data
self.cooldown = skill.trigger_count
self.prob = np.minimum(100, (1+skill_up) * skill.odds) / 100
self.reward = skill.reward
self.duration = skill.reward if self.effect_type in ['Weak Judge', 'Strong Judge'] else 0
# Skill gem
self.score_boost, self.heal_boost = 1, 0
for gem in card.equipped_gems:
if gem.effect == 'score_boost':
self.score_boost = gem.value
elif gem.effect == 'heal_boost':
self.heal_boost = gem.value
self.init_state()
def to_LLTB(self, filename='cards.666', rare=True):
def gen_row(index, c):
card = raw_card_dict[str(c['card_id'])].copy()
card.idolize(c['idolized'])
card.level_up(skill_level=c['skill'].level, slot_num=c['slot_num'])
# name = str(index)+':'+card.card_name if card.card_name != ' ' else 'NOTSET'
name = str(index)+':'+card.member_name if card.card_name != ' ' else 'NOTSET'
info = [TB_member_dict[card.member_name], name] + adjusted_card_stat(card) + \
get_skill_stat(card.skill, card.skill.level) + get_cskill_stat(card.cskill) + [card.slot_num]
return '\t'.join([str(x) for x in info])+'\t'
df = self.owned_card.copy()
df = df[df.apply(lambda x: x.member_name in list(TB_member_dict.keys()), axis=1)]
if rare:
df = df[df.apply(lambda x: not x.promo and (x.rarity in ['UR','SSR'] or (x.rarity == 'SR' and x.idolized)), axis=1)]
df = df[['card_id', 'idolized', 'skill', 'slot_num']]
card_info = '\n'.join([gen_row(i,c) for i, c in df.iterrows()])
gem_info = '-2 ' + ' '.join([str(np.minimum(self.owned_gem[x],9)) for x in TB_gem_skill_list])
with codecs.open(filename, 'w', encoding='utf-16') as fp:
fp.write('\n\n'.join([card_info, gem_info]))
print('file saved to', filename)
def get_IOU(rec1, rec2):
"""
rec1&2 are both np.arrays with x_center, y_center, width, height
should work with any dimension as long as the last dimension is 4
"""
rec1_xy_max = rec1[..., :2] + (rec1[..., 2:4] - 1) / 2
rec1_xy_min = rec1[..., :2] - (rec1[..., 2:4] - 1) / 2
rec2_xy_max = rec2[..., :2] + (rec2[..., 2:4] - 1) / 2
rec2_xy_min = rec2[..., :2] - (rec2[..., 2:4] - 1) / 2
intersec_max = np.minimum(rec1_xy_max, rec2_xy_max)
intersec_min = np.maximum(rec1_xy_min, rec2_xy_min)
intersec_wh = np.maximum(intersec_max - intersec_min + 1, 0)
intersec_area = intersec_wh[..., 0] * intersec_wh[..., 1]
area1 = rec1[..., 2] * rec1[..., 3]
area2 = rec2[..., 2] * rec2[..., 3]
union = area1 + area2 - intersec_area
return intersec_area / union
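A hedged usage sketch for get_IOU; the box values are invented and the (N, 4) against (4,) case relies on numpy broadcasting.

import numpy as np

rec = np.array([10.0, 10.0, 5.0, 5.0])     # x_center, y_center, width, height
print(get_IOU(rec, rec))                   # 1.0 for identical boxes
recs = np.array([[10.0, 10.0, 5.0, 5.0],
                 [100.0, 100.0, 5.0, 5.0]])
print(get_IOU(recs, rec))                  # [1. 0.] via broadcasting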
def overlap_ratio(boxes1, boxes2):
# find intersection bbox
x_int_bot = np.maximum(boxes1[:, 0], boxes2[0])
x_int_top = np.minimum(boxes1[:, 0] + boxes1[:, 2], boxes2[0] + boxes2[2])
y_int_bot = np.maximum(boxes1[:, 1], boxes2[1])
y_int_top = np.minimum(boxes1[:, 1] + boxes1[:, 3], boxes2[1] + boxes2[3])
# find intersection area
dx = x_int_top - x_int_bot
dy = y_int_top - y_int_bot
area_int = np.where(np.logical_and(dx>0, dy>0), dx * dy, np.zeros_like(dx))
# find union
area_union = boxes1[:,2] * boxes1[:,3] + boxes2[2] * boxes2[3] - area_int
# find overlap ratio
ratio = np.where(area_union > 0, area_int/area_union, np.zeros_like(area_int))
return ratio
###########################################################################
# overlap_ratio of two bboxes #
###########################################################################
def overlap_ratio_pair(boxes1, boxes2):
# find intersection bbox
x_int_bot = np.maximum(boxes1[:, 0], boxes2[:, 0])
x_int_top = np.minimum(boxes1[:, 0] + boxes1[:, 2], boxes2[:, 0] + boxes2[:, 2])
y_int_bot = np.maximum(boxes1[:, 1], boxes2[:, 1])
y_int_top = np.minimum(boxes1[:, 1] + boxes1[:, 3], boxes2[:, 1] + boxes2[:, 3])
# find intersection area
dx = x_int_top - x_int_bot
dy = y_int_top - y_int_bot
area_int = np.where(np.logical_and(dx>0, dy>0), dx * dy, np.zeros_like(dx))
# find union
area_union = boxes1[:,2] * boxes1[:,3] + boxes2[:, 2] * boxes2[:, 3] - area_int
# find overlap ratio
ratio = np.where(area_union > 0, area_int/area_union, np.zeros_like(area_int))
return ratio
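A short, made-up example of the two overlap helpers above; boxes are in [x, y, width, height] form.

import numpy as np

boxes1 = np.array([[0.0, 0.0, 10.0, 10.0],
                   [20.0, 20.0, 10.0, 10.0]])
box2 = np.array([0.0, 0.0, 10.0, 10.0])
print(overlap_ratio(boxes1, box2))         # [1. 0.]: one box against many
boxes2 = np.array([[5.0, 0.0, 10.0, 10.0],
                   [20.0, 20.0, 10.0, 10.0]])
print(overlap_ratio_pair(boxes1, boxes2))  # [0.3333 1.]: element-wise pairs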
def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max):
"""
TensorFlow implementation for apply perturbations to input features based
on salency maps
:param i: index of first selected feature
:param j: index of second selected feature
:param X: a matrix containing our input features for our sample
:param increase: boolean; true if we are increasing pixels, false otherwise
:param theta: delta for each feature adjustment
:param clip_min: mininum value for a feature in our sample
:param clip_max: maximum value for a feature in our sample
: return: a perturbed input feature matrix for a target class
"""
# perturb our input sample
if increase:
X[0, i] = np.minimum(clip_max, X[0, i] + theta)
X[0, j] = np.minimum(clip_max, X[0, j] + theta)
else:
X[0, i] = np.maximum(clip_min, X[0, i] - theta)
X[0, j] = np.maximum(clip_min, X[0, j] - theta)
return X
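A tiny, hypothetical example of one perturbation step; the feature values, theta, and clip range are made up.

import numpy as np

X = np.array([[0.2, 0.5, 0.9]])   # one sample with three features
X = apply_perturbations(i=0, j=2, X=X, increase=True,
                        theta=0.3, clip_min=0.0, clip_max=1.0)
print(X)                          # [[0.5 0.5 1.0]] -- feature 2 hits clip_max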
def iou_loss(p, t):
# print "pass"
tp, tt = p.reshape((p.shape[0], 2, 2)), t.reshape((t.shape[0], 2, 2))
overlaps_t0 = T.maximum(tp[:, 0, :], tt[:, 0, :])
overlaps_t1 = T.minimum(tp[:, 1, :], tt[:, 1, :])
intersection = overlaps_t1 - overlaps_t0
bool_overlap = T.min(intersection, axis=1) > 0
intersection = intersection[:, 0] * intersection[:, 1]
intersection = T.maximum(intersection, np.float32(0.))
dims_p = tp[:, 1, :] - tp[:, 0, :]
areas_p = dims_p[:, 0] * dims_p[:, 1]
dims_t = tt[:, 1, :] - tt[:, 0, :]
areas_t = dims_t[:, 0] * dims_t[:, 1]
union = areas_p + areas_t - intersection
loss = 1. - T.minimum(
T.exp(T.log(T.abs_(intersection)) -
T.log(T.abs_(union) + np.float32(1e-5))),
np.float32(1.)
)
# return loss
return T.mean(loss)
def iou_loss_val(p, t):
tp, tt = p.reshape((p.shape[0], 2, 2)), t.reshape((t.shape[0], 2, 2))
overlaps = np.zeros_like(tp, dtype=np.float32)
overlaps[:, 0, :] = np.maximum(tp[:, 0, :], tt[:, 0, :])
overlaps[:, 1, :] = np.minimum(tp[:, 1, :], tt[:, 1, :])
intersection = overlaps[:, 1, :] - overlaps[:, 0, :]
bool_overlap = np.min(intersection, axis=1) > 0
intersection = intersection[:, 0] * intersection[:, 1]
intersection = np.maximum(intersection, 0.)
# print "bool", bool_overlap
# print "Int", intersection
dims_p = tp[:, 1, :] - tp[:, 0, :]
areas_p = dims_p[:, 0] * dims_p[:, 1]
dims_t = tt[:, 1, :] - tt[:, 0, :]
areas_t = dims_t[:, 0] * dims_t[:, 1]
union = areas_p + areas_t - intersection
# print "un", union
loss = 1. - np.minimum(
np.exp(np.log(np.abs(intersection)) - np.log(np.abs(union) + 1e-5)),
1.
)
# print loss
return np.mean(loss)
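A sanity check on the NumPy loss above with invented corner-format boxes (each row is x1, y1, x2, y2): identical boxes give a loss near zero, half-overlapping ones give 1 - 1/3.

import numpy as np

p = np.array([[0.0, 0.0, 10.0, 10.0]], dtype=np.float32)
print(iou_loss_val(p, p))                                 # ~0.0 (exact up to the 1e-5 epsilon)
t = np.array([[5.0, 0.0, 15.0, 10.0]], dtype=np.float32)
print(iou_loss_val(p, t))                                 # ~0.6667, since the IoU is 1/3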
def _exp_single(x):
"""Sanitized exponential function.
Since this method internally calls np.exp and carries
the (very likely) possibility to overflow, the method
suppresses all warnings.
#XXX: at some point we might want to let ``suppress_warnings``
# specify exactly which types of warnings it should filter.
Parameters
----------
    x : float or int
        The number to exponentiate.
    Returns
    -------
    val : float
        The exp of ``x``, capped at ``__max_exp__``.
"""
val = np.minimum(__max_exp__, np.exp(x))
return val
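``__max_exp__`` is defined elsewhere in the module; the sketch below uses a stand-in cap to show the clipping behaviour.

import numpy as np

__max_exp__ = np.log(np.finfo(np.float64).max)   # stand-in cap (~709.78); the real module sets its own
with np.errstate(over='ignore'):                 # mimic the suppressed overflow warning
    print(np.minimum(__max_exp__, np.exp(10)))   # ~22026.47, returned unchanged
    print(np.minimum(__max_exp__, np.exp(1000))) # inf is capped to ~709.78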
def reshape_to_yolo_size(img):
input_width, input_height = img.size
min_pixel = 320.0
#max_pixel = 608
max_pixel = 1024.0
min_edge = np.minimum(input_width, input_height)
if min_edge < min_pixel:
input_width *= min_pixel / min_edge
input_height *= min_pixel / min_edge
max_edge = np.maximum(input_width, input_height)
if max_edge > max_pixel:
input_width *= max_pixel / max_edge
input_height *= max_pixel / max_edge
input_width = int(input_width / 32.0 + round(input_width % 32 / 32.0)) * 32
input_height = int(input_height / 32.0 + round(input_height % 32 / 32.0)) * 32
img = img.resize((input_width, input_height))
return img
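A quick check of the resizing above with a made-up Pillow image: the long edge is capped at 1024 and both edges snap to multiples of 32.

from PIL import Image

img = Image.new('RGB', (1280, 720))
print(reshape_to_yolo_size(img).size)   # (1024, 576)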
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.minimum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_truth_table_logical(self):
# 2, 3 and 4 serves as true values
input1 = [0, 0, 3, 2]
input2 = [0, 4, 0, 2]
typecodes = (np.typecodes['AllFloat']
+ np.typecodes['AllInteger']
+ '?') # boolean
for dtype in map(np.dtype, typecodes):
arg1 = np.asarray(input1, dtype=dtype)
arg2 = np.asarray(input2, dtype=dtype)
# OR
out = [False, True, True, True]
for func in (np.logical_or, np.maximum):
assert_equal(func(arg1, arg2).astype(bool), out)
# AND
out = [False, False, False, True]
for func in (np.logical_and, np.minimum):
assert_equal(func(arg1, arg2).astype(bool), out)
# XOR
out = [False, True, True, False]
for func in (np.logical_xor, np.not_equal):
assert_equal(func(arg1, arg2).astype(bool), out)
def test_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = with_wrap()
r.arr = arr
r.context = context
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
func, args, i = x.context
self.assertTrue(func is ncu.minimum)
self.assertEqual(len(args), 2)
assert_equal(args[0], a)
assert_equal(args[1], a)
self.assertEqual(i, 0)
def test_NotImplemented_not_returned(self):
# See gh-5964 and gh-2091. Some of these functions are not operator
# related and were fixed for other reasons in the past.
binary_funcs = [
np.power, np.add, np.subtract, np.multiply, np.divide,
np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
np.logical_and, np.logical_or, np.logical_xor, np.maximum,
np.minimum, np.mod
]
# These functions still return NotImplemented. Will be fixed in
# future.
# bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]
a = np.array('1')
b = 1
for f in binary_funcs:
assert_raises(TypeError, f, a, b)
def test_minimummaximum_func(self):
a = np.ones((2, 2))
aminimum = minimum(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum(a, a))
aminimum = minimum.outer(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum.outer(a, a))
amaximum = maximum(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum(a, a))
amaximum = maximum.outer(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum.outer(a, a))
def make_sampling_table(size, sampling_factor=1e-5):
'''This generates an array where the ith element
is the probability that a word of rank i would be sampled,
according to the sampling distribution used in word2vec.
The word2vec formula is:
p(word) = min(1, sqrt(word.frequency/sampling_factor) / (word.frequency/sampling_factor))
We assume that the word frequencies follow Zipf's law (s=1) to derive
a numerical approximation of frequency(rank):
frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))
where gamma is the Euler-Mascheroni constant.
# Arguments
size: int, number of possible words to sample.
'''
gamma = 0.577
rank = np.array(list(range(size)))
rank[0] = 1
inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1./(12.*rank)
f = sampling_factor * inv_fq
return np.minimum(1., f / np.sqrt(f))
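Rough numbers from the table above with the default sampling_factor=1e-5: low ranks (frequent words) get small keep-probabilities, while rare words approach 1.

import numpy as np

table = make_sampling_table(size=1000)
print(table[1], table[10], table[999])   # roughly 0.003, 0.017, 0.27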
def batch_iou(proposals, gt):
bboxes = np.transpose(proposals).reshape((4, -1, 1))
bboxes_x1 = bboxes[0]
bboxes_x2 = bboxes[0]+bboxes[2]
bboxes_y1 = bboxes[1]
bboxes_y2 = bboxes[1]+bboxes[3]
gt = np.transpose(gt).reshape((4, 1, -1))
gt_x1 = gt[0]
gt_x2 = gt[0]+gt[2]
gt_y1 = gt[1]
gt_y2 = gt[1]+gt[3]
widths = np.maximum(0, np.minimum(bboxes_x2, gt_x2) -
np.maximum(bboxes_x1, gt_x1))
heights = np.maximum(0, np.minimum(bboxes_y2, gt_y2) -
np.maximum(bboxes_y1, gt_y1))
intersection = widths*heights
union = bboxes[2]*bboxes[3] + gt[2]*gt[3] - intersection
return (intersection / union)
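An invented example for the proposals-vs-ground-truth batch_iou above (rows are [x, y, width, height]); the result has one row per proposal and one column per ground-truth box.

import numpy as np

proposals = np.array([[0.0, 0.0, 10.0, 10.0],
                      [5.0, 5.0, 10.0, 10.0]])
gt = np.array([[0.0, 0.0, 10.0, 10.0]])
print(batch_iou(proposals, gt))   # [[1.0], [0.1429]]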
def decode_bboxes(tcoords, anchors):
    # Scale the predicted offsets by the per-coordinate prior variances.
    var_x, var_y, var_w, var_h = config['prior_variance']
    t_x = tcoords[:, 0] * var_x
    t_y = tcoords[:, 1] * var_y
    t_w = tcoords[:, 2] * var_w
    t_h = tcoords[:, 3] * var_h
    # Convert anchors from (x, y, w, h) with a top-left corner to center form.
    a_w = anchors[:, 2]
    a_h = anchors[:, 3]
    a_x = anchors[:, 0] + a_w / 2
    a_y = anchors[:, 1] + a_h / 2
    # Apply the offsets: shift the anchor center, scale width/height exponentially.
    x = t_x * a_w + a_x
    y = t_y * a_h + a_y
    w = tf.exp(t_w) * a_w
    h = tf.exp(t_h) * a_h
    # Clip the decoded box to the [0, 1] normalized image range and
    # return corners in (y1, x1, y2, x2) order.
    x1 = tf.maximum(0., x - w / 2)
    y1 = tf.maximum(0., y - h / 2)
    x2 = tf.minimum(1., w + x1)
    y2 = tf.minimum(1., h + y1)
    return tf.stack([y1, x1, y2, x2], axis=1)