def _estimate_lambda_single_y(y):
"""Estimate lambda for a single y, given a range of lambdas
through which to search. No validation performed.
Parameters
----------
y : ndarray, shape (n_samples,)
The vector being estimated against
"""
    # ensure the input is an ndarray
    y = np.array(y)
    # use scipy's maximum-likelihood estimator; with lmbda=None, boxcox
    # returns the tuple (transformed_y, lmbda)
    b = boxcox(y, lmbda=None)
    # return the lambda that maximizes the log-likelihood
    return b[1]
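# Usage sketch for the estimator above; the data is illustrative and the
# imports mirror what the snippet assumes (numpy as np, scipy.stats.boxcox).
import numpy as np
from scipy.stats import boxcox

_rng_demo = np.random.RandomState(0)
_y_demo = _rng_demo.exponential(scale=2.0, size=200) + 1e-6  # Box-Cox needs y > 0
print(_estimate_lambda_single_y(_y_demo))  # the maximum-likelihood lambda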
def fit(self, graphs, y=None):
rnd = check_random_state(self.random_state)
n_samples = len(graphs)
    # sample at most n_samples basis vectors
    n_components = min(self.n_components, n_samples)
    inds = rnd.permutation(n_samples)
    basis_inds = inds[:n_components]
    basis = [graphs[ind] for ind in basis_inds]
basis_kernel = self.kernel(basis, basis, **self._get_kernel_params())
    # inverse square root of the (clipped) kernel matrix on the basis vectors
    U, S, V = svd(basis_kernel)
    S = np.maximum(S, 1e-12)  # guard against numerically zero singular values
    self.normalization_ = np.dot(U / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def triplet_loss(anchor, positive, negative, alpha):
"""Calculate the triplet loss according to the FaceNet paper
    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: the margin enforced between the positive and negative distances.
    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
with tf.variable_scope('triplet_loss'):
pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
        basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
return loss
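# A numpy restatement of the same margin loss for reference (a sketch, not
# from the original repo): loss = mean(max(||a-p||^2 - ||a-n||^2 + alpha, 0)).
import numpy as np

def triplet_loss_np(anchor, positive, negative, alpha):
    pos_dist = np.sum(np.square(anchor - positive), axis=1)
    neg_dist = np.sum(np.square(anchor - negative), axis=1)
    return np.mean(np.maximum(pos_dist - neg_dist + alpha, 0.0))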
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def batch_iou(boxes, box):
"""Compute the Intersection-Over-Union of a batch of boxes with another
box.
    Args:
      boxes: 2D array of boxes, each row as [cx, cy, width, height].
      box: a single array of [cx, cy, width, height].
    Returns:
      ious: 1D array of IoU values, each in the range [0, 1].
    """
lr = np.maximum(
np.minimum(boxes[:,0]+0.5*boxes[:,2], box[0]+0.5*box[2]) - \
np.maximum(boxes[:,0]-0.5*boxes[:,2], box[0]-0.5*box[2]),
0
)
tb = np.maximum(
np.minimum(boxes[:,1]+0.5*boxes[:,3], box[1]+0.5*box[3]) - \
np.maximum(boxes[:,1]-0.5*boxes[:,3], box[1]-0.5*box[3]),
0
)
inter = lr*tb
union = boxes[:,2]*boxes[:,3] + box[2]*box[3] - inter
return inter/union
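# Quick sanity check for batch_iou (hypothetical boxes, [cx, cy, w, h]):
# a box against itself gives IoU 1, a disjoint box gives IoU 0.
import numpy as np

_boxes_demo = np.array([[10., 10., 4., 4.],
                        [50., 50., 4., 4.]])
print(batch_iou(_boxes_demo, np.array([10., 10., 4., 4.])))  # ~[1., 0.]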
def imax(arrays, axis, ignore_nan=False):
"""
Maximum of a stream of arrays along an axis.
Parameters
----------
arrays : iterable
Arrays to be reduced.
axis : int or None, optional
Axis along which the maximum is found. The default
        is to find the maximum along the 'stream axis', as if all arrays in ``arrays``
were stacked along a new dimension. If ``axis = None``, arrays in ``arrays`` are flattened
before reduction.
ignore_nan : bool, optional
If True, NaNs are ignored. Default is propagation of NaNs.
Yields
------
online_max : ndarray
Cumulative maximum.
"""
ufunc = np.fmax if ignore_nan else np.maximum
yield from ireduce_ufunc(arrays, ufunc, axis)
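# Usage sketch (illustrative; ``ireduce_ufunc`` comes from the same streaming
# library and is not shown here):
#
# stream = (np.array([i, 10 - i]) for i in range(4))
# for running_max in imax(stream, axis=None):
#     print(running_max)   # cumulative maximum after each array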
def fCauchy(ftrue, alpha, p):
"""Returns Cauchy model noisy value
Cauchy with median 1e3*alpha and with p=0.2, zero otherwise
P(Cauchy > 1,10,100,1000) = 0.25, 0.032, 0.0032, 0.00032
"""
# expects ftrue to be a np.array
popsi = np.shape(ftrue)
fval = ftrue + alpha * np.maximum(0., 1e3 + (_rand(popsi) < p) *
_randn(popsi) / (np.abs(_randn(popsi)) + 1e-199))
tol = 1e-8
fval = fval + 1.01 * tol
idx = ftrue < tol
try:
fval[idx] = ftrue[idx]
except IndexError: # fval is a scalar
if idx:
fval = ftrue
return fval
def forward(self, input):
"""During the forward pass, it inhibits all inhibitions below some
threshold :math:`?`, typically :math:`0`. In other words, it computes point-wise
.. math:: y=max(0,x)
Parameters
----------
x : float32
The activation (the summed, weighted input of a neuron).
Returns
-------
float32
The output of the rectify function applied to the activation.
"""
self.last_forward = input
return np.maximum(0.0, input)
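# Minimal check of the rectifier above (illustrative): negative activations
# are clipped to zero, positive ones pass through unchanged.
import numpy as np
print(np.maximum(0.0, np.array([-2.0, -0.5, 0.0, 3.0])))  # -> [0. 0. 0. 3.]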
def update(self, params, grads):
    # Adamax update: an Adam variant based on the infinity norm
    self.iterations += 1
    # bias-corrected step size for the first-moment estimate
    a_t = self.lr / (1 - np.power(self.beta1, self.iterations))
if self.ms is None:
self.ms = [_zero(p.shape) for p in params]
if self.vs is None:
self.vs = [_zero(p.shape) for p in params]
    # update parameters
    for i, (m, v, p, g) in enumerate(zip(self.ms, self.vs, params, grads)):
        m = self.beta1 * m + (1 - self.beta1) * g   # first-moment estimate
        v = np.maximum(self.beta2 * v, np.abs(g))   # infinity-norm second moment
p -= a_t * m / (v + self.epsilon)
self.ms[i] = m
self.vs[i] = v
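# The update above is the Adamax rule (Adam based on the infinity norm):
# v_t = max(beta2 * v_{t-1}, |g_t|). A self-contained single step with
# illustrative values (m and v start at zero):
import numpy as np

lr, beta1, beta2, epsilon, t = 0.002, 0.9, 0.999, 1e-8, 1
p = np.array([1.0, -1.0])
g = np.array([0.1, -0.2])
m = (1 - beta1) * g            # first moment after one step
v = np.abs(g)                  # max(beta2 * 0, |g|)
p -= lr / (1 - beta1 ** t) * m / (v + epsilon)
print(p)                       # each weight moves against its gradient sign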
def plot_beta():
'''plot beta over training
'''
beta = args.beta
scale = args.scale
beta_min = args.beta_min
num_epoch = args.num_epoch
epoch_size = int(float(args.num_examples) / args.batch_size)
x = np.arange(num_epoch*epoch_size)
y = beta * np.power(scale, x)
y = np.maximum(y, beta_min)
epoch_x = np.arange(num_epoch) * epoch_size
epoch_y = beta * np.power(scale, epoch_x)
epoch_y = np.maximum(epoch_y, beta_min)
# plot beta descent curve
plt.semilogy(x, y)
plt.semilogy(epoch_x, epoch_y, 'ro')
plt.title('beta descent')
plt.ylabel('beta')
    plt.xlabel('update')
plt.show()
def convert_to_square(bbox):
"""Convert bbox to square
Parameters:
----------
bbox: numpy array , shape n x 5
input bbox
Returns:
-------
square bbox
"""
square_bbox = bbox.copy()
h = bbox[:, 3] - bbox[:, 1] + 1
w = bbox[:, 2] - bbox[:, 0] + 1
max_side = np.maximum(h,w)
square_bbox[:, 0] = bbox[:, 0] + w*0.5 - max_side*0.5
square_bbox[:, 1] = bbox[:, 1] + h*0.5 - max_side*0.5
square_bbox[:, 2] = square_bbox[:, 0] + max_side - 1
square_bbox[:, 3] = square_bbox[:, 1] + max_side - 1
return square_bbox
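# Example for convert_to_square (hypothetical [x1, y1, x2, y2, score] box):
# a 21x41 box becomes 41x41 around the same center.
import numpy as np
print(convert_to_square(np.array([[10., 20., 30., 60., 0.9]])))
# -> [[ 0. 20. 40. 60.  0.9]]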
def custom_crop(img, bbox):
# bbox = [x-left, y-top, width, height]
imsiz = img.shape # [height, width, channel]
# if box[0] + box[2] >= imsiz[1] or\
# box[1] + box[3] >= imsiz[0] or\
# box[0] <= 0 or\
# box[1] <= 0:
# box[0] = np.maximum(0, box[0])
# box[1] = np.maximum(0, box[1])
# box[2] = np.minimum(imsiz[1] - box[0] - 1, box[2])
# box[3] = np.minimum(imsiz[0] - box[1] - 1, box[3])
center_x = int((2 * bbox[0] + bbox[2]) / 2)
center_y = int((2 * bbox[1] + bbox[3]) / 2)
R = int(np.maximum(bbox[2], bbox[3]) * 0.75)
y1 = np.maximum(0, center_y - R)
y2 = np.minimum(imsiz[0], center_y + R)
x1 = np.maximum(0, center_x - R)
x2 = np.minimum(imsiz[1], center_x + R)
img_cropped = img[y1:y2, x1:x2, :]
return img_cropped
def append(self, x):
    # Welford's online algorithm for running mean/variance, plus min/max
    self._count += 1
    if self._count == 1:
        self.m = x
        self.s = 0.0  # define s up front so a variance query after one append works
        self.last_m = x
        self.last_s = 0.0
        self.min = x
        self.max = x
else:
self.m = self.last_m + (x - self.last_m) / self._count
self.s = self.last_s + (x - self.last_m) * (x - self.m)
self.last_m = self.m
self.last_s = self.s
self.min = numpy.minimum(self.min, x)
self.max = numpy.maximum(self.max, x)
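# After n appends, ``m`` holds the running mean and ``s / (n - 1)`` the
# sample variance. Hypothetical usage, assuming the enclosing class is
# instantiable as ``Stats`` (the class name is not shown in this snippet):
#
# st = Stats()
# for x in [1.0, 2.0, 4.0]:
#     st.append(x)
# mean, var = st.m, st.s / (st._count - 1)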
def clip_boxes(boxes, im_shape):
"""
Clip boxes to image boundaries.
:param boxes: [N, 4* num_classes]
:param im_shape: tuple of 2
:return: [N, 4* num_classes]
"""
# x1 >= 0
boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
# y2 < im_shape[0]
boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
return boxes
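# Example for clip_boxes (hypothetical values) on an image with
# (height, width) = (100, 200):
import numpy as np
print(clip_boxes(np.array([[-5., 10., 250., 120.]]), (100, 200)))
# -> [[  0.  10. 199.  99.]]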
def apply_nms(all_boxes, thresh):
"""Apply non-maximum suppression to all predicted boxes output by the
test_net method.
"""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
    nms_boxes = [[[] for _ in range(num_images)]
                 for _ in range(num_classes)]
    for cls_ind in range(num_classes):
        for im_ind in range(num_images):
            dets = all_boxes[cls_ind][im_ind]
            if len(dets) == 0:
continue
keep = nms(dets, thresh)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
return nms_boxes
def get_IOU(rec1, rec2):
"""
rec1&2 are both np.arrays with x_center, y_center, width, height
should work with any dimension as long as the last dimension is 4
"""
rec1_xy_max = rec1[..., :2] + (rec1[..., 2:4] - 1) / 2
rec1_xy_min = rec1[..., :2] - (rec1[..., 2:4] - 1) / 2
rec2_xy_max = rec2[..., :2] + (rec2[..., 2:4] - 1) / 2
rec2_xy_min = rec2[..., :2] - (rec2[..., 2:4] - 1) / 2
intersec_max = np.minimum(rec1_xy_max, rec2_xy_max)
intersec_min = np.maximum(rec1_xy_min, rec2_xy_min)
intersec_wh = np.maximum(intersec_max - intersec_min + 1, 0)
intersec_area = intersec_wh[..., 0] * intersec_wh[..., 1]
area1 = rec1[..., 2] * rec1[..., 3]
area2 = rec2[..., 2] * rec2[..., 3]
union = area1 + area2 - intersec_area
return intersec_area / union
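# Quick check for get_IOU (hypothetical [cx, cy, w, h] boxes): identical
# boxes give 1.0; the +/- 1 terms treat coordinates as inclusive pixels.
import numpy as np
_r = np.array([8., 8., 4., 4.])
print(get_IOU(_r, _r))          # -> 1.0
print(get_IOU(_r, _r + 100.))   # -> 0.0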
def set_responsibilities(anchor_frames, iou_thresh=0.6):
"""
Changes the IOU values for the anchor frames to binary values
anchor_frames: list of frames where each frame contains all features for a specific anchor
iou_thresh: threshold to decide which anchor is responsible
"""
# set box with maximum IOU to 1
anchor_frames = [frame.copy() for frame in anchor_frames]
# find maximum IOU value over all frames
helper_array = np.array([frame[frame.columns[0]] for frame in anchor_frames]).T
max_indices = np.argmax(helper_array, axis=1)
data_idx = np.arange(len(max_indices))
for obj_idx, frame_idx in zip(data_idx, max_indices):
temp_frame = anchor_frames[frame_idx]
temp_frame.loc[obj_idx, temp_frame.columns[0]] = 1
# applying the iou threshold on a copy of the dataframes
for frame in anchor_frames:
frame[frame.columns[0]] = np.digitize(frame[frame.columns[0]], [iou_thresh])
return anchor_frames
def divergence(self, V, W, H):
"""
Compute divergence between reconstruction and original
"""
    R = np.maximum(np.dot(W, H), eps)  # eps is a small module-level constant
    V = np.maximum(V, eps)             # floor values to avoid log(0) and division by zero
err = 0
if self.update == self.kl_updates:
err = np.sum(np.multiply(V, np.log(V/R)) - V + R)
elif self.update == self.euc_updates:
err = np.sum((V - np.dot(W,H)) ** 2)
elif self.update == self.is_updates:
err = np.sum(V/R - np.log(V/R) - 1)
elif self.update == self.beta_updates:
err = (np.sum(V ** self.beta + (self.beta -1) * R ** self.beta
- self.beta * V * R ** (self.beta - 1))
/ (self.beta * (self.beta - 1)))
return err
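# Worked check for the KL branch above: when V equals the reconstruction
# R = W.H exactly, sum(V*log(V/R) - V + R) = sum(0 - V + V) = 0, and the
# other divergences vanish as well, so a perfect factorization scores 0.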
def beta_updates(self, V, W, H):
"""
Optimize B-divergence
"""
    if self.update_W:
        R = np.maximum(np.dot(W, H), eps)
        W *= (np.dot(R ** (self.beta - 2) * V, H.T) /
              np.maximum(np.dot(R ** (self.beta - 1), H.T), eps))
        W = self.normalize(W, self.W_norm, 0)
    if self.update_H:
        R = np.maximum(np.dot(W, H), eps)
        H *= (np.dot(W.T, R ** (self.beta - 2) * V) /
              np.maximum(np.dot(W.T, R ** (self.beta - 1)), eps))
        H = self.normalize(H, self.H_norm, 1)
return [V, W, H]
def interp1d_(xin_,xp,yp_):
"""
    Interpolate a uniformly sampled piecewise-linear function, mapping each
    element of xin_ to its interpolated value. Inputs are clipped to the range of xp.
xin_ : input tensor (real)
xp : x grid (constant -- must be a 1d numpy array, uniformly spaced)
yp_ : tensor of the result values at the gridpoints xp
"""
import tensorflow as tf
x_ = tf.clip_by_value(xin_,xp.min(),xp.max())
dx = xp[1]-xp[0]
assert len(xp.shape)==1,'only 1d interpolation'
assert xp.shape[0]==int(yp_.get_shape()[0])
assert abs(np.diff(xp)/dx - 1.0).max() < 1e-6,'must be uniformly sampled'
x1_ = tf.expand_dims(x_,-1)
dt = yp_.dtype
wt_ = tf.maximum(tf.constant(0.,dtype=dt), 1-abs(x1_ - tf.constant(xp,dtype=dt))/dx )
y_ = tf.reduce_sum(wt_ * yp_,axis=-1)
return y_
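# Usage sketch for interp1d_ (illustrative; the snippet is TF1-era, but this
# particular call should also run eagerly under TF2 as written):
import numpy as np
import tensorflow as tf

_xp = np.linspace(0., 1., 5)                      # uniform grid
_yp = tf.constant(_xp ** 2, dtype=tf.float32)     # values at the gridpoints
_x = tf.constant([0.1, 0.5, 0.9], dtype=tf.float32)
print(interp1d_(_x, _xp, _yp))                    # piecewise-linear estimate of x**2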
def overlap_ratio(boxes1, boxes2):
# find intersection bbox
x_int_bot = np.maximum(boxes1[:, 0], boxes2[0])
x_int_top = np.minimum(boxes1[:, 0] + boxes1[:, 2], boxes2[0] + boxes2[2])
y_int_bot = np.maximum(boxes1[:, 1], boxes2[1])
y_int_top = np.minimum(boxes1[:, 1] + boxes1[:, 3], boxes2[1] + boxes2[3])
# find intersection area
dx = x_int_top - x_int_bot
dy = y_int_top - y_int_bot
area_int = np.where(np.logical_and(dx>0, dy>0), dx * dy, np.zeros_like(dx))
# find union
area_union = boxes1[:,2] * boxes1[:,3] + boxes2[2] * boxes2[3] - area_int
# find overlap ratio
ratio = np.where(area_union > 0, area_int/area_union, np.zeros_like(area_int))
return ratio
###########################################################################
# overlap_ratio of two bboxes #
###########################################################################
def overlap_ratio_pair(boxes1, boxes2):
# find intersection bbox
x_int_bot = np.maximum(boxes1[:, 0], boxes2[:, 0])
x_int_top = np.minimum(boxes1[:, 0] + boxes1[:, 2], boxes2[:, 0] + boxes2[:, 2])
y_int_bot = np.maximum(boxes1[:, 1], boxes2[:, 1])
y_int_top = np.minimum(boxes1[:, 1] + boxes1[:, 3], boxes2[:, 1] + boxes2[:, 3])
# find intersection area
dx = x_int_top - x_int_bot
dy = y_int_top - y_int_bot
area_int = np.where(np.logical_and(dx>0, dy>0), dx * dy, np.zeros_like(dx))
# find union
area_union = boxes1[:,2] * boxes1[:,3] + boxes2[:, 2] * boxes2[:, 3] - area_int
# find overlap ratio
ratio = np.where(area_union > 0, area_int/area_union, np.zeros_like(area_int))
return ratio
def lerp(p0, p1, t):
"""Linear interpolation."""
return (1.0 - t) * p0 + t * p1
# A note on formats:
# Sketches are encoded as a sequence of strokes. stroke-3 and stroke-5 are
# different stroke encodings.
# stroke-3 uses 3-tuples, consisting of x-offset, y-offset, and a binary
# variable which is 1 if the pen is lifted between this position and
# the next, and 0 otherwise.
# stroke-5 consists of x-offset, y-offset, and p_1, p_2, p_3, a binary
# one-hot vector of 3 possible pen states: pen down, pen up, end of sketch.
# See section 3.1 of https://arxiv.org/abs/1704.03477 for more detail.
# Sketch-RNN takes input in stroke-5 format, with sketches padded to a common
# maximum length and prefixed by the special start token [0, 0, 1, 0, 0]
# The QuickDraw dataset is stored using stroke-3.
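# A minimal stroke-3 -> stroke-5 converter following the note above (a
# hypothetical helper, not from the original source): p_1 = pen down,
# p_2 = pen up, p_3 = end of sketch; sketches are padded out to max_len.
import numpy as np

def to_stroke5(strokes3, max_len):
    result = np.zeros((max_len, 5), dtype=np.float32)
    n = len(strokes3)
    result[:n, 0:2] = strokes3[:, 0:2]   # x/y offsets copied through
    result[:n, 3] = strokes3[:, 2]       # pen-lift bit becomes p_2
    result[:n, 2] = 1 - result[:n, 3]    # p_1 is the complement (pen down)
    result[n:, 4] = 1                    # end-of-sketch padding (p_3)
    return result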
def eqsize(*args):
    m = 0
    varargout = [None] * len(args)
    # first pass: find the largest monomial count across all inputs
    for a in range(len(args)):
        p1 = args[a]
        for i in range(p1.size):
            m = np.maximum(m, (p1[i].monomials).shape[0])
    # second pass: pad every polynomial up to m monomials
    for a in range(len(args) - 1, -1, -1):
        p1 = args[a]
for i in range(0, p1.size):
if (p1[i].monomials).shape[0] < m:
p1[i].monomials[m, :] = 0
varargout[a] = p1
return varargout
def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max):
"""
TensorFlow implementation for apply perturbations to input features based
on salency maps
:param i: index of first selected feature
:param j: index of second selected feature
:param X: a matrix containing our input features for our sample
:param increase: boolean; true if we are increasing pixels, false otherwise
:param theta: delta for each feature adjustment
:param clip_min: mininum value for a feature in our sample
:param clip_max: maximum value for a feature in our sample
: return: a perturbed input feature matrix for a target class
"""
# perturb our input sample
if increase:
X[0, i] = np.minimum(clip_max, X[0, i] + theta)
X[0, j] = np.minimum(clip_max, X[0, j] + theta)
else:
X[0, i] = np.maximum(clip_min, X[0, i] - theta)
X[0, j] = np.maximum(clip_min, X[0, j] - theta)
return X
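# Example for apply_perturbations (illustrative values): increase features
# 0 and 1 by theta = 0.3, clipped into [0, 1].
import numpy as np
print(apply_perturbations(0, 1, np.array([[0.2, 0.9, 0.5]]),
                          True, 0.3, 0.0, 1.0))  # -> [[0.5 1.  0.5]]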
def _update_statistics(self, new_stats, stats):
new_stats = create_dict(new_stats)
if stats is None:
stats = new_stats
return stats
# update the stats layerwise
for l_i in range(len(stats)):
for subtype,_ in subtypes:
# TODO: Have to check the type to see if this is needed
cnt_old = 1.0 * stats[l_i][subtype]['cnt']
stats[l_i][subtype]['cnt'] = (stats[l_i][subtype]['cnt']
+ new_stats[l_i][subtype]['cnt'])
norm = np.maximum(stats[l_i][subtype]['cnt'], 1.0)
for key in subtype_keys:
if key not in subtype_keys_no_aggregation:
tmp_old = cnt_old / norm * stats[l_i][subtype][key]
tmp_new = (new_stats[l_i][subtype]['cnt']
/ norm * new_stats[l_i][subtype][key])
stats[l_i][subtype][key] = tmp_old + tmp_new
return stats
def iou_loss(p, t):
# print "pass"
tp, tt = p.reshape((p.shape[0], 2, 2)), t.reshape((t.shape[0], 2, 2))
overlaps_t0 = T.maximum(tp[:, 0, :], tt[:, 0, :])
overlaps_t1 = T.minimum(tp[:, 1, :], tt[:, 1, :])
intersection = overlaps_t1 - overlaps_t0
    bool_overlap = T.min(intersection, axis=1) > 0  # note: computed but unused below
intersection = intersection[:, 0] * intersection[:, 1]
intersection = T.maximum(intersection, np.float32(0.))
dims_p = tp[:, 1, :] - tp[:, 0, :]
areas_p = dims_p[:, 0] * dims_p[:, 1]
dims_t = tt[:, 1, :] - tt[:, 0, :]
areas_t = dims_t[:, 0] * dims_t[:, 1]
union = areas_p + areas_t - intersection
loss = 1. - T.minimum(
T.exp(T.log(T.abs_(intersection)) -
T.log(T.abs_(union) + np.float32(1e-5))),
np.float32(1.)
)
return T.mean(loss)
def iou_loss_val(p, t):
tp, tt = p.reshape((p.shape[0], 2, 2)), t.reshape((t.shape[0], 2, 2))
overlaps = np.zeros_like(tp, dtype=np.float32)
overlaps[:, 0, :] = np.maximum(tp[:, 0, :], tt[:, 0, :])
overlaps[:, 1, :] = np.minimum(tp[:, 1, :], tt[:, 1, :])
intersection = overlaps[:, 1, :] - overlaps[:, 0, :]
    bool_overlap = np.min(intersection, axis=1) > 0  # note: computed but unused below
intersection = intersection[:, 0] * intersection[:, 1]
intersection = np.maximum(intersection, 0.)
# print "bool", bool_overlap
# print "Int", intersection
dims_p = tp[:, 1, :] - tp[:, 0, :]
areas_p = dims_p[:, 0] * dims_p[:, 1]
dims_t = tt[:, 1, :] - tt[:, 0, :]
areas_t = dims_t[:, 0] * dims_t[:, 1]
union = areas_p + areas_t - intersection
# print "un", union
loss = 1. - np.minimum(
np.exp(np.log(np.abs(intersection)) - np.log(np.abs(union) + 1e-5)),
1.
)
return np.mean(loss)
def _log_single(x):
"""Sanitized log function for a single element.
    Since this method internally calls np.log, which warns and yields
    -inf for an input of zero, the method suppresses all warnings and
    floors the result at ``__min_log__``.
#XXX: at some point we might want to let ``suppress_warnings``
# specify exactly which types of warnings it should filter.
Parameters
----------
x : float, int
The number to log
Returns
-------
val : float
the log of x
"""
x = np.maximum(0, x)
val = __min_log__ if x == 0 else np.maximum(__min_log__, np.log(x))
return val
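# Behavior sketch for _log_single (``__min_log__`` is a module-level constant
# not shown here; values illustrative):
#
# _log_single(np.e)   # -> 1.0
# _log_single(0.0)    # -> __min_log__
# _log_single(-3.0)   # -> __min_log__ (negative inputs are clipped to 0 first)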