def iou_loss_val(p, t):
    """Mean IoU loss over a batch of axis-aligned boxes.

    Boxes are flattened (x0, y0, x1, y1) corner pairs, reshaped below to
    (batch, 2 corners, 2 coords).
    """
    tp = p.reshape((p.shape[0], 2, 2))
    tt = t.reshape((t.shape[0], 2, 2))
    overlaps = np.zeros_like(tp, dtype=np.float32)
    overlaps[:, 0, :] = np.maximum(tp[:, 0, :], tt[:, 0, :])
    overlaps[:, 1, :] = np.minimum(tp[:, 1, :], tt[:, 1, :])
    # Clamp each axis extent at zero before multiplying, so disjoint boxes
    # (two negative extents) cannot produce a spurious positive area.
    intersection = np.maximum(overlaps[:, 1, :] - overlaps[:, 0, :], 0.)
    intersection = intersection[:, 0] * intersection[:, 1]
    dims_p = tp[:, 1, :] - tp[:, 0, :]
    areas_p = dims_p[:, 0] * dims_p[:, 1]
    dims_t = tt[:, 1, :] - tt[:, 0, :]
    areas_t = dims_t[:, 0] * dims_t[:, 1]
    union = areas_p + areas_t - intersection
    loss = 1. - np.minimum(intersection / (np.abs(union) + 1e-5), 1.)
    return np.mean(loss)
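# Quick smoke test for iou_loss_val with illustrative values: boxes in
# flattened (x0, y0, x1, y1) corner form. Identical boxes contribute a
# loss of ~0; the second pair overlaps with IoU = 1/7.
pred = np.array([[0., 0., 2., 2.],
                 [0., 0., 2., 2.]])
targ = np.array([[0., 0., 2., 2.],
                 [1., 1., 3., 3.]])
print(iou_loss_val(pred, targ))  # ~0.43, i.e. the mean of 0 and 6/7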
# Python zeros_like() usage examples, collected from assorted projects.
# Each snippet assumes the usual imports for its libraries, e.g.
# `import numpy as np`, `import cv2`, `import tensorflow as tf`,
# `from sklearn.metrics import fbeta_score`, plus project-local helpers
# such as `col2im_cython`, `formPT`, `loom`, and `BinaryLoomOp`.
def test_constant_network_with_tags_dry_run(self):
    shape1 = loom.TypeShape('int64', (3,), 'alpha')
    shape2 = loom.TypeShape('int64', (3,), 'beta')
    value1 = np.array([1, 2, 3], dtype='int64')
    value2 = np.array([4, 5, 6], dtype='int64')
    ops = {'add1': BinaryLoomOp(shape1, tf.add),
           'add2': BinaryLoomOp(shape2, tf.add)}
    the_loom = loom.Loom(named_ops=ops, dry_run=True)
    output_tensor1 = the_loom.output_tensor(shape1)
    output_tensor2 = the_loom.output_tensor(shape2)
    with self.test_session():
        weaver = the_loom.make_weaver()
        c1 = weaver(value1, tag='alpha')
        c2 = weaver(value2, tag='beta')
        result1 = output_tensor1.eval(
            feed_dict=weaver.build_feed_dict([c2, c1]))
        result2 = output_tensor2.eval(
            feed_dict=weaver.build_feed_dict([c2, c1]))
        zero_vec = np.zeros_like(value1)
        self.assertTrue((result1[0] == zero_vec).all())
        self.assertTrue((result2[0] == zero_vec).all())
def optimise_f2_thresholds(y, p, verbose=True, resolution=100):
    """Greedy per-label threshold search maximising the F2 score.

    y and p are (n_samples, 17) arrays of binary labels and predicted
    probabilities; the label count 17 is hard-coded.
    """
    def mf(x):
        p2 = np.zeros_like(p)
        for i in range(17):
            p2[:, i] = (p[:, i] > x[i]).astype(int)
        score = fbeta_score(y, p2, beta=2, average='samples')
        return score

    x = [0.2] * 17
    for i in range(17):
        best_i2 = 0
        best_score = 0
        for i2 in range(resolution):
            i2 /= resolution
            x[i] = i2
            score = mf(x)
            if score > best_score:
                best_i2 = i2
                best_score = score
        x[i] = best_i2
        if verbose:
            print(i, best_i2, best_score)
    return x
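# Illustrative run on random data, assuming the 17-label setup
# hard-coded above; `fbeta_score` comes from sklearn.metrics.
rng = np.random.RandomState(0)
y_true = (rng.rand(200, 17) > 0.5).astype(int)
probs = rng.rand(200, 17)
thresholds = optimise_f2_thresholds(y_true, probs, verbose=False)
print(thresholds)  # one tuned threshold per label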
def buildFock(self):
    """Routine to build the AO basis Fock matrix"""
    if self.direct:
        if self.incFockRst:  # restart incremental fock build?
            self.G = formPT(self.P, np.zeros_like(self.P), self.bfs,
                            self.nbasis, self.screen, self.scrTol)
            self.G = 0.5 * (self.G + self.G.T)
            self.F = self.Core.astype('complex') + self.G
        else:
            self.G = formPT(self.P, self.P_old, self.bfs, self.nbasis,
                            self.screen, self.scrTol)
            self.G = 0.5 * (self.G + self.G.T)
            self.F = self.F_old + self.G
    else:
        self.J = np.einsum('pqrs,sr->pq', self.TwoE.astype('complex'), self.P)
        self.K = np.einsum('psqr,sr->pq', self.TwoE.astype('complex'), self.P)
        self.G = 2. * self.J - self.K
        self.F = self.Core.astype('complex') + self.G
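# A minimal sketch of the conventional (non-direct) branch above: the
# Coulomb (J) and exchange (K) contractions of a two-electron integral
# tensor with a density matrix. The random arrays here stand in for
# real integrals and are for shape-checking only.
nbf = 4
eri = np.random.rand(nbf, nbf, nbf, nbf)   # stand-in for self.TwoE
P = np.random.rand(nbf, nbf)
P = 0.5 * (P + P.T)                        # density matrix is symmetric
J = np.einsum('pqrs,sr->pq', eri, P)       # Coulomb term
K = np.einsum('psqr,sr->pq', eri, P)       # exchange term
G = 2. * J - K                             # two-electron part of F
print(G.shape)                             # (4, 4)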
def svm_loss(x, y):
    """
    Computes the loss and gradient for multiclass SVM classification.
    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth
      class for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
      0 <= y[i] < C
    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    N = x.shape[0]
    correct_class_scores = x[np.arange(N), y]
    margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
    margins[np.arange(N), y] = 0
    loss = np.sum(margins) / N
    num_pos = np.sum(margins > 0, axis=1)
    dx = np.zeros_like(x)
    dx[margins > 0] = 1
    dx[np.arange(N), y] -= num_pos
    dx /= N
    return loss, dx
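# Small sanity check for svm_loss: random scores for N=5 samples and
# C=4 classes. The analytic gradient can be compared against
# eval_numerical_gradient_array (defined later in this collection).
N, C = 5, 4
scores = np.random.randn(N, C)
labels = np.random.randint(C, size=N)
loss, dscores = svm_loss(scores, labels)
print(loss, dscores.shape)  # scalar loss and a (5, 4) gradient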
def sgd_momentum(w, dw, config=None):
    """
    Performs stochastic gradient descent with momentum.
    config format:
    - learning_rate: Scalar learning rate.
    - momentum: Scalar between 0 and 1 giving the momentum value.
      Setting momentum = 0 reduces to sgd.
    - velocity: A numpy array of the same shape as w and dw used to store a
      moving average of the gradients.
    """
    if config is None:
        config = {}
    config.setdefault('learning_rate', 1e-2)
    config.setdefault('momentum', 0.9)
    v = config.get('velocity', np.zeros_like(w))
    next_v = config['momentum'] * v - config['learning_rate'] * dw
    next_w = w + next_v
    config['velocity'] = next_v
    return next_w, config
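# Minimal usage sketch: minimise f(w) = 0.5 * ||w||^2, whose gradient
# is w itself; the config dict carries the velocity between steps.
w = np.array([5.0, -3.0])
cfg = {'learning_rate': 0.1, 'momentum': 0.9}
for _ in range(200):
    w, cfg = sgd_momentum(w, w, config=cfg)
print(w)  # ~[0, 0]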
def hls_select(image, thresh=(90, 255)):
    # 1) Convert to HLS color space and pull out the S channel
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    S = hls[:, :, 2]
    # 2) Apply the threshold to the S channel (previously a hard-coded
    #    (90, 255) shadowed the thresh argument; it is now the default)
    binary = np.zeros_like(S)
    binary[(S > thresh[0]) & (S <= thresh[1])] = 1
    # 3) Return a binary image of the threshold result
    return binary
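# Illustrative call, assuming the image is in RGB order (cv2.imread
# returns BGR, so convert first); 'test.jpg' is a placeholder path.
img_bgr = cv2.imread('test.jpg')
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
s_binary = hls_select(img_rgb, thresh=(90, 255))  # 1 where S in (90, 255]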
# Define a function that applies Sobel x and y,
# then computes the direction of the gradient
# and applies a threshold.
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    # Apply the following steps to img
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # 3) Take the absolute value of the x and y gradients
    abs_sobelx = np.absolute(sobelx)
    abs_sobely = np.absolute(sobely)
    # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
    absgraddir = np.arctan2(abs_sobely, abs_sobelx)
    # 5) Create a binary mask where direction thresholds are met
    binary_output = np.zeros_like(absgraddir)
    binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
    # 6) Return this mask as your binary_output image
    return binary_output
# Define a function that applies Sobel x and y,
# then computes the magnitude of the gradient
# and applies a threshold
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    # Apply the following steps to img
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # 3) Calculate the magnitude
    gradmag = np.sqrt(sobelx**2 + sobely**2)
    # 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
    scale_factor = np.max(gradmag) / 255
    gradmag = (gradmag / scale_factor).astype(np.uint8)
    # 5) Create a binary mask where mag thresholds are met
    binary_output = np.zeros_like(gradmag)
    binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
    # 6) Return this mask as your binary_output image
    return binary_output
# Define a function that applies Sobel x or y,
# then takes an absolute value and applies a threshold.
def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255):
    # Apply the following steps to img
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the derivative in x or y given orient = 'x' or 'y'
    if orient == 'x':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
    else:
        sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
    # 3) Take the absolute value of the derivative or gradient
    abs_sobel = np.absolute(sobel)
    # 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8
    scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
    # 5) Create a mask of 1's where the scaled gradient magnitude
    #    is > thresh_min and < thresh_max
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    # 6) Return this mask as your binary_output image
    return binary_output
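# Combined usage sketch for the three gradient thresholds above,
# typical of a lane-finding pipeline; the image path and threshold
# values are illustrative.
img = cv2.cvtColor(cv2.imread('road.jpg'), cv2.COLOR_BGR2RGB)
gradx = abs_sobel_thresh(img, orient='x', thresh_min=20, thresh_max=100)
mag_binary = mag_thresh(img, sobel_kernel=9, mag_thresh=(30, 100))
dir_binary = dir_threshold(img, sobel_kernel=15, thresh=(0.7, 1.3))
combined = np.zeros_like(dir_binary)
combined[(gradx == 1) | ((mag_binary == 1) & (dir_binary == 1))] = 1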
def region_of_interest(img, vertices):
    """
    Applies an image mask.
    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    # defining a blank mask to start with
    mask = np.zeros_like(img)
    # defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255
    # filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    # returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
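# Typical lane-finding usage: keep a triangular region spanning the
# bottom of the frame; 'road.jpg' and the vertex coordinates are
# illustrative. Note the extra outer dimension, since cv2.fillPoly
# expects an array of polygons.
img = cv2.imread('road.jpg')
h, w = img.shape[:2]
vertices = np.array([[(0, h), (w // 2, h // 2), (w, h)]], dtype=np.int32)
roi = region_of_interest(img, vertices)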
def update_grad_input(self, x, grad_output, scale=1):
    # Average-pooling backward pass: spread each upstream gradient
    # uniformly over its pooling window, then fold the columns back
    # into image layout with col2im.
    x_cols = self.x_cols
    dout = grad_output
    N, C, H, W = self.x_shape
    pool_height, pool_width = self.kH, self.kW
    stride = self.dW
    pool_dim = pool_height * pool_width
    dout_reshaped = dout.transpose(2, 3, 0, 1).flatten()
    dx_cols = np.zeros_like(x_cols)
    dx_cols[:, np.arange(dx_cols.shape[1])] = 1. / pool_dim * dout_reshaped
    dx = col2im_cython(dx_cols, N * C, 1, H, W, pool_height, pool_width,
                       padding=0, stride=stride)
    self.grad_input = dx.reshape(self.x_shape)
    return self.grad_input
def update_grad_input(self, x, grad_output, scale=1):
    # Max-pooling backward pass: route each upstream gradient to the
    # argmax position recorded during the forward pass, then fold the
    # columns back into image layout with col2im.
    x_cols = self.x_cols
    x_cols_argmax = self.x_cols_argmax
    dout = grad_output
    N, C, H, W = x.shape
    pool_height, pool_width = self.kH, self.kW
    stride = self.dW
    dout_reshaped = dout.transpose(2, 3, 0, 1).flatten()
    dx_cols = np.zeros_like(x_cols)
    dx_cols[x_cols_argmax, np.arange(dx_cols.shape[1])] = dout_reshaped
    dx = col2im_cython(dx_cols, N * C, 1, H, W, pool_height, pool_width,
                       padding=0, stride=stride)
    dx = dx.reshape(self.x_shape)
    self.grad_input = dx
    return self.grad_input
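# The core trick in the max-pool backward pass above, in isolation:
# scatter the upstream gradient into each column of dx_cols at the row
# that held the maximum in the forward pass. Toy shapes only.
pool_dim, n_windows = 4, 6
x_cols = np.random.randn(pool_dim, n_windows)
x_cols_argmax = np.argmax(x_cols, axis=0)      # winner per window
dout_reshaped = np.random.randn(n_windows)     # upstream gradient
dx_cols = np.zeros_like(x_cols)
dx_cols[x_cols_argmax, np.arange(n_windows)] = dout_reshaped
print(dx_cols)  # exactly one nonzero entry per column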
def eval_numerical_gradient_array(f, x, df, h=1e-5):
    '''
    Evaluate a numeric gradient for a function that accepts a numpy
    array and returns a numpy array.
    '''
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index
        oldval = x[ix]
        x[ix] = oldval + h
        pos = f(x).copy()
        x[ix] = oldval - h
        neg = f(x).copy()
        x[ix] = oldval
        grad[ix] = np.sum((pos - neg) * df) / (2 * h)
        it.iternext()
    return grad
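# Sanity check: for the elementwise map f(x) = x**2, the exact
# backward product with df is 2 * x * df; the centered-difference
# estimate should agree to roughly h**2 accuracy.
x = np.random.randn(3, 4)
df = np.random.randn(3, 4)
num_grad = eval_numerical_gradient_array(lambda v: v ** 2, x, df)
print(np.max(np.abs(num_grad - 2 * x * df)))  # tiny, ~1e-9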
def nesterov(w, dw, config=None):
    '''
    Performs stochastic gradient descent with nesterov momentum.
    config format:
    - learning_rate: Scalar learning rate.
    - momentum: Scalar between 0 and 1 giving the momentum value.
      Setting momentum = 0 reduces to sgd.
    - velocity: A numpy array of the same shape as w and dw used to store a
      moving average of the gradients.
    '''
    if config is None:
        config = {}
    config.setdefault('learning_rate', 1e-2)
    config.setdefault('momentum', 0.9)
    v = config.get('velocity', np.zeros_like(w, dtype=np.float64))
    prev_v = v
    v = config['momentum'] * v - config['learning_rate'] * dw
    next_w = w - config['momentum'] * prev_v + (1 + config['momentum']) * v
    config['velocity'] = v
    return next_w, config
def sgd_momentum(w, dw, config=None):
    '''
    Performs stochastic gradient descent with momentum.
    config format:
    - learning_rate: Scalar learning rate.
    - momentum: Scalar between 0 and 1 giving the momentum value.
      Setting momentum = 0 reduces to sgd.
    - velocity: A numpy array of the same shape as w and dw used to store a
      moving average of the gradients.
    '''
    if config is None:
        config = {}
    config.setdefault('learning_rate', 1e-2)
    config.setdefault('momentum', 0.9)
    v = config.get('velocity', np.zeros_like(w))
    v = config['momentum'] * v + config['learning_rate'] * dw
    next_w = w - v
    config['velocity'] = v
    return next_w, config
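# Side-by-side sketch of the two updates above on f(w) = 0.5 * w**2
# (so dw = w), starting from the same point with the default settings;
# both shrink w toward the minimiser at 0.
w_m = np.array([5.0])
w_n = np.array([5.0])
cfg_m, cfg_n = None, None
for _ in range(200):
    w_m, cfg_m = sgd_momentum(w_m, w_m, cfg_m)
    w_n, cfg_n = nesterov(w_n, w_n, cfg_n)
print(w_m, w_n)  # both very close to 0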
def test_float(self):
    # offset for alignment test
    for i in range(4):
        assert_array_equal(self.f[i:] > 0, self.ef[i:])
        assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
        assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
        assert_array_equal(-self.f[i:] < 0, self.ef[i:])
        assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
        r = self.f[i:] != 0
        assert_array_equal(r, self.ef[i:])
        r2 = self.f[i:] != np.zeros_like(self.f[i:])
        r3 = 0 != self.f[i:]
        assert_array_equal(r, r2)
        assert_array_equal(r, r3)
        # check bool == 0x1
        assert_array_equal(r.view(np.int8), r.astype(np.int8))
        assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
        assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
        # isnan on amd64 takes the same code path
        assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
def test_double(self):
    # offset for alignment test
    for i in range(2):
        assert_array_equal(self.d[i:] > 0, self.ed[i:])
        assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
        assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
        assert_array_equal(-self.d[i:] < 0, self.ed[i:])
        assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
        r = self.d[i:] != 0
        assert_array_equal(r, self.ed[i:])
        r2 = self.d[i:] != np.zeros_like(self.d[i:])
        r3 = 0 != self.d[i:]
        assert_array_equal(r, r2)
        assert_array_equal(r, r3)
        # check bool == 0x1
        assert_array_equal(r.view(np.int8), r.astype(np.int8))
        assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
        assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
        # isnan on amd64 takes the same code path
        assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
def test_round(self):
    def check_round(arr, expected, *round_args):
        assert_equal(arr.round(*round_args), expected)
        # With output array
        out = np.zeros_like(arr)
        res = arr.round(*round_args, out=out)
        assert_equal(out, expected)
        assert_equal(out, res)

    check_round(np.array([1.2, 1.5]), [1, 2])
    check_round(np.array(1.5), 2)
    check_round(np.array([12.2, 15.5]), [10, 20], -1)
    check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
    # Complex rounding
    check_round(np.array([4.5 + 1.5j]), [4 + 2j])
    check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_basic(self):
    dts = [np.bool_, np.int16, np.int32, np.int64, np.double, np.complex128,
           np.longdouble, np.clongdouble]
    for dt in dts:
        c = np.ones(53, dtype=bool)
        assert_equal(np.where(c, dt(0), dt(1)), dt(0))
        assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
        assert_equal(np.where(True, dt(0), dt(1)), dt(0))
        assert_equal(np.where(False, dt(0), dt(1)), dt(1))
        d = np.ones_like(c).astype(dt)
        e = np.zeros_like(d)
        r = d.astype(dt)
        c[7] = False
        r[7] = e[7]
        assert_equal(np.where(c, e, e), e)
        assert_equal(np.where(c, d, e), r)
        assert_equal(np.where(c, d, e[0]), r)
        assert_equal(np.where(c, d[0], e), r)
        assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
        assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
        assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
        assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
        assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
        assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
        assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])