def corners_unwarp(img, nx, ny, undistorted):
    M = None
    warped = np.copy(img)
    # The image passed in has already been undistorted with cv2.undistort()
    undist = undistorted
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    if ret == True:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100  # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])
        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                          [img_size[0]-offset, img_size[1]-offset],
                          [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)
    # Return the resulting image and matrix
    return warped, M
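A minimal usage sketch: undistort first, then unwarp to a top-down view. The file name, the 9x6 chessboard size, and the calibration results `mtx` and `dist` (e.g. from cv2.calibrateCamera) are illustrative assumptions, not part of the original snippet.
import cv2
import numpy as np

img = cv2.imread('calibration.jpg')                      # hypothetical input image
undistorted = cv2.undistort(img, mtx, dist, None, mtx)   # mtx/dist assumed from a prior calibration
top_down, perspective_M = corners_unwarp(img, 9, 6, undistorted)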
Python copy() example source code
def test_copyto_fromscalar():
    a = np.arange(6, dtype='f4').reshape(2, 3)
    # Simple copy
    np.copyto(a, 1.5)
    assert_equal(a, 1.5)
    np.copyto(a.T, 2.5)
    assert_equal(a, 2.5)
    # Where-masked copy
    mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
    np.copyto(a, 3.5, where=mask)
    assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
    mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
    np.copyto(a.T, 4.5, where=mask)
    assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
def normalize_array(solution, prediction):
    '''Use min and max of solution as scaling factors to normalize prediction,
    then threshold it to [0, 1]. Binarize solution to {0, 1}.
    This allows applying classification scores to all cases.
    In principle, this should not do anything to properly formatted
    classification inputs and outputs.'''
    # Binarize solution
    sol = np.ravel(solution)  # convert to 1-d array
    maxi = np.nanmax(sol[sol != float('inf')])    # max, ignoring NaN and Inf
    mini = np.nanmin(sol[sol != float('-inf')])   # min, ignoring NaN and -Inf
    if maxi == mini:
        print('Warning, cannot normalize')
        return [solution, prediction]
    diff = maxi - mini
    mid = (maxi + mini) / 2.
    new_solution = np.copy(solution)
    new_solution[solution >= mid] = 1
    new_solution[solution < mid] = 0
    # Normalize and threshold predictions (takes effect only if solution not in {0, 1})
    new_prediction = (np.copy(prediction) - float(mini)) / float(diff)
    new_prediction[new_prediction > 1] = 1  # clip predictions that exceed the bounds [0, 1]
    new_prediction[new_prediction < 0] = 0
    # Make probabilities smoother
    #new_prediction = np.power(new_prediction, (1./10))
    return [new_solution, new_prediction]
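A small worked example of the normalization above (illustrative values only):
import numpy as np
sol = np.array([0., 10., 2., 8.])     # spans [0, 10], so mid = 5 and diff = 10
pred = np.array([3., 12., -1., 7.])
new_sol, new_pred = normalize_array(sol, pred)
# new_sol  -> [0., 1., 0., 1.]            (binarized around the midpoint 5)
# new_pred -> [0.3, 1.0, 0.0, 0.7]        ((pred - 0) / 10, clipped to [0, 1])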
def _factor_target_indices(self, Y_inds, vocab_size=None, base=2):
    if vocab_size is None:
        vocab_size = len(self.dp.word_index)
    print("Factoring targets of vocabulary size: %d" % (vocab_size), file=sys.stderr)
    num_vecs = int(math.ceil(math.log(vocab_size) / math.log(base))) + 1
    base_inds = []
    div_Y_inds = Y_inds
    print("Number of factors: %d" % num_vecs, file=sys.stderr)
    for i in range(num_vecs):
        new_inds = div_Y_inds % base
        if i == num_vecs - 1:
            if new_inds.sum() == 0:
                # Most significant "digit" is a zero. Omit it.
                break
        base_inds.append(new_inds)
        div_Y_inds = numpy.copy(div_Y_inds // base)  # integer division ("/" under Python 2)
    base_vecs = [self._make_one_hot(base_inds_i, base) for base_inds_i in base_inds]
    return base_vecs
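The factoring step above simply writes each vocabulary index in base `base`, least-significant digit first. A standalone sketch of that digit extraction, with made-up numbers:
import math

vocab_size, base = 5000, 2
num_vecs = int(math.ceil(math.log(vocab_size) / math.log(base))) + 1  # 14 binary digits for 5000
idx = 11                       # example target index
digits = []
for _ in range(num_vecs):
    digits.append(idx % base)  # least-significant digit first
    idx = idx // base
# digits starts with [1, 1, 0, 1, ...] since 11 = 1 + 2 + 8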
def get_masks(scans, masks_list):
    #%matplotlib inline
    scans1 = scans.copy()
    maxv = 255
    masks = np.zeros(shape=(scans.shape[0], 1, img_rows, img_cols))
    for i_m in range(len(masks_list)):
        for i in range(-masks_list[i_m][3], masks_list[i_m][3]+1):
            for j in range(-masks_list[i_m][3], masks_list[i_m][3]+1):
                masks[masks_list[i_m][0], 0, masks_list[i_m][2]+i, masks_list[i_m][1]+j] = 1
        for i1 in range(-masks_list[i_m][3], masks_list[i_m][3]+1):
            scans1[masks_list[i_m][0], 0, masks_list[i_m][2]+i1, masks_list[i_m][1]+masks_list[i_m][3]] = maxv
            scans1[masks_list[i_m][0], 0, masks_list[i_m][2]+i1, masks_list[i_m][1]-masks_list[i_m][3]] = maxv
            scans1[masks_list[i_m][0], 0, masks_list[i_m][2]+masks_list[i_m][3], masks_list[i_m][1]+i1] = maxv
            scans1[masks_list[i_m][0], 0, masks_list[i_m][2]-masks_list[i_m][3], masks_list[i_m][1]+i1] = maxv
    for i in range(scans.shape[0]):
        print('scan ' + str(i))
        f, ax = plt.subplots(1, 2, figsize=(10, 5))
        ax[0].imshow(scans1[i, 0, :, :], cmap=plt.cm.gray)
        ax[1].imshow(masks[i, 0, :, :], cmap=plt.cm.gray)
        plt.show()
    return masks
def postProcess(PDFeatures1, which):
    PDFeatures2 = np.copy(PDFeatures1)
    cols = np.shape(PDFeatures2)[1]
    for x in range(cols):
        indinf = np.where(np.isinf(PDFeatures2[:, x]) == True)[0]
        if len(indinf) > 0:
            PDFeatures2[indinf, x] = 0
        indnan = np.where(np.isnan(PDFeatures2[:, x]) == True)[0]
        if len(indnan) > 0:
            PDFeatures2[indnan, x] = 0
    indLN = np.where(PDFeatures2[:, 0] < -1)[0]
    for x in indLN:
        PDFeatures2[x, 0] = np.random.uniform(-0.75, -0.99, 1)
    term1 = (PDFeatures2[:, 2] + PDFeatures2[:, 3] + PDFeatures2[:, 5]) / 3.
    print(term1)
    PDFeatures2[:, 1] = 1. - term1
    print("PDF", PDFeatures2[:, 1])
    return PDFeatures2
def pop(self):
    """
    Removes and returns [priority, exp_idx] for
    the maximum priority element
    """
    if self.size == 0:
        return None
    # Get max element (first element in pq_array)
    max_elt = np.copy(self.pq_array[0])
    # Move the last value (not necessarily the smallest) to the root
    self.pq_array[0] = self.pq_array[self.size-1]
    self.size -= 1
    # Update hash tables
    self.exp_hash[self.pq_array[0, 1]], self.pq_hash[0] = 0, self.pq_array[0, 1]
    # Rebalance
    self.__down_heap(0)
    return max_elt
def __down_heap(self, i):
    """
    Rebalances the heap (by moving small values down)
    """
    # Calculate left and right child indices
    l = 2*i+1
    r = 2*i+2
    # Find index of the greatest of these elements
    if l < self.size and self.pq_array[l, 0] > self.pq_array[i, 0]:
        greatest = l
    else:
        greatest = i
    if r < self.size and self.pq_array[r, 0] > self.pq_array[greatest, 0]:
        greatest = r
    # Continue rebalancing if necessary
    if greatest != i:
        # swap elements at indices i, greatest
        self.pq_array[i], self.pq_array[greatest] = np.copy(self.pq_array[greatest]), np.copy(self.pq_array[i])
        # Update hash tables
        self.exp_hash[self.pq_array[i, 1]], self.exp_hash[self.pq_array[greatest, 1]], self.pq_hash[i], self.pq_hash[greatest] = i, greatest, self.pq_array[i, 1], self.pq_array[greatest, 1]
        self.__down_heap(greatest)
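For reference, pop() and __down_heap() assume the usual implicit binary-heap layout: the children of index i live at 2*i+1 and 2*i+2, and every parent's priority is at least as large as its children's. A tiny standalone check of that max-heap property, using illustrative priorities only:
import numpy as np

pq = np.array([9.0, 7.0, 8.0, 1.0, 5.0, 3.0])  # priorities only, already a valid max-heap

def is_max_heap(a):
    n = len(a)
    for i in range(n):
        for child in (2 * i + 1, 2 * i + 2):
            if child < n and a[child] > a[i]:
                return False
    return True

assert is_max_heap(pq)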
def apply_xy_shift(ds, xshift_m, yshift_m):
    """
    Apply horizontal shift to GDAL dataset GeoTransform
    Returns:
        GDAL Dataset copy with updated GeoTransform
    """
    print("X shift: ", xshift_m)
    print("Y shift: ", yshift_m)
    # Update geotransform
    gt_orig = ds.GetGeoTransform()
    gt_shift = np.copy(gt_orig)
    gt_shift[0] += xshift_m
    gt_shift[3] += yshift_m
    print("Original geotransform:", gt_orig)
    print("Updated geotransform:", gt_shift)
    # Update ds Geotransform
    ds_align = iolib.mem_drv.CreateCopy('', ds, 1)
    ds_align.SetGeoTransform(gt_shift)
    return ds_align
def load_nifti(filename, with_affine=False):
    """
    load image from NIFTI file
    Parameters
    ----------
    filename : str
        filename of NIFTI file
    with_affine : bool
        if True, returns affine parameters
    Returns
    -------
    data : np.ndarray
        image data
    """
    img = nib.load(filename)
    data = img.get_data()
    data = np.copy(data, order="C")
    if with_affine:
        return data, img.affine
    return data
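A brief usage sketch (assumes nibabel is installed; 'scan.nii.gz' is a hypothetical file path):
data, affine = load_nifti('scan.nii.gz', with_affine=True)
print(data.shape, data.flags['C_CONTIGUOUS'])  # np.copy(..., order="C") guarantees a C-contiguous array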
def admm_phase1(x0, prob, tol=1e-2, num_iters=1000):
    logging.info("Starting ADMM phase 1 with tol %.3f", tol)
    z = np.copy(x0)
    xs = [np.copy(x0) for i in range(prob.m)]
    us = [np.zeros(prob.n) for i in range(prob.m)]
    for t in range(num_iters):
        if max(prob.violations(z)) < tol:
            break
        z = (sum(xs) - sum(us)) / prob.m
        for i in range(prob.m):
            x, u, f = xs[i], us[i], prob.fi(i)
            xs[i] = onecons_qcqp(z + u, f)
        for i in range(prob.m):
            us[i] += z - xs[i]
    return z
def analyseparamsneighbourhood(svdata, params, includejumps, randomstate):
    parameterndarray = transformparameterndarray(np.array(params), includejumps)
    offsets = np.linspace(-.5, .5, 10)
    for dimension in range(params.dimensioncount):
        xs, ys = [], []
        parametername = params.getdimensionname(dimension)
        print('Perturbing %s...' % parametername)
        for offset in offsets:
            newparameterndarray = np.copy(parameterndarray)
            newparameterndarray[dimension] += offset
            xs.append(inversetransformparameterndarray(newparameterndarray, includejumps)[dimension])
            y = runsvljparticlefilter(svdata, sv.Params(*inversetransformparameterndarray(newparameterndarray, includejumps)), randomstate).stochfilter.loglikelihood
            ys.append(y)
        fig = plt.figure()
        plot = fig.add_subplot(111)
        plot.plot(xs, ys)
        plot.axvline(x=inversetransformparameterndarray(parameterndarray, includejumps)[dimension], color='red')
        plot.set_xlabel(parametername)
        plot.set_ylabel('loglikelihood')
        plt.show()
def Occlusion_exp(image, occluding_size, occluding_stride, model, preprocess, classes, groundTruth):
    img = np.copy(image)
    height, width, _ = img.shape
    output_height = int(math.ceil((height - occluding_size) / occluding_stride + 1))
    output_width = int(math.ceil((width - occluding_size) / occluding_stride + 1))
    occludedImages = []
    for h in range(output_height):
        for w in range(output_width):
            # occluder region
            h_start = h * occluding_stride
            w_start = w * occluding_stride
            h_end = min(height, h_start + occluding_size)
            w_end = min(width, w_start + occluding_size)
            input_image = copy.copy(img)
            input_image[h_start:h_end, w_start:w_end, :] = 0
            occludedImages.append(preprocess(Image.fromarray(input_image)))
    L = np.empty(output_height * output_width)
    L.fill(groundTruth)
    L = torch.from_numpy(L)
    tensor_images = torch.stack([img for img in occludedImages])
    dataset = torch.utils.data.TensorDataset(tensor_images, L)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=5, shuffle=False, num_workers=8)
    heatmap = np.empty(0)
    model.eval()
    for data in dataloader:
        images, labels = data
        if use_gpu:
            images, labels = images.cuda(), labels.cuda(non_blocking=True)  # was "async=True" in older PyTorch
        outputs = model(Variable(images))
        m = nn.Softmax(dim=1)
        outputs = m(outputs)
        outs = outputs.cpu() if use_gpu else outputs
        heatmap = np.concatenate((heatmap, outs[0:outs.size()[0], groundTruth].data.numpy()))
    return heatmap.reshape((output_height, output_width))
def _get_rois_blob(im_rois, im_scale_factors):
    """Converts RoIs into network inputs.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale_factors (list): scale factors as returned by _get_image_blob
    Returns:
        blob (ndarray): R x 5 matrix of RoIs in the image pyramid
    """
    rois_blob_real = []
    for i in range(len(im_scale_factors)):
        rois, levels = _project_im_rois(im_rois, np.array([im_scale_factors[i]]))
        rois_blob = np.hstack((levels, rois))
        rois_blob_real.append(rois_blob.astype(np.float32, copy=False))
    return rois_blob_real
def time_hdf5():
    data_path = create_hdf5(BATCH_SIZE * NSTEPS)
    f = h5py.File(data_path, 'r')
    durs = []
    for step in tqdm.trange(NSTEPS, desc='running hdf5'):
        start_time = time.time()
        arr = f['data'][BATCH_SIZE * step: BATCH_SIZE * (step + 1)]
        read_time = time.time()
        arr = copy.deepcopy(arr)
        copy_time = time.time()
        durs.append(['hdf5 read', step, read_time - start_time])
        durs.append(['hdf5 copy', step, copy_time - read_time])
    f.close()
    os.remove(data_path)
    durs = pandas.DataFrame(durs, columns=['kind', 'stepno', 'dur'])
    return durs
def as_frames(self, from_frame, to_frame='world'):
    """Return a shallow copy of this rigid transform with just the frames
    changed.
    Parameters
    ----------
    from_frame : :obj:`str`
        The new from_frame.
    to_frame : :obj:`str`
        The new to_frame.
    Returns
    -------
    :obj:`RigidTransform`
        The RigidTransform with new frames.
    """
    return RigidTransform(self.rotation, self.translation, from_frame, to_frame)
def generate_mask_rand(self, mask_pred):
    pool_len = mask_pred.shape[2]
    sample_num = mask_pred.shape[0]
    rand_mask = np.ones((sample_num, 1, pool_len, pool_len))
    mask_pixels = pool_len * pool_len
    count_drop_neg = self._count_drop_neg
    for i in range(sample_num):
        rp = np.random.permutation(np.arange(mask_pixels))
        rp = rp[0: count_drop_neg]
        now_mask = np.ones(mask_pixels)
        now_mask[rp] = 0
        now_mask = np.reshape(now_mask, (pool_len, pool_len))
        rand_mask[i, 0, :, :] = np.copy(now_mask)
    return rand_mask
def segment_HU_scan_frederic(x, threshold=-350):
    mask = np.copy(x)
    binary_part = mask > threshold
    selem1 = skimage.morphology.disk(8)
    selem2 = skimage.morphology.disk(2)
    selem3 = skimage.morphology.disk(13)
    for iz in range(mask.shape[0]):
        # fill the body part
        filled = scipy.ndimage.binary_fill_holes(binary_part[iz])  # fill body
        filled_borders_mask = skimage.morphology.binary_erosion(filled, selem1)
        mask[iz] *= filled_borders_mask
        mask[iz] = skimage.morphology.closing(mask[iz], selem2)
        mask[iz] = skimage.morphology.erosion(mask[iz], selem3)
        mask[iz] = mask[iz] < threshold
    return mask
def merge_alternative(self, array, low, mid, high):
    left = np.copy(array[low: mid + 1])
    right = np.copy(array[mid + 1: high + 1])
    i, j, k = 0, 0, low
    while i < len(left) and j < len(right):
        self.count += 1
        if left[i] < right[j]:
            array[k] = left[i]
            i += 1
        else:
            array[k] = right[j]
            j += 1
        k += 1
        if self.visualization:
            self.hist_array = np.vstack((self.hist_array, array))
    while i < len(left):
        array[k] = left[i]
        i += 1
        k += 1
    while j < len(right):
        array[k] = right[j]
        j += 1
        k += 1
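The same merge step, stripped of the class bookkeeping (count, visualization history), as a standalone sketch:
import numpy as np

def merge(array, low, mid, high):
    """Merge the sorted halves array[low:mid+1] and array[mid+1:high+1] in place."""
    left = np.copy(array[low: mid + 1])
    right = np.copy(array[mid + 1: high + 1])
    i, j, k = 0, 0, low
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            array[k] = left[i]
            i += 1
        else:
            array[k] = right[j]
            j += 1
        k += 1
    while i < len(left):
        array[k] = left[i]; i += 1; k += 1
    while j < len(right):
        array[k] = right[j]; j += 1; k += 1

a = np.array([2, 5, 9, 1, 6, 8])
merge(a, 0, 2, 5)   # both halves are sorted; a becomes [1, 2, 5, 6, 8, 9]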
def reset_target(self):
    # Randomize goal position within specified bounds
    self.goal = np.random.rand(3) * (self.target_bounds[:, 1] -
                                     self.target_bounds[:, 0]
                                     ) + self.target_bounds[:, 0]
    geom_positions = self.sim.model.geom_pos.copy()
    prev_goal_location = geom_positions[1]
    while (np.linalg.norm(prev_goal_location - self.goal) <
           self.target_reset_distance):
        self.goal = np.random.rand(3) * (self.target_bounds[:, 1] -
                                         self.target_bounds[:, 0]
                                         ) + self.target_bounds[:, 0]
    geom_positions[1] = self.goal
    self.sim.model.geom_pos[:] = geom_positions
def parseGT(snpGT):
    first = snpGT[0]
    snpBinary = np.zeros(len(snpGT), dtype="int8")
    if first.find('|') != -1:
        ## GT is phased
        separator = "|"
    elif first.find('/') != -1:
        ## GT is not phased
        separator = "/"
    elif np.char.isdigit(first):
        return np.array(np.copy(snpGT), dtype="int8")
    else:
        die("unable to parse the format of GT in vcf!")
    hetGT = "0" + separator + "1"
    refGT = "0" + separator + "0"
    altGT = "1" + separator + "1"
    nocall = "." + separator + "."
    snpBinary[np.where(snpGT == altGT)[0]] = 1
    snpBinary[np.where(snpGT == hetGT)[0]] = 2
    snpBinary[np.where(snpGT == nocall)[0]] = -1
    return snpBinary
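A quick example of the genotype encoding above (ref -> 0, alt -> 1, het -> 2, missing -> -1), with made-up GT strings:
import numpy as np
snpGT = np.array(["0|0", "0|1", "1|1", ".|.", "0|0"])
parseGT(snpGT)   # -> array([0, 2, 1, -1, 0], dtype=int8)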
def create_batches(self):
    self.num_batches = int(self.tensor.size / (self.batch_size *
                                               self.seq_length))
    # When the data (tensor) is too small,
    # let's give them a better error message
    if self.num_batches == 0:
        assert False, "Not enough data. Make seq_length and batch_size small."
    self.tensor = self.tensor[:self.num_batches * self.batch_size * self.seq_length]
    xdata = self.tensor
    ydata = np.copy(self.tensor)
    ydata[:-1] = xdata[1:]
    ydata[-1] = xdata[0]
    self.x_batches = np.split(xdata.reshape(self.batch_size, -1),
                              self.num_batches, 1)
    self.y_batches = np.split(ydata.reshape(self.batch_size, -1),
                              self.num_batches, 1)
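The y-targets are simply the inputs shifted left by one position, with wrap-around at the end; a tiny standalone illustration of that shift with hypothetical token ids:
import numpy as np
xdata = np.array([10, 11, 12, 13])
ydata = np.copy(xdata)
ydata[:-1] = xdata[1:]
ydata[-1] = xdata[0]
# ydata -> [11, 12, 13, 10]: each target is the next token, wrapping at the end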
def logpdf(self, samples):
    '''
    Calculates the log of the probability density function.
    Parameters
    ----------
    samples : array_like
        n-by-2 matrix of samples where n is the number of samples.
    Returns
    -------
    vals : ndarray
        Log of the probability density function evaluated at `samples`.
    '''
    samples = np.copy(np.asarray(samples))
    samples = self.__rotate_input(samples)
    inner = np.all(np.bitwise_and(samples > 0.0, samples < 1.0), axis=1)
    outer = np.invert(inner)
    vals = np.zeros(samples.shape[0])
    vals[inner] = self._logpdf(samples[inner, :])
    # Assign zero mass to border
    vals[outer] = -np.inf
    return vals
def _get_batch_from_indices(self, indices):
    """Given a list of indices, return the potentially augmented batch."""
    x_batch = []
    seq_len = []
    x_labels = []
    for idx in range(len(indices)):
        i = indices[idx]
        data = self.random_scale(self.strokes[i])
        data_copy = np.copy(data)
        if self.augment_stroke_prob > 0:
            data_copy = augment_strokes(data_copy, self.augment_stroke_prob)
        x_batch.append(data_copy)
        length = len(data_copy)
        seq_len.append(length)
        x_labels.append(self.labels[i])
    seq_len = np.array(seq_len, dtype=int)
    # We return four things: stroke-3 format, labels, padded stroke-5 format, list of seq_len.
    return x_batch, x_labels, self.pad_batch(x_batch, self.max_seq_length), seq_len
def pred_accuracy(y_true, y_pred):
    y_true = sp.copy(y_true)
    if len(sp.unique(y_true)) == 2:
        print('dichotomous trait, calculating AUC')
        y_min = y_true.min()
        y_max = y_true.max()
        if y_min != 0 or y_max != 1:
            y_true[y_true == y_min] = 0
            y_true[y_true == y_max] = 1
        fpr, tpr, thresholds = metrics.roc_curve(y_true, y_pred)
        auc = metrics.auc(fpr, tpr)
        return auc
    else:
        print('continuous trait, calculating COR')
        cor = sp.corrcoef(y_true, y_pred)[0, 1]
        return cor
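A brief usage sketch with made-up labels and scores (assumes the same imports as the snippet: scipy as sp and sklearn.metrics as metrics):
import numpy as np
y_true = np.array([0, 0, 1, 1])
y_pred = np.array([0.1, 0.4, 0.35, 0.8])
pred_accuracy(y_true, y_pred)   # two unique labels -> AUC path, which gives 0.75 for these scores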
def load_weights(params, path, num_conv):
    print('Loading gan weights from ' + path)
    with h5py.File(path, 'r') as hdf5:
        params['skipthought2image'] = theano.shared(np.copy(hdf5['skipthought2image']))
        params['skipthought2image-bias'] = theano.shared(np.copy(hdf5['skipthought2image-bias']))
        for i in range(num_conv):
            params['W_conv{}'.format(i)] = theano.shared(np.copy(hdf5['W_conv{}'.format(i)]))
            params['b_conv{}'.format(i)] = theano.shared(np.copy(hdf5['b_conv{}'.format(i)]))
            # Flip w,h axes
            params['W_conv{}'.format(i)] = params['W_conv{}'.format(i)][:, :, ::-1, ::-1]
            w = np.abs(np.copy(hdf5['W_conv{}'.format(i)]))
            print('W_conv{}'.format(i), np.min(w), np.mean(w), np.max(w))
            b = np.abs(np.copy(hdf5['b_conv{}'.format(i)]))
            print('b_conv{}'.format(i), np.min(b), np.mean(b), np.max(b))
    return params
def successors(config):
    import math
    leds = len(config)
    size = int(math.sqrt(leds))
    succs = []
    for i in range(leds):
        y = i // size
        x = i % size
        succ = np.copy(config)
        succ[i] *= -1
        if x-1 >= 0:
            succ[i-1] *= -1
        if x+1 < size:
            succ[i+1] *= -1
        if y-1 >= 0:
            succ[i-size] *= -1
        if y+1 < size:
            succ[i+size] *= -1
        succs.append(succ)
    return succs
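Example: on a 3x3 board stored as a flat +1/-1 vector, each successor toggles one cell and its orthogonal neighbours. A quick check with an all-lit board:
import numpy as np
config = np.ones(9, dtype=int)   # 3x3 board, all cells +1
succs = successors(config)
len(succs)                       # -> 9, one successor per pressed cell
succs[4].reshape(3, 3)
# pressing the centre flips the centre and its 4 neighbours:
# [[ 1, -1,  1],
#  [-1, -1, -1],
#  [ 1, -1,  1]]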
def run(ae, xs):
    zs = ae.encode_binary(xs)
    ys = ae.decode_binary(zs)
    mod_ys = []
    correlations = []
    print(ys.shape)
    print("correlations:")
    print("bit \\ image {}".format(range(len(xs))))
    for i in range(ae.N):
        mod_zs = np.copy(zs)
        # increase the latent value from 0 to 1 and check the difference
        for j in range(11):
            mod_zs[:, i] = j / 10.0
            mod_ys.append(ae.decode_binary(mod_zs))
        zero_zs, one_zs = np.copy(zs), np.copy(zs)
        zero_zs[:, i] = 0.
        one_zs[:, i] = 1.
        correlation = np.mean(np.square(ae.decode_binary(zero_zs) - ae.decode_binary(one_zs)),
                              axis=(1, 2))
        correlations.append(correlation)
        print("{:>5} {}".format(i, correlation))
    plot_grid2(np.einsum("ib...->bi...", np.array(mod_ys)).reshape((-1,) + ys.shape[1:]),
               w=11, path=ae.local("dump_significance.png"))
    return np.einsum("ib->bi", correlations)
def prepare(data):
    num = len(data)
    dim = data.shape[1] // 2
    print("in prepare: ", data.shape, num, dim)
    pre, suc = data[:, :dim], data[:, dim:]
    suc_invalid = np.copy(suc)
    random.shuffle(suc_invalid)
    diff_valid = suc - pre
    diff_invalid = suc_invalid - pre
    inputs = np.concatenate((diff_valid, diff_invalid), axis=0)
    outputs = np.concatenate((np.ones((num, 1)), np.zeros((num, 1))), axis=0)
    print("in prepare: ", inputs.shape, outputs.shape)
    io = np.concatenate((inputs, outputs), axis=1)
    random.shuffle(io)
    train_n = int(2 * num * 0.9)
    train, test = io[:train_n], io[train_n:]
    train_in, train_out = train[:, :dim], train[:, dim:]
    test_in, test_out = test[:, :dim], test[:, dim:]
    print("in prepare: ", train_in.shape, train_out.shape, test_in.shape, test_out.shape)
    return train_in, train_out, test_in, test_out
def get_poly_centers(ob, type=np.float32):
    mod = False
    m_count = len(ob.modifiers)
    if m_count > 0:
        show = np.zeros(m_count, dtype=bool)
        ren_set = np.copy(show)
        ob.modifiers.foreach_get('show_render', show)
        ob.modifiers.foreach_set('show_render', ren_set)
        mod = True
    mesh = ob.to_mesh(bpy.context.scene, True, 'RENDER')
    p_count = len(mesh.polygons)
    center = np.zeros(p_count * 3)  #, dtype=type)
    mesh.polygons.foreach_get('center', center)
    center.shape = (p_count, 3)
    bpy.data.meshes.remove(mesh)
    if mod:
        ob.modifiers.foreach_set('show_render', show)
    return center