import glob

import cv2
import numpy as np


def get_points():
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ..., (7,5,0)
    objp = np.zeros((6*8, 3), np.float32)
    objp[:, :2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('calibration_wide/GO*.jpg')
# Step through the list and search for chessboard corners
for idx, fname in enumerate(images):
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (8,6), None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
# Draw and display the corners
cv2.drawChessboardCorners(img, (8,6), corners, ret)
#write_name = 'corners_found'+str(idx)+'.jpg'
#cv2.imwrite(write_name, img)
cv2.imshow('img', img)
cv2.waitKey(500)
cv2.destroyAllWindows()
return objpoints, imgpoints
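# A minimal usage sketch (not part of the original snippet): feed the collected
# point lists to cv2.calibrateCamera to recover the camera matrix and distortion
# coefficients, then undistort one of the calibration images. The file pattern
# is reused from above; everything else is an assumption.
objpoints, imgpoints = get_points()
fname = glob.glob('calibration_wide/GO*.jpg')[0]
img = cv2.imread(fname)
img_size = (img.shape[1], img.shape[0])
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, img_size, None, None)
undistorted = cv2.undistort(img, mtx, dist, None, mtx)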
def find_points(images):
    # PATTERN_SIZE, DEBUG and DISPLAY_SCALE are module-level constants
    # (e.g. PATTERN_SIZE = (9, 6)); cv is cv2 imported as cv.
obj_points = []
img_points = []
# Assumed object points relation
a_object_point = np.zeros((PATTERN_SIZE[1] * PATTERN_SIZE[0], 3),
np.float32)
a_object_point[:, :2] = np.mgrid[0:PATTERN_SIZE[0],
0:PATTERN_SIZE[1]].T.reshape(-1, 2)
# Termination criteria for sub pixel corners refinement
stop_criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER,
30, 0.001)
print('Finding points ', end='')
debug_images = []
for (image, color_image) in images:
found, corners = cv.findChessboardCorners(image, PATTERN_SIZE, None)
if found:
obj_points.append(a_object_point)
cv.cornerSubPix(image, corners, (11, 11), (-1, -1), stop_criteria)
img_points.append(corners)
print('.', end='')
else:
print('-', end='')
if DEBUG:
cv.drawChessboardCorners(color_image, PATTERN_SIZE, corners, found)
debug_images.append(color_image)
sys.stdout.flush()
if DEBUG:
display_images(debug_images, DISPLAY_SCALE)
print('\nWas able to find points in %s images' % len(img_points))
return obj_points, img_points
# images is a list of tuples: (gray_image, color_image)
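# A minimal sketch (file pattern and function name are assumptions, not from
# the original project) of how the (gray_image, color_image) tuples consumed
# by find_points() could be assembled:
import glob
import cv2 as cv

def load_calibration_images(pattern='calibration/*.jpg'):
    pairs = []
    for fname in sorted(glob.glob(pattern)):
        color = cv.imread(fname)
        gray = cv.cvtColor(color, cv.COLOR_BGR2GRAY)
        pairs.append((gray, color))
    return pairs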
def draw_flow(img, flow, step=8):
h, w = img.shape[:2]
y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)
fx, fy = flow[y,x].T
lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
lines = np.int32(lines + 0.5)
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.polylines(vis, lines, 0, (0, 255, 0))
for (x1, y1), (x2, y2) in lines:
cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
return vis
#####################################################################
# define video capture object
def draw_flow(img, flow, step=16):
h, w = img.shape[:2]
y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
fx, fy = flow[y,x].T
m = np.bitwise_and(np.isfinite(fx), np.isfinite(fy))
lines = np.vstack([x[m], y[m], x[m]+fx[m], y[m]+fy[m]]).T.reshape(-1, 2, 2)
lines = np.int32(lines + 0.5)
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.polylines(vis, lines, 0, (0, 255, 0))
for (x1, y1), (x2, y2) in lines:
cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
return vis
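# Hedged usage sketch: compute dense optical flow between consecutive frames
# with cv2.calcOpticalFlowFarneback and visualize it with draw_flow. The video
# source (device 0) and the Farneback parameters are assumptions.
import cv2

cap = cv2.VideoCapture(0)
ret, prev = cap.read()
prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    flow = cv2.calcOpticalFlowFarneback(prev_gray, gray, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    cv2.imshow('flow', draw_flow(gray, flow))
    prev_gray = gray
    if cv2.waitKey(1) & 0xFF == 27:  # Esc quits
        break
cap.release()
cv2.destroyAllWindows()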
def __init__(self, pos, size):
"""
pos is (...,3) array of the bar positions (the corner of each bar)
size is (...,3) array of the sizes of each bar
"""
nCubes = reduce(lambda a,b: a*b, pos.shape[:-1])
cubeVerts = np.mgrid[0:2,0:2,0:2].reshape(3,8).transpose().reshape(1,8,3)
cubeFaces = np.array([
[0,1,2], [3,2,1],
[4,5,6], [7,6,5],
[0,1,4], [5,4,1],
[2,3,6], [7,6,3],
[0,2,4], [6,4,2],
[1,3,5], [7,5,3]]).reshape(1,12,3)
size = size.reshape((nCubes, 1, 3))
pos = pos.reshape((nCubes, 1, 3))
verts = cubeVerts * size + pos
faces = cubeFaces + (np.arange(nCubes) * 8).reshape(nCubes,1,1)
md = MeshData(verts.reshape(nCubes*8,3), faces.reshape(nCubes*12,3))
GLMeshItem.__init__(self, meshdata=md, shader='shaded', smooth=False)
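# A hedged sketch of how this GLMeshItem subclass could be used as a 3-D bar
# chart inside a pyqtgraph OpenGL view. The class name BarGraphItem is an
# assumption (only __init__ is shown above); requires pyqtgraph with OpenGL
# support.
import numpy as np
import pyqtgraph as pg
import pyqtgraph.opengl as gl

app = pg.mkQApp()
view = gl.GLViewWidget()
heights = np.random.rand(5, 5)
gx, gy = np.mgrid[0:5, 0:5]
pos = np.dstack([gx, gy, np.zeros_like(heights)])        # bar corners, shape (5, 5, 3)
size = np.dstack([np.full_like(heights, 0.8),
                  np.full_like(heights, 0.8), heights])   # bar sizes, shape (5, 5, 3)
view.addItem(BarGraphItem(pos, size))  # hypothetical name for the class defined above
view.show()
app.exec_()  # exec() with Qt6 bindings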
def make3dplot(ax, sample, density):
ax.scatter(sample[:,0], sample[:,1], zdir='z')
ax.set_aspect('equal', 'datalim')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
gridsize=50
xs, ys = np.mgrid[xlim[0]:xlim[1]:(xlim[1]-xlim[0])/float(gridsize), ylim[0]:ylim[1]:(ylim[1]-ylim[0])/float(gridsize)]
pos = np.empty(xs.shape + (2,))
pos[:, :, 0] = xs; pos[:, :, 1] = ys
zs = density(pos)
surf = ax.plot_surface(xs, ys, zs, rstride=1, cstride=1, linewidth=0, antialiased=False, alpha=.3)
ax.set_xlabel('x')
ax.set_ylabel('y')
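# Hedged usage sketch: plot a sample from a 2-D Gaussian together with its
# density surface. The figure setup is an assumption; make3dplot only needs a
# 3-D axes, an (N, 2) sample, and a callable density.
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import multivariate_normal

rv = multivariate_normal(mean=[0, 0], cov=[[1, 0.5], [0.5, 1]])
sample = rv.rvs(size=500)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
make3dplot(ax, sample, rv.pdf)
plt.show()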
def nufft_T(N, J, K, alpha, beta):
'''
    Equations (29) and (26) in Fessler's paper:
    create the overlapping matrix CSSC (a diagonally dominant matrix)
    of J points,
    and then find the pseudo-inverse of CSSC. '''
# import scipy.linalg
L = numpy.size(alpha) - 1
# print('L = ', L, 'J = ',J, 'a b', alpha,beta )
cssc = numpy.zeros((J, J))
[j1, j2] = numpy.mgrid[1:J + 1, 1:J + 1]
overlapping_mat = j2 - j1
for l1 in range(-L, L + 1):
for l2 in range(-L, L + 1):
alf1 = alpha[abs(l1)]
# if l1 < 0: alf1 = numpy.conj(alf1)
alf2 = alpha[abs(l2)]
# if l2 < 0: alf2 = numpy.conj(alf2)
tmp = overlapping_mat + beta * (l1 - l2)
tmp = dirichlet(1.0 * tmp / (1.0 * K / N))
cssc = cssc + alf1 * numpy.conj(alf2) * tmp
return mat_inv(cssc)
def nufft_T(N, J, K, alpha, beta):
'''
    Equations (29) and (26) in Fessler and Sutton 2003.
    Create the overlapping matrix CSSC (a diagonally dominant matrix)
    of J points and find the pseudo-inverse of CSSC. '''
# import scipy.linalg
L = numpy.size(alpha) - 1
# print('L = ', L, 'J = ',J, 'a b', alpha,beta )
cssc = numpy.zeros((J, J))
[j1, j2] = numpy.mgrid[1:J + 1, 1:J + 1]
overlapping_mat = j2 - j1
for l1 in range(-L, L + 1):
for l2 in range(-L, L + 1):
alf1 = alpha[abs(l1)]
# if l1 < 0: alf1 = numpy.conj(alf1)
alf2 = alpha[abs(l2)]
# if l2 < 0: alf2 = numpy.conj(alf2)
tmp = overlapping_mat + beta * (l1 - l2)
tmp = dirichlet(1.0 * tmp / (1.0 * K / N))
cssc = cssc + alf1 * alf2 * tmp
return mat_inv(cssc)
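# The helpers dirichlet() and mat_inv() used above are not shown. A hedged
# sketch of plausible definitions consistent with the docstring (a periodic
# sinc kernel and a pseudo-inverse); these are assumptions, not the original
# project's code:
import numpy
import scipy.linalg

def dirichlet(x):
    # normalized sinc, sin(pi*x)/(pi*x), with the x = 0 limit handled
    return numpy.sinc(x)

def mat_inv(A):
    # pseudo-inverse of the (possibly ill-conditioned) overlapping matrix CSSC
    return scipy.linalg.pinv(A)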
def x_frame2D(X, plot_limits=None, resolution=None):
"""
Internal helper function for making plots, returns a set of input values to plot as well as lower and upper limits
"""
assert X.shape[1] == 2, \
'x_frame2D is defined for two-dimensional inputs'
if plot_limits is None:
(xmin, xmax) = (X.min(0), X.max(0))
        (xmin, xmax) = (xmin - 0.2 * (xmax - xmin),
                        xmax + 0.2 * (xmax - xmin))
elif len(plot_limits) == 2:
(xmin, xmax) = plot_limits
else:
        raise ValueError('Bad limits for plotting')
resolution = resolution or 50
    (xx, yy) = np.mgrid[xmin[0]:xmax[0]:1j * resolution,
                        xmin[1]:xmax[1]:1j * resolution]
Xnew = np.vstack((xx.flatten(), yy.flatten())).T
return (Xnew, xx, yy, xmin, xmax)
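# Hedged usage sketch: evaluate a function on the grid returned by x_frame2D
# and draw it behind the input points (X here is just example data).
import numpy as np
import matplotlib.pyplot as plt

X = np.random.randn(100, 2)
Xnew, xx, yy, xmin, xmax = x_frame2D(X, resolution=60)
zz = np.exp(-0.5 * (Xnew ** 2).sum(axis=1)).reshape(xx.shape)
plt.contourf(xx, yy, zz, alpha=0.5)
plt.scatter(X[:, 0], X[:, 1], s=5, c='k')
plt.show()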
def test_pdf(self):
'''
Tests the probability density function.
'''
# Calculate probability density function on lattice
bnds = np.empty((3), dtype=object)
bnds[0] = [-1, 1]
bnds[1] = [0, 2]
bnds[2] = [0.5, 2]
(x0g, x1g, x2g) = np.mgrid[bnds[0][0]:bnds[0][1],
bnds[1][0]:bnds[1][1],
bnds[2][0]:bnds[2][1]]
points = np.array([x0g.ravel(), x1g.ravel(), x2g.ravel()]).T
r_logpdf = np.array([-6.313469, -17.406428, -4.375992, -6.226508,
-8.836115, -20.430739, -5.107053, -6.687987])
p_logpdf = self.vine.logpdf(points)
assert_allclose(p_logpdf, r_logpdf)
r_pdf = np.array([1.811738e-03, 2.757302e-08, 1.257566e-02,
1.976342e-03, 1.453865e-04, 1.339808e-09,
6.053895e-03, 1.245788e-03])
p_pdf = self.vine.pdf(points)
assert_allclose(p_pdf, r_pdf, rtol=1e-5)
def plotImage(dta, saveFigName):
plt.clf()
dx, dy = 1, 1
# generate 2 2d grids for the x & y bounds
with np.errstate(invalid='ignore'):
y, x = np.mgrid[
slice(0, len(dta) , dx),
slice(0, len(dta[0]), dy)
]
z = dta
z_min, z_max = -np.abs(z).max(), np.abs(z).max()
#try:
c = plt.pcolormesh(x, y, z, cmap='hsv', vmin=z_min, vmax=z_max)
#except ??? as err: # data not regular?
# c = plt.pcolor(x, y, z, cmap='hsv', vmin=z_min, vmax=z_max)
d = plt.colorbar(c, orientation='vertical')
lx = plt.xlabel("index")
ly = plt.ylabel("season length")
plt.savefig(str(saveFigName))
def areaxy(self, lowerbound=-np.inf, upperbound=np.inf, spacing=0.1):
mask = (self.coord[:,2] > lowerbound) & (self.coord[:,2] < upperbound)
points = self.coord[mask, :2]
# The magic number factor 1.1 is not critical at all
# Just a number to set a margin to the bounding box and
# have all points fall within the boundaries
bbmin, bbmax = 1.1*points.min(axis=0), 1.1*points.max(axis=0)
size = bbmax - bbmin
cells = (size / spacing + 0.5).astype('int')
# Grid points over bounding box with specified spacing
grid = np.mgrid[bbmin[0]:bbmax[0]:(cells[0]*1j),
bbmin[1]:bbmax[1]:(cells[1]*1j)].reshape((2,-1)).T
# Occupied cells is approximately equal to grid points within
# gridspacing distance of points
occupied = occupancy(grid, points, spacing)
# The occupied area follows from the fraction of occupied
# cells times the area spanned by the bounding box
return size[0]*size[1]*sum(occupied > 0)/occupied.size
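# The occupancy() helper used above is not shown. A hedged sketch of one
# possible implementation matching the comment (count, for each grid node, the
# points lying within `spacing` of it) using a KD-tree; an assumption, not the
# original code:
import numpy as np
from scipy.spatial import cKDTree

def occupancy(grid, points, spacing):
    tree = cKDTree(points)
    neighbours = tree.query_ball_point(grid, spacing)
    return np.array([len(idx) for idx in neighbours])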
def generate_hills(width, height, nhills):
'''
@param width float, terrain width
@param height float, terrain height
    @param nhills int, number of hills to generate; the number actually generated is sqrt(nhills)^2
'''
# setup coordinate grid
xmin, xmax = -width/2.0, width/2.0
ymin, ymax = -height/2.0, height/2.0
x, y = np.mgrid[xmin:xmax:STEP, ymin:ymax:STEP]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
# generate hilltops
xm, ym = np.mgrid[xmin:xmax:width/np.sqrt(nhills), ymin:ymax:height/np.sqrt(nhills)]
mu = np.c_[xm.flat, ym.flat]
sigma = float(width*height)/(nhills*8)
for i in range(mu.shape[0]):
mu[i] = multivariate_normal.rvs(mean=mu[i], cov=sigma)
# generate hills
sigma = sigma + sigma*np.random.rand(mu.shape[0])
rvs = [ multivariate_normal(mu[i,:], cov=sigma[i]) for i in range(mu.shape[0]) ]
hfield = np.max([ rv.pdf(pos) for rv in rvs ], axis=0)
return x, y, hfield
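# Hedged usage sketch: render the generated height field. STEP is assumed to
# be a module-level grid step (e.g. STEP = 0.1) used inside generate_hills.
import matplotlib.pyplot as plt

x, y, hfield = generate_hills(width=10.0, height=10.0, nhills=16)
plt.pcolormesh(x, y, hfield, cmap='terrain', shading='auto')
plt.colorbar(label='height')
plt.show()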
def joint_density(X, Y, bounds=None):
"""
Plots joint distribution of variables.
Inherited from method in src/graphics.py module in project
git://github.com/aflaxman/pymc-example-tfr-hdi.git
"""
if bounds:
X_min, X_max, Y_min, Y_max = bounds
else:
X_min = X.min()
X_max = X.max()
Y_min = Y.min()
Y_max = Y.max()
pylab.plot(X, Y, linestyle='none', marker='o', color='green', mec='green', alpha=.2, zorder=-99)
gkde = scipy.stats.gaussian_kde([X, Y])
x,y = pylab.mgrid[X_min:X_max:(X_max-X_min)/25.,Y_min:Y_max:(Y_max-Y_min)/25.]
z = pylab.array(gkde.evaluate([x.flatten(), y.flatten()])).reshape(x.shape)
pylab.contour(x, y, z, linewidths=2)
pylab.axis([X_min, X_max, Y_min, Y_max])
def hyperball(ndim, radius):
"""Return a binary morphological filter containing pixels within `radius`.
Parameters
----------
ndim : int
The number of dimensions of the filter.
radius : int
The radius of the filter.
Returns
-------
ball : array of bool, shape [2 * radius + 1,] * ndim
        The required structuring element.
"""
size = 2 * radius + 1
center = [(radius,) * ndim]
coords = np.mgrid[[slice(None, size),] * ndim].reshape(ndim, -1).T
distances = np.ravel(spatial.distance_matrix(coords, center))
selector = distances <= radius
ball = np.zeros((size,) * ndim, dtype=bool)
ball.ravel()[selector] = True
return ball
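# Hedged usage sketch: use the structuring element with a morphological
# operation, e.g. dilating a single seed pixel into a discrete ball.
import numpy as np
from scipy import ndimage as ndi

selem = hyperball(ndim=2, radius=3)          # (7, 7) boolean disk
seed = np.zeros((15, 15), dtype=bool)
seed[7, 7] = True
dilated = ndi.binary_dilation(seed, structure=selem)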
def _generate_random_grids(self):
if self.num_grids > 40:
starter = np.random.randint(0, 20)
random_sample = np.mgrid[starter:len(self.grids)-1:20j].astype("int32")
# We also add in a bit to make sure that some of the grids have
# particles
gwp = self.grid_particle_count > 0
if np.any(gwp) and not np.any(gwp[(random_sample,)]):
# We just add one grid. This is not terribly efficient.
first_grid = np.where(gwp)[0][0]
random_sample.resize((21,))
random_sample[-1] = first_grid
mylog.debug("Added additional grid %s", first_grid)
mylog.debug("Checking grids: %s", random_sample.tolist())
else:
random_sample = np.mgrid[0:max(len(self.grids),1)].astype("int32")
return self.grids[(random_sample,)]
def test_linear_interpolator_2d():
random_data = np.random.random((64, 64))
# evenly spaced bins
fv = dict((ax, v) for ax, v in zip("xyz",
np.mgrid[0.0:1.0:64j, 0.0:1.0:64j]))
bfi = lin.BilinearFieldInterpolator(random_data,
(0.0, 1.0, 0.0, 1.0), "xy", True)
assert_array_equal(bfi(fv), random_data)
# randomly spaced bins
size = 64
bins = np.linspace(0.0, 1.0, size)
shifts = dict((ax, (1. / size) * np.random.random(size) - (0.5 / size)) \
for ax in "xy")
fv["x"] += shifts["x"][:, np.newaxis]
fv["y"] += shifts["y"]
bfi = lin.BilinearFieldInterpolator(random_data,
(bins + shifts["x"], bins + shifts["y"]), "xy", True)
assert_array_almost_equal(bfi(fv), random_data, 15)
def test_linear_interpolator_3d():
random_data = np.random.random((64, 64, 64))
# evenly spaced bins
fv = dict((ax, v) for ax, v in zip("xyz",
np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
tfi = lin.TrilinearFieldInterpolator(random_data,
(0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
assert_array_almost_equal(tfi(fv), random_data)
# randomly spaced bins
size = 64
bins = np.linspace(0.0, 1.0, size)
shifts = dict((ax, (1. / size) * np.random.random(size) - (0.5 / size)) \
for ax in "xyz")
fv["x"] += shifts["x"][:, np.newaxis, np.newaxis]
fv["y"] += shifts["y"][:, np.newaxis]
fv["z"] += shifts["z"]
tfi = lin.TrilinearFieldInterpolator(random_data,
(bins + shifts["x"], bins + shifts["y"],
bins + shifts["z"]), "xyz", True)
assert_array_almost_equal(tfi(fv), random_data, 15)
def partition_index_2d(self, axis):
if not self._distributed:
return False, self.index.grid_collection(self.center,
self.index.grids)
xax = self.ds.coordinates.x_axis[axis]
yax = self.ds.coordinates.y_axis[axis]
cc = MPI.Compute_dims(self.comm.size, 2)
mi = self.comm.rank
cx, cy = np.unravel_index(mi, cc)
x = np.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
y = np.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
DLE, DRE = self.ds.domain_left_edge.copy(), self.ds.domain_right_edge.copy()
LE = np.ones(3, dtype='float64') * DLE
RE = np.ones(3, dtype='float64') * DRE
LE[xax] = x[0] * (DRE[xax]-DLE[xax]) + DLE[xax]
RE[xax] = x[1] * (DRE[xax]-DLE[xax]) + DLE[xax]
LE[yax] = y[0] * (DRE[yax]-DLE[yax]) + DLE[yax]
RE[yax] = y[1] * (DRE[yax]-DLE[yax]) + DLE[yax]
mylog.debug("Dimensions: %s %s", LE, RE)
reg = self.ds.region(self.center, LE, RE)
return True, reg
def init_fill(self):
rext = 1.0
Ns = 51
x, y = np.mgrid[ -rext:rext:1j*Ns, -rext:rext:1j*Ns ]
z = np.zeros_like(x)
self.controller.ax_xstress.pcolor(x,y,z, cmap=plt.cm.coolwarm)
self.controller.ax_ystress.pcolor(x,y,z, cmap=plt.cm.coolwarm)
self.controller.ax_xystress.pcolor(x,y,z, cmap=plt.cm.coolwarm)
self.controller.ax_rstress.pcolor(x,y,z, cmap=plt.cm.coolwarm)
self.controller.ax_tstress.pcolor(x,y,z, cmap=plt.cm.coolwarm)
return
# =======================
def evaluate_model(self, model):
"""
        Evaluate the given model over this box's pixel grid and return the result as a new Box.
:param model:
:return:
"""
# Make a local copy of the model so that we can adapt its position to be relative to this box
rel_model = fitting.shifted_model(model, -self.x_min, -self.y_min)
# Create x and y meshgrid for evaluating
y_values, x_values = np.mgrid[:self.ysize, :self.xsize]
# Evaluate the model
data = rel_model(x_values, y_values)
# Return a new box
return Box(data, self.x_min, self.x_max, self.y_min, self.y_max)
# -----------------------------------------------------------------
def polarToLinearMaps(orig_shape, out_shape=None, center=None):
s0, s1 = orig_shape
if out_shape is None:
out_shape = (int(round(2 * s0 / 2**0.5)) - (1 - s0 % 2),
int(round(2 * s1 / (2 * np.pi) / 2**0.5)))
ss0, ss1 = out_shape
if center is None:
center = ss1 // 2, ss0 // 2
yy, xx = np.mgrid[0:ss0:1., 0:ss1:1.]
r, phi = _cart2polar(xx, yy, center)
    # scale phi from [-pi, pi] to [0, s1 - 2]:
phi = (phi + np.pi) / (2 * np.pi) * (s1 - 2)
return phi.astype(np.float32), r.astype(np.float32)
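# Hedged usage sketch: the two float32 maps look intended for cv2.remap, which
# is how a polar-to-linear unwrap would typically be applied; the input image
# and the map order (x first, then y) are assumptions.
import cv2
import numpy as np

img = np.random.randint(0, 255, (256, 256), dtype=np.uint8)  # placeholder image
map_x, map_y = polarToLinearMaps(img.shape[:2])
unwrapped = cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR)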
def calculateExtrinsics(self, cameraParameters):
'''
Inputs:
cameraParameters is CameraParameters object
Calculate: rotate vector and transform vector
>>> marker.calculateExtrinsics(camera_matrix, dist_coeff)
>>> print(marker.rvec, marker.tvec)
'''
object_points = np.zeros((4,3), dtype=np.float32)
object_points[:,:2] = np.mgrid[0:2,0:2].T.reshape(-1,2)
# Test Code.
# object_points[:] -= 0.5
marker_points = self.corners
if marker_points is None: raise TypeError('The marker.corners is None')
camera_matrix = cameraParameters.camera_matrix
dist_coeff = cameraParameters.dist_coeff
ret, rvec, tvec = cv2.solvePnP(object_points, marker_points,
camera_matrix, dist_coeff)
if ret: self.rvec, self.tvec = rvec, tvec
return ret
def _compute_gaussian_kernel(histogram_shape, relative_bw):
"""Compute a gaussian kernel double the size of the histogram matrix"""
if len(histogram_shape) == 2:
kernel_shape = [2 * n for n in histogram_shape]
        # Create a scaled grid in which the kernel is symmetric to avoid matrix
        # inversion problems when the bandwidths are very different
bw_ratio = relative_bw[0] / relative_bw[1]
bw = relative_bw[0]
X, Y = np.mgrid[-bw_ratio:bw_ratio:kernel_shape[0] * 1j,
-1:1:kernel_shape[1] * 1j]
grid_points = np.vstack([X.ravel(), Y.ravel()]).T
Cov = np.array(((bw, 0), (0, bw)))**2
K = stats.multivariate_normal.pdf(grid_points, mean=(0, 0), cov=Cov)
return K.reshape(kernel_shape)
else:
grid = np.mgrid[-1:1:histogram_shape[0] * 2j]
return stats.norm.pdf(grid, loc=0, scale=relative_bw)
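# Hedged usage sketch: smooth a 2-D histogram with the kernel via FFT
# convolution, keeping the central, same-sized part of the result. The
# histogram data and bandwidths are placeholders.
import numpy as np
from scipy import signal

hist = np.random.poisson(3.0, size=(64, 64)).astype(float)
kernel = _compute_gaussian_kernel(hist.shape, relative_bw=(0.1, 0.2))
smoothed = signal.fftconvolve(hist, kernel, mode='same')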
def draw_flow(img, flow, step=16):
h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2, -1).astype(int)  # sample one grid point every `step` pixels, flattened to two index arrays
    fx, fy = flow[y, x].T  # flow components at the sampled points
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)  # start and end point of each flow vector
    lines = np.int32(lines + 0.5)  # round to integer pixel coordinates
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))  # draw the flow vectors
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)  # mark each vector's origin
return vis