def iuwt(wave, convol2d=0):
    """Inverse isotropic undecimated wavelet ("a trous"/starlet) transform.

    wave has shape (lvl, n1, n2): the last plane is the coarse scale, the
    others are detail planes. Here sc.convolve1d and cp.convolve2d behave like
    scipy.ndimage.convolve1d and scipy.signal.convolve2d.
    """
    mode = 'nearest'
    lvl, n1, n2 = np.shape(wave)
    h = np.array([1./16, 1./4, 3./8, 1./4, 1./16])  # B3-spline scaling filter
    n = np.size(h)
    cJ = np.copy(wave[lvl-1, :, :])  # start from the coarsest scale
    for i in range(1, lvl):
        # dilate the filter by inserting zeros ("a trous" holes) for this scale
        newh = np.zeros((1, n + (n-1) * (2**(lvl-1-i) - 1)))
        newh[0, np.int_(np.linspace(0, np.size(newh)-1, len(h)))] = h
        H = np.dot(newh.T, newh)
        ###### Line convolution
        if convol2d == 1:
            cnew = cp.convolve2d(cJ, H, mode='same', boundary='symm')
        else:
            cnew = sc.convolve1d(cJ, newh[0, :], axis=0, mode=mode)
            ###### Column convolution
            cnew = sc.convolve1d(cnew, newh[0, :], axis=1, mode=mode)
        cJ = cnew + wave[lvl-1-i, :, :]  # add back the next finer detail plane
    return np.reshape(cJ, (n1, n2))
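The zero-insertion ("a trous") step above is easier to see on its own; a minimal sketch using only numpy, dilating the B3-spline filter by one level:

import numpy as np

# Dilate the B3-spline filter by inserting zeros between taps, as done inside
# iuwt() for each scale (here: one level of dilation, i.e. spacing 2).
h = np.array([1./16, 1./4, 3./8, 1./4, 1./16])
n = h.size
spacing = 2
newh = np.zeros((1, n + (n - 1) * (spacing - 1)))
newh[0, np.int_(np.linspace(0, newh.size - 1, n))] = h
print(newh[0])   # -> [1/16, 0, 1/4, 0, 3/8, 0, 1/4, 0, 1/16]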
def three_melody_matrices(track_id, win=4.0):
    t, melstm, melmat = two_melody_matrices(track_id)
    dt = t[1] - t[0]
    nkern = int(np.round(win / dt))
    # Kernel with ones in its upper half: 'same' convolution yields, at each
    # frame, the average of the *next* `win` seconds (forward context).
    kern1 = np.ones((nkern, 1))
    kern2 = np.zeros((nkern + 1, 1))
    kern = np.vstack((kern1, kern2))
    kern *= 1.0 / nkern
    melfwd = dsp.convolve2d(melmat, kern, mode='same')
    return t, melstm, melmat, melfwd
def two_melody_matrices(track_id, win=4.0):
    t, melmat = one_melody_matrix(track_id)
    dt = t[1] - t[0]
    nkern = int(np.round(win / dt))
    # Kernel with ones in its lower half: 'same' convolution yields, at each
    # frame, the average of the *previous* `win` seconds (short-term memory).
    kern1 = np.zeros((nkern + 1, 1))
    kern2 = np.ones((nkern, 1))
    kern = np.vstack((kern1, kern2))
    kern *= 1.0 / nkern
    melstm = dsp.convolve2d(melmat, kern, mode='same')
    return t, melstm, melmat
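A tiny sketch of what these stacked kernels do, assuming dsp is scipy.signal (the alias is not shown here): with mode='same', the lower-half-of-ones kernel averages the previous frames and the upper-half-of-ones kernel averages the upcoming ones.

import numpy as np
from scipy import signal as dsp  # assumed alias for the dsp used above

x = np.arange(8, dtype=float).reshape(-1, 1)    # one pitch column over 8 frames
nkern = 3
back = np.vstack((np.zeros((nkern + 1, 1)), np.ones((nkern, 1)))) / nkern
fwd = np.vstack((np.ones((nkern, 1)), np.zeros((nkern + 1, 1)))) / nkern
print(dsp.convolve2d(x, back, mode='same').ravel())  # mean of the previous 3 frames
print(dsp.convolve2d(x, fwd, mode='same').ravel())   # mean of the next 3 frames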
def convolve_flatten(X):
    # input will be (32, 32, 3, N)
    # output will be (N, 32*32)
    N = X.shape[-1]
    flat = np.zeros((N, 32*32))
    for i in range(N):
        #flat[i] = X[:,:,:,i].reshape(3072)
        bw = X[:,:,:,i].mean(axis=2)  # make it grayscale
        Gx = convolve2d(bw, Hx, mode='same')  # Hx, Hy: edge kernels defined elsewhere
        Gy = convolve2d(bw, Hy, mode='same')
        G = np.sqrt(Gx*Gx + Gy*Gy)  # gradient magnitude
        G /= G.max()  # normalize it
        flat[i] = G.reshape(32*32)
    return flat
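The kernels Hx and Hy are defined elsewhere in that project; a minimal sketch with stand-in Sobel definitions (an assumption, not the original ones), assuming convolve_flatten is defined in the same module:

import numpy as np
from scipy.signal import convolve2d

# Hypothetical stand-ins for the module-level Hx/Hy used by convolve_flatten.
Hx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=float)
Hy = Hx.T

X = np.random.rand(32, 32, 3, 5)   # 5 random "images"
features = convolve_flatten(X)
print(features.shape)              # (5, 1024)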
def get_depth_normals(self, depth_map):
    h, w = np.shape(depth_map)
    zz = depth_map
    # back-project pixel coordinates to camera-space x/y using the sensor size
    # and focal length, then differentiate with a Scharr-like kernel
    xx, yy = np.meshgrid(range(0, h), range(0, w))
    xx = (xx / (h - 1.0) * 0.5) * self.sensor_mm * zz / self.focal_length_mm
    yy = (yy / (w - 1.0) * 0.5) * self.sensor_mm * zz / self.focal_length_mm
    kernel = np.asarray([[3., 10., 3.], [0., 0., 0.], [-3., -10., -3.]])
    kernel /= 64.
    dxdx = ssig.convolve2d(xx, kernel, mode="same", boundary="wrap")
    dydx = ssig.convolve2d(yy, kernel, mode="same", boundary="wrap")
    dzdx = ssig.convolve2d(zz, kernel, mode="same", boundary="wrap")
    dxdy = ssig.convolve2d(xx, np.transpose(kernel), mode="same", boundary="wrap")
    dydy = ssig.convolve2d(yy, np.transpose(kernel), mode="same", boundary="wrap")
    dzdy = ssig.convolve2d(zz, np.transpose(kernel), mode="same", boundary="wrap")
    # surface normal from the cross product of the two tangent vectors
    # (up to the sign/axis convention used here), normalized to unit length
    normal_map = np.full((h, w, 3), fill_value=np.nan)
    normal_map[:, :, 0] = (dzdx * dxdy - dxdx * dzdy)
    normal_map[:, :, 1] = - (dydx * dzdy - dzdx * dydy)
    normal_map[:, :, 2] = - (dxdx * dydy - dydx * dxdy)
    magnitude = np.sqrt(np.sum(np.square(normal_map), axis=2))
    normal_map = normal_map / np.dstack((magnitude, magnitude, magnitude))
    return normal_map
def PsfBlur(img, psfid):
    imgarray = np.array(img, dtype="float32")
    kernel = psfDictionary[psfid]
    convolved = convolve2d(imgarray, kernel, mode='same', fillvalue=255.0).astype("uint8")
    img = Image.fromarray(convolved)
    return img

def BoxBlur(img, dim):
    imgarray = np.array(img, dtype="float32")
    kernel = BoxKernel(dim)
    convolved = convolve2d(imgarray, kernel, mode='same', fillvalue=255.0).astype("uint8")
    img = Image.fromarray(convolved)
    return img

def LinearMotionBlur(img, dim, angle, linetype):
    imgarray = np.array(img, dtype="float32")
    kernel = LineKernel(dim, angle, linetype)
    convolved = convolve2d(imgarray, kernel, mode='same', fillvalue=255.0).astype("uint8")
    img = Image.fromarray(convolved)
    return img

def DefocusBlur(img, dim):
    imgarray = np.array(img, dtype="float32")
    kernel = DiskKernel(dim)
    convolved = convolve2d(imgarray, kernel, mode='same', fillvalue=255.0).astype("uint8")
    img = Image.fromarray(convolved)
    return img
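BoxKernel, LineKernel, DiskKernel and psfDictionary come from elsewhere in that project; a minimal sketch of BoxBlur with a stand-in averaging kernel (an assumption, not the project's implementation), assuming BoxBlur is defined alongside these imports:

import numpy as np
from PIL import Image
from scipy.signal import convolve2d

def BoxKernel(dim):
    # Hypothetical stand-in: a normalized dim x dim averaging kernel.
    return np.ones((dim, dim), dtype="float32") / float(dim * dim)

gray = Image.fromarray(np.random.randint(0, 256, (64, 64), dtype=np.uint8))
blurred = BoxBlur(gray, 5)          # single-channel image, 5x5 box blur
print(np.array(blurred).shape)      # (64, 64)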
def calcDirection(img):
    sobel_x=np.array([[1, 0, -1],[2, 0, -2],[1, 0, -1]])
    sobel_y=np.array([[1, 2, 1],[0, 0, 0],[-1,-2,-1]])
    par_x=convolve2d(img,sobel_x,mode='same')
    par_y=convolve2d(img,sobel_y,mode='same')
    Vy=2*np.sum(par_x*par_y)
    Vx=np.sum(par_y**2-par_x**2)
    theta=0.5*np.arctan2(Vy,Vx)#+np.pi/2
    return theta
def segmentation(img, blockSize=8, h=352, w=288):
    # pad the image with white borders so its size is a multiple of 16
    add0 = (16 - img.shape[0] % 16) // 2
    add1 = (16 - img.shape[1] % 16) // 2
    img = np.vstack((255*np.ones((add0, img.shape[1])), img, 255*np.ones((add0, img.shape[1]))))
    img = np.hstack((255*np.ones((img.shape[0], add1)), img, 255*np.ones((img.shape[0], add1))))
    # img=np.uint8(img)
    ## reference: IMPROVED FINGERPRINT IMAGE SEGMENTATION USING
    ## NEW MODIFIED GRADIENT BASED TECHNIQUE
    sobel_x = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
    sobel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
    par_x = convolve2d(img, sobel_x, mode='same')
    par_y = convolve2d(img, sobel_y, mode='same')
    #img=basic.blockproc(img,cv2.equalizeHist,(blockSize,blockSize))
    # per-block standard deviation of the gradients
    stdx = blockproc(par_x, np.std, (16, 16), True)
    stdy = blockproc(par_y, np.std, (16, 16), True)
    grddev = stdx + stdy
    threshold = 90
    index = grddev[1:-1, 1:-1].copy()
    index[np.where(index < threshold)] = 0
    index[np.where(index >= threshold)] = 1
    a = np.zeros(grddev.shape)
    a[1:-1, 1:-1] = index
    index = a
    valid = np.zeros(img.shape)
    valid_b = block_view(valid, (16, 16))
    valid_b[:] = index[:, :, np.newaxis, np.newaxis]
    kernel = np.ones((8, 8), np.uint8)
    # first dilate to delete the invalid value inside the fingerprint region
    valid = cv2.dilate(valid, kernel, iterations=5)
    # then erode more to delete the valid value outside the fingerprint region
    valid = cv2.erode(valid, kernel, iterations=12)
    # dilate again to increase the valid area, to compensate for the loss
    # due to erosion in the last step
    valid = cv2.dilate(valid, kernel, iterations=7)
    img[np.where(valid == 0)] = 255
    # align the image
    #img=align(img, valid)
    return cut(img, valid, h, w)
def calcDirection(img, blockSize, method='block-wise'):
    """Calculate ridge directions in an image, using the gradient method.

    Returns: ridge directions
    """
    sobel_x = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
    sobel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
    par_x = convolve2d(img, sobel_x, mode='same')
    par_y = convolve2d(img, sobel_y, mode='same')
    N, M = np.shape(img)
    if method == 'block-wise':
        Vx = np.zeros((N//blockSize, M//blockSize))
        Vy = np.zeros((N//blockSize, M//blockSize))
        for i in range(N//blockSize):
            for j in range(M//blockSize):
                a = i*blockSize; b = a+blockSize; c = j*blockSize; d = c+blockSize
                Vy[i, j] = 2*np.sum(par_x[a:b, c:d]*par_y[a:b, c:d])
                Vx[i, j] = np.sum(par_y[a:b, c:d]**2 - par_x[a:b, c:d]**2)
    elif method == 'pixel-wise':
        Vx, Vy = np.zeros((N, M)), np.zeros((N, M))
        for i in range(blockSize//2, N - blockSize//2):
            a = i - blockSize//2
            b = a + blockSize
            for j in range(blockSize//2, M - blockSize//2):
                c = j - blockSize//2
                d = c + blockSize
                Vy[i, j] = 2*np.sum(par_x[a:b, c:d]*par_y[a:b, c:d])
                Vx[i, j] = np.sum(par_y[a:b, c:d]**2 - par_x[a:b, c:d]**2)
    # smooth the orientation-tensor components before taking the angle
    gaussianBlurSigma = 2
    gaussian_block = 5 if method == 'block-wise' else 21
    Vy = cv2.GaussianBlur(Vy, (gaussian_block, gaussian_block), gaussianBlurSigma, sigmaY=gaussianBlurSigma)
    Vx = cv2.GaussianBlur(Vx, (gaussian_block, gaussian_block), gaussianBlurSigma, sigmaY=gaussianBlurSigma)
    theta = 0.5*np.arctan2(Vy, Vx)
    return theta
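A quick sanity-check sketch, assuming the calcDirection above sits in a module with numpy, cv2 and scipy.signal.convolve2d imported; a synthetic diagonal ridge pattern gives nearly uniform orientations for interior blocks:

import numpy as np
import cv2
from scipy.signal import convolve2d

# Synthetic ridge pattern varying along the x+y diagonal.
y, x = np.mgrid[0:64, 0:64]
img = 128 + 100 * np.sin(2 * np.pi * (x + y) / 8.0)
theta = calcDirection(img, blockSize=16)
# interior blocks land near 45 degrees for this pattern; border blocks are
# skewed by the zero padding of the 'same' convolution
print(np.degrees(theta).round(1))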
def apply_filter(X, H):
    """Convolve image X with 2D matrix H. Returns a new modified matrix."""
    I = X.copy()
    for c in range(0, pgc.num_channels(X)):
        I[:, :, c] = signal.convolve2d(I[:, :, c], H, mode='same')
    return I
def windowData(instanceArray, windowSize):
    # Sum over a sliding window of windowSize rows, then keep every
    # windowSize-th row: non-overlapping window sums down the columns.
    window = np.ones((windowSize, 1))
    windowed = convolve2d(instanceArray, window, mode="valid")
    return windowed[::windowSize, :]
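A small usage sketch, assuming windowData above is defined alongside these imports; it shows the non-overlapping window sums:

import numpy as np
from scipy.signal import convolve2d

data = np.arange(12, dtype=float).reshape(6, 2)   # 6 rows, 2 feature columns
print(windowData(data, 3))
# [[ 6.  9.]   sums of rows 0-2
#  [24. 27.]]  sums of rows 3-5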
def backward(self, grad_output):
    input, filter = self.saved_tensors
    # grad w.r.t. the layer input: 'full' convolution with the transposed filter
    grad_input = convolve2d(grad_output.numpy(), filter.t().numpy(), mode='full')
    # grad w.r.t. the filter: 'valid' convolution of the input with grad_output
    grad_filter = convolve2d(input.numpy(), grad_output.numpy(), mode='valid')
    return torch.FloatTensor(grad_input), torch.FloatTensor(grad_filter)
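This backward presumably pairs with a forward that runs a 'valid' scipy convolution and saves its operands. The sketch below is an assumption modeled on the common scipy-convolution autograd example, not the original file; it uses the same legacy (non-static) Function style, which recent PyTorch releases no longer accept, and assumes the backward above is available as a plain function in the same module.

import torch
from scipy.signal import convolve2d

class ScipyConv2d(torch.autograd.Function):
    # Hypothetical forward matching the backward above (legacy Function API).
    def forward(self, input, filter):
        result = convolve2d(input.numpy(), filter.numpy(), mode='valid')
        self.save_for_backward(input, filter)
        return torch.FloatTensor(result)

    backward = backward  # reuse the backward defined above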
def fd_conv(Img_xy, h2d, mode='same'):
    #return convolve2d(Img_xy, h2d, mode=mode)
    return fftconvolve(Img_xy, h2d, mode=mode)
def compute_flow(impath1, impath2, outdir,
                 fbcodepath=os.getenv("HOME") + '/fbcode'):
    stem = os.path.splitext(os.path.basename(impath1))[0]
    deepmatch_cmd = os.path.join(fbcodepath,
                                 '_bin/experimental/deeplearning/dpathak' +
                                 '/video-processing/deepmatch/deepmatch')
    call([deepmatch_cmd, impath1, impath2, '-out',
          os.path.join(outdir, stem + '_sparse.txt'), '-downscale', '2'])
    img1 = cv2.imread(impath1).astype(float)
    # edge-strength map: sum of squared finite differences over the channels
    M = np.zeros((img1.shape[0], img1.shape[1]), dtype=np.float32)
    filt = np.array([[1., -1.]]).reshape((1, -1))
    for c in range(3):
        gx = convolve2d(img1[:, :, c], filt, mode='same')
        gy = convolve2d(img1[:, :, c], filt.T, mode='same')
        M = M + gx**2 + gy**2
    M = M / np.max(M)
    with open(os.path.join(outdir, '_edges.bin'), 'wb') as f:
        M.tofile(f)
    epicflow_command = os.path.join(fbcodepath,
                                    '_bin/experimental/deeplearning/dpathak' +
                                    '/video-processing/epicflow/epicflow')
    call([epicflow_command, impath1, impath2,
          os.path.join(outdir, '_edges.bin'),
          os.path.join(outdir, stem + '_sparse.txt'),
          os.path.join(outdir, 'flow.flo')])
    flow = read_flo(os.path.join(outdir, 'flow.flo'))
    # visualize the flow field as HSV (hue = direction, value = magnitude)
    hsv = np.zeros_like(img1).astype(np.uint8)
    hsv[..., 1] = 255
    mag, ang = cv2.cartToPolar(flow[..., 0].astype(float),
                               flow[..., 1].astype(float))
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    hsv[..., 0] = ang * 180 / np.pi / 2
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    cv2.imwrite(os.path.join(outdir, stem + '_flow.png'), bgr)