# Shared imports for the snippets below (collected from different projects).
import math

import numpy as np
import scipy.ndimage
from scipy import ndimage as filters  # provides the *_filter1d functions used below
from scipy.signal.windows import hamming
from sklearn.metrics import accuracy_score, log_loss


def eemd(data, noise_std=0.2, num_ensembles=100, num_sifts=10):
"""
Ensemble Empirical Mode Decomposition (EEMD)
*** Must still add in post-processing with EMD ***
"""
    # number of modes to generate
num_samples = len(data)
num_modes = int(np.fix(np.log2(num_samples)))-1
    # normalize incoming data
dstd = data.std()
y = data/dstd
# allocate for starting value
all_modes = np.zeros((num_modes+2,num_samples))
# loop over num_ensembles
for e in range(num_ensembles):
# perturb starting data
x0 = y + np.random.randn(num_samples)*noise_std
# save the starting value
all_modes[0] += x0
# loop over modes
for m in range(num_modes):
# do the sifts
imf = x0
for s in range(num_sifts):
imf = _do_one_sift(imf)
# save the imf
all_modes[m+1] += imf
# set the residual
x0 = x0 - imf
# save the final residual
all_modes[-1] += x0
# average everything out and renormalize
return all_modes*dstd/np.float64(num_ensembles)
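
# eemd() relies on a _do_one_sift() helper that this snippet does not define.
# Below is a minimal, hypothetical sketch of one sifting pass (cubic-spline
# envelopes of the local extrema, then subtraction of their mean), plus a
# small usage example; it is an illustration, not the original helper.
from scipy.interpolate import CubicSpline

def _do_one_sift(x):
    n = np.arange(len(x))
    d = np.diff(x)
    # locate extrema via sign changes of the first difference
    maxima = np.where((np.hstack((d, [0])) <= 0) & (np.hstack(([0], d)) > 0))[0]
    minima = np.where((np.hstack((d, [0])) >= 0) & (np.hstack(([0], d)) < 0))[0]
    if maxima.size < 2 or minima.size < 2:
        return x  # too few extrema to build envelopes
    upper = CubicSpline(maxima, x[maxima], bc_type='natural')(n)
    lower = CubicSpline(minima, x[minima], bc_type='natural')(n)
    return x - (upper + lower)/2.0  # remove the local envelope mean

# Example: decompose a noisy two-tone signal. With 1024 samples,
# num_modes = fix(log2(1024)) - 1 = 9, so the result has 11 rows:
# the averaged noisy input, 9 IMFs, and the final residual.
if __name__ == '__main__':
    t = np.linspace(0, 1, 1024)
    sig = np.sin(2*np.pi*5*t) + 0.5*np.sin(2*np.pi*40*t)
    modes = eemd(sig, noise_std=0.2, num_ensembles=50)
    print(modes.shape)  # (11, 1024)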
def generateBoundingBox(map, reg, scale, t):
    stride = 2     # PNet output stride
    cellsize = 12  # PNet receptive-field (window) size
map = map.T
dx1 = reg[0,:,:].T
dy1 = reg[1,:,:].T
dx2 = reg[2,:,:].T
dy2 = reg[3,:,:].T
    (x, y) = np.where(map >= t)
    # keep the raw indices for building the bounding boxes below
    yy = y
    xx = x
'''
if y.shape[0] == 1: # only one point exceed threshold
y = y.T
x = x.T
score = map[x,y].T
dx1 = dx1.T
dy1 = dy1.T
dx2 = dx2.T
dy2 = dy2.T
    # a little strange: when there is only one bb created by PNet
#print "1: x,y", x,y
a = (x*map.shape[1]) + (y+1)
x = a/map.shape[0]
y = a%map.shape[0] - 1
#print "2: x,y", x,y
else:
score = map[x,y]
'''
#print "dx1.shape", dx1.shape
#print 'map.shape', map.shape
score = map[x,y]
reg = np.array([dx1[x,y], dy1[x,y], dx2[x,y], dy2[x,y]])
if reg.shape[0] == 0:
pass
boundingbox = np.array([yy, xx]).T
    bb1 = np.fix((stride * (boundingbox) + 1) / scale).T  # MATLAB indexes from 1, hence its "boundingbox - 1"
    bb2 = np.fix((stride * (boundingbox) + cellsize - 1 + 1) / scale).T  # Python needs no such shift
score = np.array([score])
boundingbox_out = np.concatenate((bb1, bb2, score, reg), axis=0)
#print '(x,y)',x,y
#print 'score', score
#print 'reg', reg
return boundingbox_out.T
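
# A quick shape check with synthetic PNet-style outputs: `prob` stands in for
# the face-probability map and `reg_in` for the 4-channel box regression.
# These inputs are made up for illustration; real values come from the network.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    prob = rng.rand(20, 30)             # score map, one value per sliding window
    reg_in = rng.rand(4, 20, 30) * 0.1  # dx1, dy1, dx2, dy2 offsets
    boxes = generateBoundingBox(prob, reg_in, scale=1.0, t=0.9)
    print(boxes.shape)  # (n_boxes, 9): two box corners, score, four offsets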
def STFT(x, wlen, h, nfft, fs):
########################################################
# Short-Time Fourier Transform %
# with MATLAB Implementation %
# For Python %
# Copier: Nelson Yalta 11/03/15 %
########################################################
# function: [stft, f, t] = stft(x, wlen, h, nfft, fs)
# x - signal in the time domain
# wlen - length of the hamming window
# h - hop size
# nfft - number of FFT points
# fs - sampling frequency, Hz
# f - frequency vector, Hz
# t - time vector, s
# stft - STFT matrix (only unique points, time across columns, freq across rows)
    # represent x as a 1-D array if it is not (MATLAB's x = x(:))
    if x.ndim > 1:
        x = x.ravel()
# length of the signal
xlen = x.shape[0]
# form a periodic hamming window
win = hamming(wlen, False)
# form the stft matrix
rown = int(np.ceil((1.0+nfft)/2))
coln = int(np.fix((xlen-wlen)/h) + 1)
    short_tft = np.zeros((rown, coln), dtype='complex64')
# initialize the indexes
indx = 0
col = 0
# perform STFT
while (indx + wlen <= xlen):
# windowing
        xw = x[indx:indx+wlen]*win
# FFT
X = np.fft.fft(xw,nfft)
# update the stft matrix
short_tft[:,col] = X[0:rown]
# update the indexes
indx += h
col += 1
# calculate the time and frequency vectors
t = np.linspace(wlen/2,wlen/2+(coln-1)*h,coln)/fs
    f = np.arange(0, rown, dtype=np.float32)*fs/nfft
return short_tft, f, t
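
# A minimal usage sketch: STFT of a 100 Hz tone sampled at 8 kHz, using a
# 256-sample periodic Hamming window, a 64-sample hop and a 512-point FFT.
if __name__ == '__main__':
    fs = 8000
    n = np.arange(fs)  # one second of samples
    x = np.sin(2*np.pi*100*n/fs)
    S, f, t = STFT(x, wlen=256, h=64, nfft=512, fs=fs)
    print(S.shape)  # (257, 122): frequency bins x frames
    peak = f[np.argmax(np.abs(S[:, 10]))]
    print(peak)  # close to 100 Hz, within one 15.625 Hz bin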
def balanced_accuracy_score(y_true, y_pred, method='edges', random_state=None):
    """Balanced classification accuracy metric (multi-class).
    Keeps only a subset of the data instances corresponding to the rest class
    (label 0). The size of the subset equals the median group size of the
    other classes. `method` selects the subset: 'random' draws it at random,
    'edges' keeps samples centred within each rest repetition."""
    _check_x_y(y_true, y_pred)  # module-internal validation helper (not shown in this snippet)
classes, n_instances = np.unique(y_true, return_counts=True)
median_instances = np.median(n_instances[1:])
n_classes = classes.size
idx_rest = np.where(y_true == 0)[0] # Find rest instances
idx_else = np.where(y_true != 0)[0] # Find all other instances
    if method == 'random':
        if random_state is not None:
            np.random.seed(random_state)
        idx_keep = np.random.choice(idx_rest, int(median_instances), replace=False)  # Keep a random subset
        idx_final = np.sort(np.hstack((idx_keep, idx_else)))
    elif method == 'edges':
        samples_per_rest_repetition = np.fix(median_instances / (2*n_classes - 1)).astype('int')  # Samples to keep per rest repetition
        if samples_per_rest_repetition < 1:
            samples_per_rest_repetition = 1
changes = np.diff(y_true) # Stimulus change
idx_changes = np.nonzero(changes)[0] # Stimulus change
        idx_from_rest = idx_changes[np.arange(start=0, stop=idx_changes.size, step=2)]  # Changing from rest to movement
        idx_to_rest = idx_changes[np.arange(start=1, stop=idx_changes.size, step=2)]    # Changing from movement to rest
idx_to_rest = np.hstack(([0], idx_to_rest))
idx_keep = []
for ii,jj in zip(idx_to_rest,idx_from_rest):
center = np.fix(ii + (jj-ii)/2)
idx_keep.extend(np.arange(center,center+samples_per_rest_repetition))
idx_keep = np.asarray(idx_keep, dtype='int')
idx_final = np.sort(np.hstack((idx_keep, idx_else)))
true_new = y_true[idx_final]
pred_new = y_pred[idx_final]
return accuracy_score(true_new, pred_new)
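
# A usage sketch. _check_x_y() is a module-internal validation helper not
# shown in this snippet; the stub below stands in for it so the example runs.
# Labels alternate between rest (0) and two movement classes; 'edges' keeps a
# few rest samples centred in each rest block before scoring.
if __name__ == '__main__':
    def _check_x_y(a, b):  # hypothetical stand-in for the module's validator
        pass
    y_true = np.array([0]*20 + [1]*10 + [0]*20 + [2]*10 + [0]*20)
    y_pred = y_true.copy()
    y_pred[25:30] = 0  # misclassify half of the first movement block as rest
    print(balanced_accuracy_score(y_true, y_pred, method='edges'))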
def balanced_log_loss(y_true, y_pred, method='edges', random_state=None):
    """Balanced log-loss metric (multi-class).
    Keeps only a subset of the data instances corresponding to the rest class
    (label 0). The size of the subset equals the median group size of the
    other classes. `method` selects the subset: 'random' draws it at random,
    'edges' keeps samples centred within each rest repetition."""
# y_true = np.asarray(y_true)
# y_pred = np.asarray(y_pred)
classes, n_instances = np.unique(y_true, return_counts=True)
median_instances = np.median(n_instances[1:])
n_classes = classes.size
idx_rest = np.where(y_true == 0)[0] # Find rest instances
idx_else = np.where(y_true != 0)[0] # Find all other instances
    if method == 'random':
        if random_state is not None:
            np.random.seed(random_state)
        idx_keep = np.random.choice(idx_rest, int(median_instances), replace=False)  # Keep a random subset
        idx_final = np.sort(np.hstack((idx_keep, idx_else)))
    elif method == 'edges':
        samples_per_rest_repetition = np.fix(median_instances / (2*n_classes - 1)).astype('int')  # Samples to keep per rest repetition
        if samples_per_rest_repetition < 1:
            samples_per_rest_repetition = 1
changes = np.diff(y_true) # Stimulus change
idx_changes = np.nonzero(changes)[0] # Stimulus change
        idx_from_rest = idx_changes[np.arange(start=0, stop=idx_changes.size, step=2)]  # Changing from rest to movement
        idx_to_rest = idx_changes[np.arange(start=1, stop=idx_changes.size, step=2)]    # Changing from movement to rest
idx_to_rest = np.hstack(([0], idx_to_rest))
idx_keep = []
for ii,jj in zip(idx_to_rest,idx_from_rest):
center = np.fix(ii + (jj-ii)/2)
idx_keep.extend(np.arange(center,center+samples_per_rest_repetition))
idx_keep = np.asarray(idx_keep, dtype='int')
idx_final = np.sort(np.hstack((idx_keep, idx_else)))
true_new = y_true[idx_final]
pred_new = y_pred[idx_final]
return log_loss(true_new, pred_new)
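
# The same subsetting as above, scored with log-loss over class probabilities.
# The probability matrix is synthetic; its columns are assumed ordered as
# classes 0, 1, 2, matching sklearn's log_loss convention.
if __name__ == '__main__':
    y_true = np.array([0]*20 + [1]*10 + [0]*20 + [2]*10 + [0]*20)
    y_prob = np.full((y_true.size, 3), 0.1)
    y_prob[np.arange(y_true.size), y_true] = 0.8
    print(balanced_log_loss(y_true, y_prob, method='edges'))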
# frequest.py, from the Fingerprint-Enhancement-Python project by Utkarsh-Deshmukh
def frequest(im, orientim, windsze, minWaveLength, maxWaveLength):
    rows, cols = np.shape(im)
    # Find the mean orientation within the block. This is done by averaging the
    # sines and cosines of the doubled angles before reconstructing the
    # angle again. This avoids wraparound problems at the origin.
    cosorient = np.mean(np.cos(2*orientim))
    sinorient = np.mean(np.sin(2*orientim))
    orient = math.atan2(sinorient, cosorient)/2
# Rotate the image block so that the ridges are vertical
#ROT_mat = cv2.getRotationMatrix2D((cols/2,rows/2),orient/np.pi*180 + 90,1)
#rotim = cv2.warpAffine(im,ROT_mat,(cols,rows))
    rotim = scipy.ndimage.rotate(im, orient/np.pi*180 + 90, axes=(1, 0), reshape=False, order=3, mode='nearest')
# Now crop the image so that the rotated image does not contain any
# invalid regions. This prevents the projection down the columns
# from being mucked up.
    cropsze = int(np.fix(rows/np.sqrt(2)))
    offset = int(np.fix((rows - cropsze)/2))
    rotim = rotim[offset:offset+cropsze, offset:offset+cropsze]
# Sum down the columns to get a projection of the grey values down
# the ridges.
    proj = np.sum(rotim, axis=0)
    dilation = scipy.ndimage.grey_dilation(proj, windsze, structure=np.ones(windsze))
    temp = np.abs(dilation - proj)
    peak_thresh = 2
    maxpts = (temp < peak_thresh) & (proj > np.mean(proj))
    maxind = np.where(maxpts)
    rows_maxind, cols_maxind = np.shape(maxind)
    # Determine the spatial frequency of the ridges by dividing the
    # distance between the 1st and last peaks by (No of peaks - 1). If no
    # peaks are detected, or the wavelength is outside the allowed bounds,
    # the frequency image is set to 0.
    if cols_maxind < 2:
        freqim = np.zeros(im.shape)
    else:
        NoOfPeaks = cols_maxind
        waveLength = (maxind[0][cols_maxind-1] - maxind[0][0])/(NoOfPeaks - 1)
        if waveLength >= minWaveLength and waveLength <= maxWaveLength:
            freqim = 1/np.double(waveLength) * np.ones(im.shape)
        else:
            freqim = np.zeros(im.shape)
    return freqim
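
# A usage sketch on a synthetic block: horizontal ridges with an 8-pixel
# wavelength and an all-zero orientation image, so the 90-degree rotation
# inside frequest() makes them vertical. The recovered frequency should be
# close to 1/8.
if __name__ == '__main__':
    xx, yy = np.meshgrid(np.arange(32), np.arange(32))
    block = np.sin(2*np.pi*yy/8.0)  # intensity varies down the rows
    orientim = np.zeros((32, 32))
    freq = frequest(block, orientim, windsze=5, minWaveLength=4, maxWaveLength=16)
    print(freq[0, 0])  # 0.125 if the peaks are detected, else 0.0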
def filter_window_polar(img, wsize, fun, rscale, random=False):
r"""Apply a filter of an approximated square window of half size `fsize`
on a given polar image `img`.
Parameters
----------
img : :class:`numpy:numpy.ndarray`
2d array of values to which the filter is to be applied
wsize : float
Half size of the window centred on the pixel [m]
fun : string
name of the 1d filter from :mod:`scipy:scipy.ndimage`
rscale : float
range [m] scale of the polar grid
    random : bool
        True to use a random azimuthal size to avoid long-term biases.
Returns
-------
output : :class:`numpy:numpy.ndarray`
Array with the same shape as `img`, containing the filter's results.
"""
ascale = 2 * np.pi / img.shape[0]
data_filtered = np.empty(img.shape, dtype=img.dtype)
fun = getattr(filters, "%s_filter1d" % fun)
nbins = img.shape[-1]
ranges = np.arange(nbins) * rscale + rscale / 2
asize = ranges * ascale
    if random:
        # prob_round() is an external helper, not defined in this snippet
        na = prob_round(wsize / asize).astype(int)
else:
na = np.fix(wsize / asize + 0.5).astype(int)
# Maximum of adjacent azimuths (higher close to the origin) to
# increase performance
na[na > 20] = 20
sr = np.fix(wsize / rscale + 0.5).astype(int)
for sa in np.unique(na):
imax = np.where(na >= sa)[0][-1] + 1
imin = np.where(na <= sa)[0][0]
if sa == 0:
data_filtered[:, imin:imax] = img[:, imin:imax]
imin2 = max(imin - sr, 0)
imax2 = min(imax + sr, nbins)
temp = img[:, imin2:imax2]
temp = fun(temp, size=2 * sa + 1, mode='wrap', axis=0)
temp = fun(temp, size=2 * sr + 1, axis=1)
imin3 = imin - imin2
imax3 = imin3 + imax - imin
data_filtered[:, imin:imax] = temp[:, imin3:imax3]
return data_filtered
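
# A usage sketch: smooth a synthetic polar field (360 azimuths x 100 range
# bins at 250 m spacing) with a ~500 m half-window. 'uniform' names
# scipy.ndimage's uniform_filter1d; 'minimum' and 'maximum' work the same way.
if __name__ == '__main__':
    rng = np.random.RandomState(42)
    polar = rng.rand(360, 100)
    smoothed = filter_window_polar(polar, wsize=500.0, fun='uniform',
                                   rscale=250.0, random=False)
    print(smoothed.shape)  # (360, 100)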