def threshold_test(self):
    mx_adj, my_adj, mz_adj = self.mag_adj()
    m_normal = np.sqrt(np.square(mx_adj) + np.square(my_adj) + np.square(mz_adj))
    heading = np.degrees(np.arctan2(mx_adj / m_normal, my_adj / m_normal))
    heading_diff = np.diff(heading)
    # Mark the samples where the heading jumps by more than 20 degrees; prepend index 0
    rotate_index = np.insert(np.where(np.absolute(heading_diff) > 20.0)[0], 0, 0)
    plt.plot(heading_diff)
    plt.show()
    # Average the heading within each segment between consecutive rotation marks
    angle_lst = []
    for i in range(rotate_index.size - 1):
        angle_onestep = np.mean(heading[rotate_index[i]:rotate_index[i + 1]])
        angle_lst.append(angle_onestep)
    print(angle_lst)
def fft_test2(self):
    axis = str(self.axis_combobox.currentText())
    if axis.startswith('a'):
        normal_para = 16384.0   # accelerometer scale factor (LSB per g)
    elif axis.startswith('g'):
        normal_para = 131.0     # gyroscope scale factor (LSB per deg/s)
    signal = (self.raw_data[axis] - self.bias_dict[axis]) / normal_para

    n = signal.size                   # Number of data points
    dx = 0.007                        # Sampling period
    Fk = np.fft.fft(signal)           # Fourier coefficients
    nu = np.fft.fftfreq(n, dx)        # Natural frequencies
    # Fk = np.fft.fftshift(Fk)        # Shift zero freq to center
    # nu = np.fft.fftshift(nu)        # Shift zero freq to center
    f, ax = plt.subplots(3, 1, sharex=True)
    ax[0].plot(nu, np.real(Fk))       # Plot cosine terms
    ax[0].set_ylabel(r'$Re[F_k]$', size='x-large')
    ax[1].plot(nu, np.imag(Fk))       # Plot sine terms
    ax[1].set_ylabel(r'$Im[F_k]$', size='x-large')
    ax[2].plot(nu, np.absolute(Fk)**2)  # Plot spectral power
    ax[2].set_ylabel(r'$\vert F_k \vert ^2$', size='x-large')
    ax[2].set_xlabel(r'$\widetilde{\nu}$', size='x-large')
    f.suptitle(axis)
    plt.show()
extract_features.py (project: inception-face-shape-classifier, author: adonistio)
def q(landmarks, index1, index2):
    # Angle (radians) of the line between landmarks index1 and index2, measured from vertical
    x1 = landmarks[int(index1)][0]
    y1 = landmarks[int(index1)][1]
    x2 = landmarks[int(index2)][0]
    y2 = landmarks[int(index2)][1]

    x_diff = float(x1 - x2)
    if y1 == y2:
        y_diff = 0.1                       # avoid division by zero for horizontally aligned points
    elif y1 < y2:
        y_diff = float(np.absolute(y1 - y2))
    else:                                  # y1 > y2
        y_diff = 0.1
        print("Error: Facial feature located below chin.")
    return np.absolute(math.atan(x_diff / y_diff))
#image_dir should contain sub-folders holding the images from which features are to be extracted
#only one face should be present in each image
#if multiple faces are detected by OpenCV, the image must be edited manually; alternatively, the parameters of the face-detection routine can be changed
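As a minimal illustration of that layout, the loop below walks such a directory tree; image_dir, the extension filter, and the per-image feature call are placeholders rather than part of the original project.

import os

image_dir = "images"  # hypothetical root folder; each sub-folder groups images of one class
for class_name in sorted(os.listdir(image_dir)):
    folder = os.path.join(image_dir, class_name)
    if not os.path.isdir(folder):
        continue
    for fname in sorted(os.listdir(folder)):
        if fname.lower().endswith((".jpg", ".jpeg", ".png")):
            image_path = os.path.join(folder, fname)
            # run face detection / landmark extraction on image_path here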
def compute_by_noise_pow(self, signal, n_pow):
    s_spec = np.fft.fft(signal * self._window)   # np.fft.fftpack is gone from recent NumPy; np.fft.fft is equivalent
    s_amp = np.absolute(s_spec)
    s_phase = np.angle(s_spec)
    gamma = self._calc_aposteriori_snr(s_amp, n_pow)   # a posteriori SNR
    xi = self._calc_apriori_snr(gamma)                 # a priori SNR
    self._prevGamma = gamma
    nu = gamma * xi / (1.0 + xi)
    self._G = (self._gamma15 * np.sqrt(nu) / gamma) * np.exp(-nu / 2.0) * \
        ((1.0 + nu) * spc.i0(nu / 2.0) + nu * spc.i1(nu / 2.0))
    # Fall back to safe gains where the spectrum is below the noise floor or the gain is degenerate
    idx = np.less(s_amp ** 2.0, n_pow)
    self._G[idx] = self._constant
    idx = np.isnan(self._G) + np.isinf(self._G)
    self._G[idx] = xi[idx] / (xi[idx] + 1.0)
    idx = np.isnan(self._G) + np.isinf(self._G)
    self._G[idx] = self._constant
    self._G = np.maximum(self._G, 0.0)
    amp = self._G * s_amp
    amp = np.maximum(amp, 0.0)
    amp2 = self._ratio * amp + (1.0 - self._ratio) * s_amp   # blend enhanced and original amplitudes
    self._prevAmp = amp
    spec = amp2 * np.exp(s_phase * 1j)
    return np.real(np.fft.ifft(spec))
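For reference, the gain computed above appears to be the Ephraim and Malah MMSE short-time spectral amplitude (MMSE-STSA) estimator, with self._gamma15 presumably holding Gamma(1.5) = sqrt(pi)/2:

    G = (Gamma(1.5) * sqrt(nu) / gamma) * exp(-nu / 2) * [(1 + nu) * I0(nu / 2) + nu * I1(nu / 2)],  with  nu = gamma * xi / (1 + xi),

where gamma is the a posteriori SNR, xi the a priori SNR, and I0, I1 the modified Bessel functions (spc.i0, spc.i1). The two variants that follow reuse the same scaffolding with different gain rules.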
def compute_by_noise_pow(self, signal, n_pow):
    s_spec = np.fft.fft(signal * self._window)
    s_amp = np.absolute(s_spec)
    s_phase = np.angle(s_spec)
    gamma = self._calc_aposteriori_snr(s_amp, n_pow)
    xi = self._calc_apriori_snr(gamma)
    # xi = self._calc_apriori_snr2(gamma, n_pow)
    self._prevGamma = gamma
    nu = gamma * xi / (1.0 + xi)
    self._G = xi / (1.0 + xi) * np.exp(0.5 * spc.exp1(nu))
    idx = np.less(s_amp ** 2.0, n_pow)
    self._G[idx] = self._constant
    idx = np.isnan(self._G) + np.isinf(self._G)
    self._G[idx] = xi[idx] / (xi[idx] + 1.0)
    idx = np.isnan(self._G) + np.isinf(self._G)
    self._G[idx] = self._constant
    self._G = np.maximum(self._G, 0.0)
    amp = self._G * s_amp
    amp = np.maximum(amp, 0.0)
    amp2 = self._ratio * amp + (1.0 - self._ratio) * s_amp
    self._prevAmp = amp
    spec = amp2 * np.exp(s_phase * 1j)
    return np.real(np.fft.ifft(spec))
def compute_by_noise_pow(self, signal, n_pow):
    s_spec = np.fft.fft(signal * self._window)
    s_amp = np.absolute(s_spec)
    s_phase = np.angle(s_spec)
    gamma = self._calc_aposteriori_snr(s_amp, n_pow)
    # xi = self._calc_apriori_snr2(gamma, n_pow)
    xi = self._calc_apriori_snr(gamma)
    self._prevGamma = gamma
    u = 0.5 - self._mu / (4.0 * np.sqrt(gamma * xi))
    self._G = u + np.sqrt(u ** 2.0 + self._tau / (gamma * 2.0))
    idx = np.less(s_amp ** 2.0, n_pow)
    self._G[idx] = self._constant
    idx = np.isnan(self._G) + np.isinf(self._G)
    self._G[idx] = xi[idx] / (xi[idx] + 1.0)
    idx = np.isnan(self._G) + np.isinf(self._G)
    self._G[idx] = self._constant
    self._G = np.maximum(self._G, 0.0)
    amp = self._G * s_amp
    amp = np.maximum(amp, 0.0)
    amp2 = self._ratio * amp + (1.0 - self._ratio) * s_amp
    self._prevAmp = amp
    spec = amp2 * np.exp(s_phase * 1j)
    return np.real(np.fft.ifft(spec))
def read_mongodb_matrix(tickers, matrix_name):
    mis = MatrixItem.objects(i__in=tickers,
                             j__in=tickers,
                             matrix_name=matrix_name)
    n = len(tickers)
    available_tickers = set([mi.i for mi in mis])
    # Seed a small random symmetric matrix so pairs missing from MongoDB still get values
    np.random.seed(n)
    a = np.absolute(np.random.normal(0, 0.001, [n, n]))
    a_triu = np.triu(a, k=0)
    a_tril = np.tril(a, k=0)   # (unused)
    a_diag = np.diag(np.diag(a))
    a_sym_triu = a_triu + a_triu.T - a_diag
    matrix = pd.DataFrame(a_sym_triu,
                          index=tickers,
                          columns=tickers)
    for mi in mis:
        if abs(mi.v) > 10:
            mi.v = 0.001
        # DataFrame.set_value() was removed from pandas; .at is the scalar equivalent
        matrix.at[mi.i, mi.j] = mi.v
        matrix.at[mi.j, mi.i] = mi.v
    matrix = matrix.round(6)
    return matrix
def outlier_identification(self, model, x_train, y_train):
    # Split the training data into an extra train/test split
    x_train_split, x_test_split, y_train_split, y_test_split = train_test_split(x_train, y_train)
    print('\nOutlier shapes')
    print(np.shape(x_train_split), np.shape(x_test_split), np.shape(y_train_split), np.shape(y_test_split))
    model.fit(x_train_split, y_train_split)
    y_predicted = model.predict(x_test_split)
    residuals = np.absolute(y_predicted - y_test_split)
    rmse_pred_vs_actual = self.rmse(y_predicted, y_test_split)
    # Flag held-out samples whose absolute residual is at least the overall RMSE
    outliers_mask = residuals >= rmse_pred_vs_actual
    outliers_mask = np.concatenate([np.zeros((np.shape(y_train_split)[0],), dtype=bool), outliers_mask])
    not_an_outlier = outliers_mask == 0
    # Reassemble the full training set in the same order as the mask: split part first, then held-out part
    x_out = np.insert(x_train_split, np.shape(x_train_split)[0], x_test_split, axis=0)
    y_out = np.insert(y_train_split, np.shape(y_train_split)[0], y_test_split, axis=0)
    return x_out[not_an_outlier, ], y_out[not_an_outlier, ]
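A minimal usage sketch, assuming the method belongs to a preprocessing object that also defines rmse(); the regressor, the preprocessor instance, and the synthetic data below are illustrative, not from the original project.

import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
x = rng.rand(200, 5)
y = x @ np.array([1.0, -2.0, 0.5, 0.0, 3.0]) + rng.normal(0, 0.1, size=200)
x_clean, y_clean = preprocessor.outlier_identification(LinearRegression(), x, y)  # hypothetical instance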
def wait_for_human_interaction(self, arm_threshold=1, joystick_threshold=0.15):
    rospy.loginfo("We are waiting for human interaction...")

    def detect_arm_variation():
        new_effort = np.array(self.topics.torso_l_j.effort)
        delta = np.absolute(effort - new_effort)
        return np.amax(delta) > arm_threshold

    def detect_joy_variation():
        return np.amax(np.abs(self.topics.joy1.axes)) > joystick_threshold

    effort = np.array(self.topics.torso_l_j.effort)   # baseline effort, captured by the closure above
    rate = rospy.Rate(50)
    is_joystick_demo = None
    while not rospy.is_shutdown():
        if detect_arm_variation():
            is_joystick_demo = False
            break
        elif detect_joy_variation():
            is_joystick_demo = True
            break
        rate.sleep()
    return is_joystick_demo
################################# Service callbacks
def peak(self):
    """Calculate the peak sample value (with sign) for each channel"""
    if len(self.samples) != 0:
        if np.issubdtype(self.samples.dtype, np.floating):
            idx = np.absolute(self.samples).argmax(axis=0)
        else:
            # We have to be careful when checking two's complement since the absolute value
            # of the smallest possible value can't be represented without overflowing. For
            # example: signed 16 bit has range [-32768, 32767] so abs(-32768) cannot be
            # represented in signed 16 bits --> use a bigger datatype
            bigger = np.asarray(self.samples, dtype=np.int64)
            idx = np.absolute(bigger).argmax(axis=0)
        peak = np.array([self.samples[row, col] for col, row in enumerate(idx)])
    else:
        # no samples are set but channels are configured
        idx = np.zeros(self.ch, dtype=np.int64)
        peak = np.zeros(self.ch)
        peak[:] = float('nan')
    return peak, idx
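The two's-complement caveat in the comment above is easy to reproduce in isolation; this standalone snippet (not part of the class) shows why the samples are widened to int64 before taking the absolute value.

import numpy as np

x = np.array([-32768, 100], dtype=np.int16)
print(np.absolute(x))                              # [-32768    100]: abs(-32768) overflows and wraps
print(np.absolute(np.asarray(x, dtype=np.int64)))  # [32768   100]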
def normalise(self):
    """Normalise samples **IN PLACE** so that the new range is
    [-1.0, 1.0] for floats

    TODO: verify [-2^n, 2^n-1] for ints
    """
    peaks, unused_idx = self.peak()
    self._logger.debug("raw peaks: %s" % peaks)
    max_abs = np.max(np.absolute(peaks))
    self._logger.debug("max_abs: %s" % max_abs)
    self.samples = self.samples / max_abs
    peaks, unused_idx = self.peak()
    self._logger.debug("new peaks: %s" % peaks)
#===================================================================================================
# Audio sub-classes
#===================================================================================================
def single_spectrogram(inseq, fs, wlen, h, imag=False):
    """
    imag: when True, also return the STFT phase (np.angle) alongside the log-magnitude
    """
    NFFT = int(2 ** (np.ceil(np.log2(wlen))))
    K = np.sum(hamming(wlen, False)) / wlen          # window normalisation factor
    raw_data = inseq.astype('float32')
    raw_data = raw_data / np.amax(np.absolute(raw_data))
    stft_data, _, _ = STFT(raw_data, wlen, h, NFFT, fs)
    s = np.absolute(stft_data) / wlen / K
    # Single-sided scaling: double every bin except DC (and, for even NFFT, the Nyquist bin)
    if np.fmod(NFFT, 2):
        s[1:, :] *= 2
    else:
        s[1:-1, :] *= 2
    real_data = np.transpose(20 * np.log10(s + 10 ** -6)).astype(np.float32)
    if imag:
        imag_data = np.angle(stft_data).astype(np.float32)
        return real_data, imag_data
    return real_data
car_recognizer.py (project: Vision-based-parking-lot-availability-OpenCV, author: Saar1312)
def getEdges(gray, detector, min_thr=None, max_thr=None):
    """
    Where detector is in {1, 2, 3, 4, 5}:
        1: Laplacian
        2: Sobel x
        3: Sobel y
        4: Canny
        5: Sobel x with positive and negative slope (in 2, negative slopes are lost)
    """
    if min_thr is None:
        min_thr = 100
        max_thr = 200
    if detector == 1:
        return cv2.Laplacian(gray, cv2.CV_64F)
    elif detector == 2:
        return cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=-1)
    elif detector == 3:
        return cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=-1)
    elif detector == 4:
        # The Canny thresholds apply to the intensity gradient (how different a pixel is
        # from its neighbours), not to the intensity itself
        return cv2.Canny(gray, min_thr, max_thr)
    elif detector == 5:
        sobelx64f = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)
        abs_sobel64f = np.absolute(sobelx64f)
        return np.uint8(abs_sobel64f)
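A quick usage sketch; the input file name and the Canny thresholds below are placeholders.

import cv2

gray = cv2.imread("parking_lot.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical input image
canny_edges = getEdges(gray, detector=4, min_thr=50, max_thr=150)
sobelx_edges = getEdges(gray, detector=5)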
def tune_everything(x0squared, C, T, gmin, gmax):
    # First tune based on dynamic range
    if C == 0:
        dr = gmax / gmin
        mustar = ((np.sqrt(dr) - 1) / (np.sqrt(dr) + 1)) ** 2
        alpha_star = (1 + np.sqrt(mustar)) ** 2 / gmax
        return alpha_star, mustar

    dist_to_opt = x0squared
    grad_var = C
    max_curv = gmax
    min_curv = gmin
    const_fact = dist_to_opt * min_curv ** 2 / 2 / grad_var
    coef = [-1, 3, -(3 + const_fact), 1]
    roots = np.roots(coef)
    roots = roots[np.real(roots) > 0]
    roots = roots[np.real(roots) < 1]
    root = roots[np.argmin(np.imag(roots))]
    # np.roots may return the real root with a tiny imaginary part; check it, then drop it
    # (comparing a complex value against 0 and 1 directly would raise TypeError on Python 3)
    assert np.absolute(root.imag) < 1e-6
    root = np.real(root)
    assert 0 < root < 1

    dr = max_curv / min_curv
    assert max_curv >= min_curv
    mu = max(((np.sqrt(dr) - 1) / (np.sqrt(dr) + 1)) ** 2, root ** 2)

    lr_min = (1 - np.sqrt(mu)) ** 2 / min_curv
    lr_max = (1 + np.sqrt(mu)) ** 2 / max_curv

    alpha_star = lr_min
    mustar = mu

    return alpha_star, mustar
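A minimal invocation sketch with made-up statistics (x0squared is the squared distance to the optimum, C the gradient variance, gmin/gmax the curvature range; T is accepted but not used by the function above).

alpha, mu = tune_everything(x0squared=1.0, C=0.01, T=None, gmin=0.1, gmax=10.0)
print(alpha, mu)  # learning rate and momentum suggested for these (made-up) statistics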
def world_2_voxel(world_coordinates, origin, spacing):
    stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
    voxel_coordinates = stretched_voxel_coordinates / spacing
    return voxel_coordinates
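A quick numeric check with made-up CT-style values (origin and spacing in millimetres).

import numpy as np

origin = np.array([-195.0, -195.0, -378.0])
spacing = np.array([0.76, 0.76, 2.5])
world = np.array([-100.0, 50.0, -300.0])
print(world_2_voxel(world, origin, spacing))  # fractional voxel indices: [125., ~322.37, 31.2]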
def test_non_binary_ufunc(self):
    """ Test that ireduce_ufunc raises ValueError if non-binary ufunc is used """
    with self.assertRaises(ValueError):
        ireduce_ufunc(range(10), ufunc=np.absolute)
def slices_from_global_coords(self, slices):
    """
    Used for converting from mip 0 coordinates to upper mip level
    coordinates. This is mainly useful for debugging since the neuroglancer
    client displays the mip 0 coordinates for your cursor.
    """
    maxsize = list(self.mip_volume_size(0)) + [self.num_channels]
    minsize = list(self.mip_voxel_offset(0)) + [0]
    slices = generate_slices(slices, minsize, maxsize)[:3]
    lower = Vec(*map(lambda x: x.start, slices))
    upper = Vec(*map(lambda x: x.stop, slices))
    step = Vec(*map(lambda x: x.step, slices))
    lower /= self.downsample_ratio
    upper /= self.downsample_ratio
    signs = step / np.absolute(step)
    step = signs * max2(np.absolute(step / self.downsample_ratio), Vec(1, 1, 1))
    step = Vec(*np.round(step))
    return [
        slice(lower.x, upper.x, step.x),
        slice(lower.y, upper.y, step.y),
        slice(lower.z, upper.z, step.z),
    ]
def worldToVoxelCoord(worldCoord, origin, spacing):
    stretchedVoxelCoord = np.absolute(worldCoord - origin)
    voxelCoord = stretchedVoxelCoord / spacing
    return voxelCoord
def world_to_voxel_coord(worldCoord, origin, spacing):
    stretchedVoxelCoord = np.absolute(worldCoord - origin)
    voxelCoord = stretchedVoxelCoord / spacing
    return voxelCoord
def world2voxel(world_coord, origin, spacing):
    stretched_voxel_coord = np.absolute(world_coord - origin)
    voxel_coord = stretched_voxel_coord / spacing
    return voxel_coord