def __SubDoWavelets(self, waveforms):
    # Haar wavelet decomposition of every spike waveform; keep the
    # `dimensions` coefficients whose distribution deviates most from
    # normality (KS statistic), a wave_clus-style feature selection.
    scales = 4
    dimensions = 10
    nspk, ls = waveforms.shape
    cc = pywt.wavedec(waveforms, "haar", mode="symmetric", level=scales, axis=-1)
    cc = np.hstack(cc)
    sd = list()
    for i in range(ls):
        test_data = cc[:, i]
        # Discard values beyond 3 standard deviations before the KS test
        thr_dist = np.std(test_data, ddof=1) * 3
        thr_dist_min = np.mean(test_data) - thr_dist
        thr_dist_max = np.mean(test_data) + thr_dist
        aux = test_data[(test_data > thr_dist_min) & (test_data < thr_dist_max)]
        if aux.size > 10:
            sd.append(self.__test_ks(aux))
        else:
            sd.append(0)
    ind = np.argsort(sd)
    ind = ind[::-1]
    coeff = ind[:dimensions]
    waveletspk = cc[:, coeff]
    return waveletspk
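For context, the same coefficient-selection idea can be sketched as a standalone function. This is a hedged sketch, not this class's exact code: scipy's kstest replaces the private __test_ks helper, and the synthetic waveforms are only for shape checking.

import numpy as np
import pywt
from scipy.stats import kstest

def wavelet_features(waveforms, scales=4, dimensions=10):
    """Keep the wavelet coefficients whose distribution is least normal."""
    cc = np.hstack(pywt.wavedec(waveforms, "haar", mode="symmetric", level=scales, axis=-1))
    scores = []
    for col in cc.T:
        m, s = col.mean(), col.std(ddof=1)
        inliers = col[(col > m - 3 * s) & (col < m + 3 * s)]
        if inliers.size > 10:
            scores.append(kstest(inliers, "norm", args=(inliers.mean(), inliers.std(ddof=1))).statistic)
        else:
            scores.append(0.0)
    keep = np.argsort(scores)[::-1][:dimensions]
    return cc[:, keep]

feats = wavelet_features(np.random.randn(200, 64))   # 200 spikes, 64 samples each
print(feats.shape)                                   # (200, 10)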
def applyFilter(data, b, a, padding=100, bidir=True):
    """Apply a linear filter with coefficients a, b. Optionally pad the data before filtering
    and/or run the filter in both directions."""
    try:
        import scipy.signal
    except ImportError:
        raise Exception("applyFilter() requires the package scipy.signal.")

    d1 = data.view(np.ndarray)

    if padding > 0:
        d1 = np.hstack([d1[:padding], d1, d1[-padding:]])

    if bidir:
        d1 = scipy.signal.lfilter(b, a, scipy.signal.lfilter(b, a, d1)[::-1])[::-1]
    else:
        d1 = scipy.signal.lfilter(b, a, d1)

    if padding > 0:
        d1 = d1[padding:-padding]

    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        return MetaArray(d1, info=data.infoCopy())
    else:
        return d1
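A minimal usage sketch, assuming this module's applyFilter is importable; the coefficients come from a standard Butterworth design.

import numpy as np
import scipy.signal

b, a = scipy.signal.butter(4, 0.1)              # 4th-order low-pass, cutoff 0.1 x Nyquist
noisy = np.sin(np.linspace(0, 20, 2000)) + 0.3 * np.random.randn(2000)
smoothed = applyFilter(noisy, b, a, padding=100, bidir=True)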
def pull_item(self, index):
    img_id = self.ids[index]

    target = ET.parse(self._annopath % img_id).getroot()
    img = cv2.imread(self._imgpath % img_id)
    height, width, channels = img.shape

    if self.target_transform is not None:
        target = self.target_transform(target, width, height)

    if self.transform is not None:
        target = np.array(target)
        img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
        # to rgb
        img = img[:, :, (2, 1, 0)]
        # img = img.transpose(2, 0, 1)
        target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
    return torch.from_numpy(img).permute(2, 0, 1), target, height, width
    # return torch.from_numpy(img), target, height, width
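The hstack at the end simply appends the class-label column to the [N, 4] box array, giving the [N, 5] target that SSD-style loaders expect. A minimal illustration with made-up values:

import numpy as np

boxes = np.array([[0.10, 0.20, 0.50, 0.60]])          # normalized xmin, ymin, xmax, ymax
labels = np.array([7])                                 # class index
target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
print(target)                                          # [[0.1 0.2 0.5 0.6 7. ]]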
def text_to_char_array(original):
    r"""
    Given a Python string ``original``, remove unsupported characters, map characters
    to integers and return a numpy array representing the processed string.
    """
    # Create list of sentence's words with spaces replaced by ''
    result = original.replace(" '", "")  # TODO: Deal with this properly
    result = result.replace("'", "")     # TODO: Deal with this properly
    result = result.replace(' ', '  ')
    result = result.split(' ')

    # Tokenize words into letters adding in SPACE_TOKEN where required
    result = np.hstack([SPACE_TOKEN if xt == '' else list(xt) for xt in result])

    # Map characters into indices
    result = np.asarray([SPACE_INDEX if xt == SPACE_TOKEN else ord(xt) - FIRST_INDEX for xt in result])

    return result
def text_to_char_array(original):
    r"""
    Given a Python string ``original``, remove unsupported characters, map characters
    to integers and return a numpy array representing the processed string.
    """
    # Create list of sentence's words with spaces replaced by ''
    result = original.replace(" '", "")  # TODO: Deal with this properly
    result = result.replace("'", "")     # TODO: Deal with this properly
    result = result.replace(' ', '  ')
    result = result.split(' ')

    # Tokenize words into letters adding in SPACE_TOKEN where required
    result = np.hstack([SPACE_TOKEN if xt == '' else list(xt) for xt in result])

    # Map characters into indices (digits map to 27 and above)
    result = np.asarray([SPACE_INDEX if xt == SPACE_TOKEN else (
        ord(xt) - FIRST_INDEX if ord(xt) > FIRST_INDEX else 27 + int(xt)) for xt in result])

    return result
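A small usage sketch; the SPACE_TOKEN / SPACE_INDEX / FIRST_INDEX constants below follow the usual DeepSpeech conventions and are restated here as assumptions.

import numpy as np

SPACE_TOKEN = '<space>'
SPACE_INDEX = 0
FIRST_INDEX = ord('a') - 1        # so 'a' -> 1, 'b' -> 2, ..., 'z' -> 26

print(text_to_char_array("hello world"))
# [ 8  5 12 12 15  0 23 15 18 12  4]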
def plot_electrodes(self):
    if not getattr(self, 'collections', None):
        # It is important to set one facecolor per point so that we can change
        # it later
        self.electrode_collection = self.electrode_ax.scatter(self.x_position,
                                                              self.y_position,
                                                              facecolor=['black' for _ in self.x_position],
                                                              s=30)
        self.electrode_ax.set_xlabel('Space [um]')
        self.electrode_ax.set_xticklabels([])
        self.electrode_ax.set_ylabel('Space [um]')
        self.electrode_ax.set_yticklabels([])
    else:
        self.electrode_collection.set_offsets(np.hstack([self.x_position[np.newaxis, :].T,
                                                         self.y_position[np.newaxis, :].T]))

    ax, x, y = self.electrode_ax, self.y_position, self.x_position
    ymin, ymax = min(x), max(x)
    yrange = (ymax - ymin) * 0.5 * 1.05  # stretch everything a bit
    ax.set_ylim((ymax + ymin) * 0.5 - yrange, (ymax + ymin) * 0.5 + yrange)

    xmin, xmax = min(y), max(y)
    xrange = (xmax - xmin) * 0.5 * 1.05  # stretch everything a bit
    ax.set_xlim((xmax + xmin) * 0.5 - xrange, (xmax + xmin) * 0.5 + xrange)

    self.ui.raw_data.draw_idle()
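set_offsets expects an N x 2 array of point positions; the hstack of the two transposed row vectors builds exactly that. A standalone illustration:

import numpy as np

x = np.array([0.0, 1.0, 2.0])
y = np.array([10.0, 11.0, 12.0])
offsets = np.hstack([x[np.newaxis, :].T, y[np.newaxis, :].T])
print(offsets.shape)                  # (3, 2)
# equivalent, and arguably clearer:
offsets = np.column_stack([x, y])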
def read_image(imagery_path):
    # Read image
    dataset = gdal.Open(imagery_path)
    dsmatrix = dataset.ReadAsArray(xoff=0, yoff=0, xsize=dataset.RasterXSize, ysize=dataset.RasterYSize)
    # Get geographic metadata
    geo_trans_list = dataset.GetGeoTransform()
    proj_str = dataset.GetProjection()
    num_bands = dataset.RasterCount

    # Handle both single-band and multi-band rasters
    if num_bands > 1:
        # Unfold the (bands, rows, cols) array row by row into a 2-D array
        rows = dsmatrix.shape[1]
        cols = dsmatrix.shape[2]
        data_array = dsmatrix[:, 0, :]
        for irow in range(1, rows):
            tempmatrix = dsmatrix[:, irow, :]
            data_array = np.hstack((data_array, tempmatrix))
    else:
        # Unfold the (rows, cols) array row by row into one long vector
        rows = dsmatrix.shape[0]
        cols = dsmatrix.shape[1]
        data_array = dsmatrix[0, :]
        for irow in range(1, rows):
            tempmatrix = dsmatrix[irow, :]
            data_array = np.hstack((data_array, tempmatrix))

    data_frame = pd.DataFrame(data_array.T)
    return data_frame, rows, cols, geo_trans_list, proj_str, num_bands
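A numpy-only sketch of the multi-band unfolding step (synthetic array, no GDAL needed), which also shows that the loop is equivalent to a single reshape:

import numpy as np

dsmatrix = np.arange(24).reshape(3, 2, 4)        # bands x rows x cols
data_array = dsmatrix[:, 0, :]
for irow in range(1, dsmatrix.shape[1]):
    data_array = np.hstack((data_array, dsmatrix[:, irow, :]))
print(data_array.shape)                          # (3, 8): bands x (rows * cols)
assert (data_array == dsmatrix.reshape(3, -1)).all()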
def KeyGen(**kwargs):
    '''
    Appendix B of BLISS paper
    m_bar = m + n
    o/p:
    A: Public Key, n x m' numpy array
    S: Secret Key, m' x n numpy array
    '''
    q, n, m, alpha = kwargs['q'], kwargs['n'], kwargs['m'], kwargs['alpha']
    Aq_bar = util.crypt_secure_matrix(-(q - 1) / 2, (q - 1) / 2, n, m)
    S_bar = util.crypt_secure_matrix(-(2) ** alpha, (2) ** alpha, m, n)  # alpha is small enough, we need not reduce (mod q)
    S = np.vstack((S_bar, np.eye(n, dtype=int)))  # dimension is m_bar x n, elements are in Z mod(2q)
    A = np.hstack((2 * Aq_bar, q * np.eye(n, dtype=int) - 2 * np.matmul(Aq_bar, S_bar)))  # dimension is n x m_bar, elements are in Z mod(2q)
    # return util.matrix_to_Zq(A, 2*q), S, Aq_bar, S_bar
    return util.matrix_to_Zq(A, 2 * q), S
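A quick sanity check of the construction with plain numpy: random integer matrices stand in for util.crypt_secure_matrix, and the parameter values are illustrative. It shows why A·S equals q·I before the mod-2q reduction.

import numpy as np

q, n, m, alpha = 7681, 4, 8, 1
Aq_bar = np.random.randint(-(q - 1) // 2, (q - 1) // 2 + 1, size=(n, m))
S_bar = np.random.randint(-2 ** alpha, 2 ** alpha + 1, size=(m, n))

S = np.vstack((S_bar, np.eye(n, dtype=int)))
A = np.hstack((2 * Aq_bar, q * np.eye(n, dtype=int) - 2 * Aq_bar @ S_bar))

# 2*Aq_bar*S_bar + q*I - 2*Aq_bar*S_bar == q*I, so the product is exact
assert (A @ S == q * np.eye(n, dtype=int)).all()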
def repeat(tensor: tf.Tensor, repeats: int, axis: int) -> tf.Tensor:
    """
    Repeat elements of the input tensor in the specified axis ``repeats``-times.

    .. note::
        Chaining of this op may produce TF warnings although the performance seems to be unaffected.

    :param tensor: TF tensor to be repeated
    :param repeats: number of repeats
    :param axis: axis to repeat
    :return: tensor with repeated elements
    """
    shape = tensor.get_shape().as_list()

    dims = np.arange(len(tensor.shape))
    prepare_perm = np.hstack(([axis], np.delete(dims, axis)))
    restore_perm = np.hstack((dims[1:axis + 1], [0], dims[axis + 1:]))

    indices = tf.cast(tf.floor(tf.range(0, shape[axis] * repeats) / tf.constant(repeats)), 'int32')

    shuffled = tf.transpose(tensor, prepare_perm)
    repeated = tf.gather(shuffled, indices)
    return tf.transpose(repeated, restore_perm)
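Usage sketch, assuming a TensorFlow 1.x graph session (which this helper appears to target):

import tensorflow as tf

x = tf.constant([[1, 2],
                 [3, 4]])
y = repeat(x, repeats=3, axis=1)
with tf.Session() as sess:
    print(sess.run(y))    # [[1 1 1 2 2 2]
                          #  [3 3 3 4 4 4]]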
def test_uw_rgbd_scene(version='v1'):
    from pybot.vision.image_utils import to_color
    from pybot.vision.imshow_utils import imshow_cv

    v1_directory = '/media/spillai/MRG-HD1/data/rgbd-scenes-v1/'
    v2_directory = '/media/spillai/MRG-HD1/data/rgbd-scenes-v2/rgbd-scenes-v2/'

    if version == 'v1':
        rgbd_data_uw = UWRGBDSceneDataset(version='v1',
                                          directory=os.path.join(v1_directory, 'rgbd-scenes'),
                                          aligned_directory=os.path.join(v1_directory, 'rgbd-scenes-aligned'))
    elif version == 'v2':
        rgbd_data_uw = UWRGBDSceneDataset(version='v2', directory=v2_directory)
    else:
        raise RuntimeError('''Version %s not supported. '''
                           '''Check dataset and choose v1/v2 scene dataset''' % version)

    for f in rgbd_data_uw.iteritems(every_k_frames=5, with_ground_truth=True):
        vis = rgbd_data_uw.annotate(f)
        imshow_cv('frame', np.hstack([f.img, vis]), text='Image')
        imshow_cv('depth', (f.depth / 16).astype(np.uint8), text='Depth')
        cv2.waitKey(100)

    return rgbd_data_uw
def __mul__(self, other):
    """
    Left-multiply RigidTransform with another rigid transform

    Two variants:
        RigidTransform: Identical to oplus operation
        ndarray: transform [N x 3] point set (X_2 = p_21 * X_1)
    """
    if isinstance(other, DualQuaternion):
        return DualQuaternion.from_dq(other.real * self.real,
                                      other.dual * self.real + other.real * self.dual)
    elif isinstance(other, float):
        return DualQuaternion.from_dq(self.real * other, self.dual * other)
    # elif isinstance(other, nd.array):
    #     X = np.hstack([other, np.ones((len(other),1))]).T
    #     return (np.dot(self.matrix, X).T)[:,:3]
    else:
        raise TypeError('__mul__ typeerror {:}'.format(type(other)))
def im_describe(*args, **kwargs):
    """
    Describe image using dense sampling / specific detector-descriptor combination.
    Sugar for description-only call.
    """
    kpts, desc = im_detect_and_describe(*args, **kwargs)
    return desc

# def color_codes(img, kpts):
#     # Extract color information (Lab)
#     pts = np.vstack([kp.pt for kp in kpts]).astype(np.int32)
#     imgc = median_blur(img, size=5)
#     cdesc = img[pts[:,1], pts[:,0]]
#     return kpts, np.hstack([desc, cdesc])

# =====================================================================
# General-purpose object recognition interfaces, and functions
# ---------------------------------------------------------------------
def ray(self, pts, undistort=True, rotate=False, normalize=False):
    """
    Returns the ray corresponding to the points.
    Optionally undistort (defaults to true), and
    rotate ray to the camera's viewpoint
    """
    upts = self.undistort_points(pts) if undistort else pts
    ret = unproject_points(
        np.hstack([(colvec(upts[:, 0]) - self.cx) / self.fx, (colvec(upts[:, 1]) - self.cy) / self.fy])
    )
    if rotate:
        ret = self.extrinsics.rotate_vec(ret)
    if normalize:
        ret = ret / np.linalg.norm(ret, axis=1)[:, np.newaxis]
    return ret
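The hstack inside the unproject_points call just pairs the two normalized pixel coordinates column-wise. The code below re-creates that idea with plain arrays and illustrative intrinsics; colvec and unproject_points are this project's own helpers, so this is only a sketch of the underlying math, not the library call.

import numpy as np

pts = np.array([[320.0, 240.0],
                [100.0,  50.0]])                 # pixel coordinates
fx = fy = 500.0
cx, cy = 320.0, 240.0

xn = (pts[:, 0:1] - cx) / fx                     # column vector, like colvec(...)
yn = (pts[:, 1:2] - cy) / fy
rays = np.hstack([xn, yn, np.ones_like(xn)])     # back-project onto the z = 1 plane
rays /= np.linalg.norm(rays, axis=1)[:, np.newaxis]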
From linear_regression.py in Python-Machine-Learning-By-Example (PacktPublishing):
def train_linear_regression(X_train, y_train, max_iter, learning_rate, fit_intercept=False):
    """ Train a linear regression model with gradient descent
    Args:
        X_train, y_train (numpy.ndarray, training data set)
        max_iter (int, number of iterations)
        learning_rate (float)
        fit_intercept (bool, with an intercept w0 or not)
    Returns:
        numpy.ndarray, learned weights
    """
    if fit_intercept:
        intercept = np.ones((X_train.shape[0], 1))
        X_train = np.hstack((intercept, X_train))
    weights = np.zeros(X_train.shape[1])
    for iteration in range(max_iter):
        weights = update_weights_gd(X_train, y_train, weights, learning_rate)
        # Check the cost for every 100 (for example) iterations
        if iteration % 100 == 0:
            print(compute_cost(X_train, y_train, weights))
    return weights
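update_weights_gd and compute_cost are defined elsewhere in the same chapter; a minimal, hedged version of both is given below so the trainer above can be exercised end to end (not the book's exact code).

import numpy as np

def compute_cost(X, y, weights):
    """Mean squared error cost J(w)."""
    predictions = X.dot(weights)
    return np.mean((predictions - y) ** 2) / 2.0

def update_weights_gd(X, y, weights, learning_rate):
    """One batch gradient-descent step for linear regression."""
    predictions = X.dot(weights)
    gradient = X.T.dot(predictions - y) / len(y)
    return weights - learning_rate * gradient

X = np.random.rand(100, 3)
y = X.dot(np.array([1.0, -2.0, 0.5])) + 0.3
w = train_linear_regression(X, y, max_iter=500, learning_rate=0.1, fit_intercept=True)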
From logistic_regression_from_scratch.py in Python-Machine-Learning-By-Example (PacktPublishing):
def train_logistic_regression(X_train, y_train, max_iter, learning_rate, fit_intercept=False):
    """ Train a logistic regression model
    Args:
        X_train, y_train (numpy.ndarray, training data set)
        max_iter (int, number of iterations)
        learning_rate (float)
        fit_intercept (bool, with an intercept w0 or not)
    Returns:
        numpy.ndarray, learned weights
    """
    if fit_intercept:
        intercept = np.ones((X_train.shape[0], 1))
        X_train = np.hstack((intercept, X_train))
    weights = np.zeros(X_train.shape[1])
    for iteration in range(max_iter):
        weights = update_weights_gd(X_train, y_train, weights, learning_rate)
        # Check the cost for every 1000 (for example) iterations
        if iteration % 1000 == 0:
            print(compute_cost(X_train, y_train, weights))
    return weights
From logistic_regression_from_scratch.py in Python-Machine-Learning-By-Example (PacktPublishing):
def train_logistic_regression(X_train, y_train, max_iter, learning_rate, fit_intercept=False):
    """ Train a logistic regression model
    Args:
        X_train, y_train (numpy.ndarray, training data set)
        max_iter (int, number of iterations)
        learning_rate (float)
        fit_intercept (bool, with an intercept w0 or not)
    Returns:
        numpy.ndarray, learned weights
    """
    if fit_intercept:
        intercept = np.ones((X_train.shape[0], 1))
        X_train = np.hstack((intercept, X_train))
    weights = np.zeros(X_train.shape[1])
    for iteration in range(max_iter):
        weights = update_weights_sgd(X_train, y_train, weights, learning_rate)
        # Check the cost for every 2 (for example) iterations
        if iteration % 2 == 0:
            print(compute_cost(X_train, y_train, weights))
    return weights

# Train the SGD model based on 10000 samples
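Both logistic trainers call sigmoid-based helpers from the same chapter; the versions below are a hedged sketch with the same names and call signatures, not the book's exact implementations.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def compute_cost(X, y, weights):
    """Average negative log-likelihood."""
    p = sigmoid(X.dot(weights))
    return -np.mean(y * np.log(p + 1e-12) + (1 - y) * np.log(1 - p + 1e-12))

def update_weights_gd(X, y, weights, learning_rate):
    """One full-batch gradient step on the log-likelihood."""
    p = sigmoid(X.dot(weights))
    return weights + learning_rate / len(y) * X.T.dot(y - p)

def update_weights_sgd(X, y, weights, learning_rate):
    """One epoch of per-sample (stochastic) updates."""
    for xi, yi in zip(X, y):
        weights = weights + learning_rate * (yi - sigmoid(xi.dot(weights))) * xi
    return weights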
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = compute_xopt(self.rseed, dim)
        scale = max(1, dim ** .5 / 8.)  # nota: different from scales in F8
        self.linearTF = scale * compute_rotation(self.rseed, dim)
        self.xopt = np.hstack(dot(.5 * np.ones((1, dim)), self.linearTF.T)) / scale ** 2

    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        scale = max(1, dim ** .5 / 8.)
        self.linearTF = scale * compute_rotation(self.rseed, dim)
        # if self.zerox:
        #     self.xopt = zeros(dim)  # does not work here
        # else:
        # TODO: clean this line
        self.xopt = np.hstack(dot(self.linearTF, 0.5 * np.ones((dim, 1)) / scale ** 2))

    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)
def modeFilter(data, window=500, step=None, bins=None):
    """Filter based on histogram-based mode function"""
    d1 = data.view(np.ndarray)
    vals = []
    l2 = int(window / 2.)
    if step is None:
        step = l2
    i = 0
    while True:
        if i > len(data) - step:
            break
        vals.append(mode(d1[i:i + window], bins))
        i += step

    chunks = [np.linspace(vals[0], vals[0], l2)]
    for i in range(len(vals) - 1):
        chunks.append(np.linspace(vals[i], vals[i + 1], step))
    remain = len(data) - step * (len(vals) - 1) - l2
    chunks.append(np.linspace(vals[-1], vals[-1], remain))
    d2 = np.hstack(chunks)

    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        return MetaArray(d2, info=data.infoCopy())
    return d2
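Usage sketch: mode() here is the module's histogram-mode helper, so a simple stand-in is included to make the call runnable (an assumption, not the original helper).

import numpy as np

def mode(data, bins=None):
    """Stand-in: center of the most populated histogram bin."""
    if bins is None:
        bins = int(np.sqrt(len(data)))
    counts, edges = np.histogram(data, bins=bins)
    k = np.argmax(counts)
    return 0.5 * (edges[k] + edges[k + 1])

trace = 1.0 + 0.05 * np.random.randn(5000)   # noisy baseline around 1.0
baseline = modeFilter(trace, window=500)
print(baseline.shape)                        # (5000,)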
def __loadChnTimeWave(self, f, selectChan):
    times = list()
    waveforms = list()
    spk_startswith = "spike_{0}".format(selectChan)
    for chn_unit in f["spikes"].keys():
        if chn_unit.startswith(spk_startswith):
            time = f["spikes"][chn_unit]["times"].value
            waveform = f["spikes"][chn_unit]["waveforms"].value
            times.append(time)
            waveforms.append(waveform)
    if times:
        times = np.hstack(times)
        waveforms = np.vstack(waveforms)
        sort_index = np.argsort(times)
        waveforms = waveforms[sort_index]
        times = times[sort_index]
        return times, waveforms
    else:
        return None, None
def __load_waveforms(self, selectChan, file_name):
    spk_startswith = "spike_{0}".format(selectChan)
    with hp.File(file_name, "r") as f:
        times = list()
        waveforms = list()
        for chn_unit in f["spikes"].keys():
            if chn_unit.startswith(spk_startswith):
                tep_time = f["spikes"][chn_unit]["times"].value
                waveform = f["spikes"][chn_unit]["waveforms"].value
                times.append(tep_time)
                waveforms.append(waveform)
        if times:
            times = np.hstack(times)
            waveforms = np.vstack(waveforms)
            sort_index = np.argsort(times)
            waveforms = waveforms[sort_index]
            return waveforms
        else:
            return None
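The core pattern in both loaders is the same: concatenate the per-unit arrays, then reorder spikes chronologically. A synthetic illustration of just that step:

import numpy as np

times_per_unit = [np.array([0.5, 2.0]), np.array([1.0, 3.0])]
waves_per_unit = [np.random.randn(2, 32), np.random.randn(2, 32)]

times = np.hstack(times_per_unit)        # (4,)    all spike times
waveforms = np.vstack(waves_per_unit)    # (4, 32) matching waveforms
order = np.argsort(times)
times, waveforms = times[order], waveforms[order]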
def __test_ks(self, x):
    # Kolmogorov-Smirnov statistic of x against a normal distribution
    # fitted to the sample (mean and unbiased std of x).
    x = x[~np.isnan(x)]
    n = x.size
    x.sort()

    # Empirical CDF, with duplicate values collapsed
    yCDF = np.arange(1, n + 1) / float(n)
    notdup = np.hstack([np.diff(x, 1), [1]])
    notdup = notdup > 0
    x_expcdf = x[notdup]
    y_expcdf = np.hstack([[0], yCDF[notdup]])

    # Theoretical CDF of the fitted normal at the sample points
    zScores = (x_expcdf - np.mean(x)) / np.std(x, ddof=1)
    mu = 0
    sigma = 1
    theocdf = 0.5 * erfc(-(zScores - mu) / (np.sqrt(2) * sigma))

    # Largest deviation between empirical and theoretical CDF (D+ / D-)
    delta1 = y_expcdf[:-1] - theocdf
    delta2 = y_expcdf[1:] - theocdf
    deltacdf = np.abs(np.hstack([delta1, delta2]))
    KSmax = deltacdf.max()
    return KSmax
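For intuition: on continuous data this is the same statistic scipy reports for a KS test against a normal fitted to the sample, so scipy can serve as a cross-check (a sketch; the name-mangled method is normally only called from inside the class).

import numpy as np
from scipy.stats import kstest

x = 1.0 + 2.0 * np.random.randn(500)
d = kstest(x, 'norm', args=(x.mean(), x.std(ddof=1))).statistic
# self.__test_ks(x) computed inside the class should agree with d
# up to floating-point precision when x has no repeated values.
print(d)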
From trainModel.py in the Sound-classification-on-Raspberry-Pi-with-Tensorflow project (author: GianlucaPaolocci):
def parse_audio_files(parent_dir, sub_dirs, file_ext='*.wav'):
    ignored = 0
    features, labels, name = np.empty((0, 161)), np.empty(0), np.empty(0)
    for label, sub_dir in enumerate(sub_dirs):
        print(sub_dir)
        for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):
            try:
                mfccs, chroma, mel, contrast, tonnetz = extract_features(fn)
                ext_features = np.hstack([mfccs, chroma, mel, contrast, tonnetz])
                features = np.vstack([features, ext_features])
                l = [fn.split('-')[1]] * (mfccs.shape[0])
                labels = np.append(labels, l)
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                ignored += 1
    print("Ignored files: ", ignored)
    return np.array(features), np.array(labels, dtype=np.int)
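extract_features comes from the same project and returns five frame-level feature blocks per clip; the hstack/vstack pair then builds one feature row per frame. A numpy-only illustration of the shapes involved (the individual widths are placeholders and do not add up to the 161 columns the real extractor produces):

import numpy as np

frames = 50                                   # frames in one audio clip (illustrative)
mfccs    = np.random.rand(frames, 13)
chroma   = np.random.rand(frames, 12)
mel      = np.random.rand(frames, 128)
contrast = np.random.rand(frames, 7)
tonnetz  = np.random.rand(frames, 6)

ext_features = np.hstack([mfccs, chroma, mel, contrast, tonnetz])
print(ext_features.shape)                     # (50, 166) with these widths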
def _get_rois_blob(im_rois, im_scale_factors):
    """Converts RoIs into network inputs.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale_factors (list): scale factors as returned by _get_image_blob
    Returns:
        blob (ndarray): R x 5 matrix of RoIs in the image pyramid
    """
    rois_blob_real = []

    for i in range(len(im_scale_factors)):
        rois, levels = _project_im_rois(im_rois, np.array([im_scale_factors[i]]))
        rois_blob = np.hstack((levels, rois))
        rois_blob_real.append(rois_blob.astype(np.float32, copy=False))

    return rois_blob_real
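Each per-scale blob is the pyramid-level column prepended to the scaled boxes; a minimal illustration with made-up boxes and a single scale (what _project_im_rois returns is approximated here):

import numpy as np

im_rois = np.array([[10, 20, 100, 200],
                    [30, 40,  50,  60]], dtype=np.float32)
scale = 2.0
rois = im_rois * scale                        # boxes mapped into the scaled image
levels = np.zeros((len(rois), 1))             # all boxes assigned to pyramid level 0
rois_blob = np.hstack((levels, rois))         # R x 5: [level, x1, y1, x2, y2]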
def _makeflat(self, start=None, end=None, groups=False):
    eeg = list()
    for sub in self.data[start:end]:
        if len(sub) % self.chunk_len == 0:
            eeg.append(sub.reshape([-1, self.chunk_len, 3]))
        else:
            print('ERROR: Please choose a chunk length that is a factor of {}. Current len = {}'.format(self.samples_per_epoch, len(sub)))
            return [0, 0]
    hypno = list()
    group = list()
    hypno_repeat = self.samples_per_epoch / self.chunk_len
    idx = 0
    for sub in self.hypno[start:end]:
        hypno.append(np.repeat(sub, hypno_repeat))
        group.append(np.repeat(idx, len(hypno[-1])))
        idx += 1
    if groups:
        return np.vstack(eeg), np.hstack(hypno), np.hstack(group)
    else:
        return np.vstack(eeg), np.hstack(hypno)
def get_all_features_m(data):
    """
    returns a vector with extracted features
    :param data: datapoints x samples x dimensions (dimensions: EEG, EMG, EOG)
    """
    p = Pool(3)
    t1 = p.apply_async(feat_eeg, (data[:, :, 0],))
    t2 = p.apply_async(feat_eog, (data[:, :, 1],))
    t3 = p.apply_async(feat_emg, (data[:, :, 2],))
    eeg = t1.get(timeout=1200)
    eog = t2.get(timeout=1200)
    emg = t3.get(timeout=1200)
    p.close()
    p.join()
    return np.hstack([eeg, emg, eog])
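Usage sketch, assuming feat_eeg, feat_eog and feat_emg (this project's per-channel extractors) are importable; the shapes and channel order follow the code above and are otherwise illustrative.

import numpy as np

# 100 epochs x 3000 samples x 3 channels (as used by the code: EEG, EOG, EMG)
data = np.random.randn(100, 3000, 3).astype(np.float32)
features = get_all_features_m(data)   # one row of concatenated features per epoch
print(features.shape)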