def _block2df(block,obstypes,svnames,svnum):
"""
    input: block of text corresponding to one time increment (INTERVAL) of a RINEX file
    output: 3-D array (SV, observable, {value, LLI, SSI}) of float64 data from the block. Future: consider whether NumPy, pandas, or xray is the best fit.
"""
nobs = len(obstypes)
stride=3
strio = BytesIO(block.encode())
barr = np.genfromtxt(strio, delimiter=(14,1,1)*5).reshape((svnum,-1), order='C')
data = barr[:,0:nobs*stride:stride]
lli = barr[:,1:nobs*stride:stride]
ssi = barr[:,2:nobs*stride:stride]
data = np.vstack(([data.T],[lli.T],[ssi.T])).T
return data
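# Usage sketch (not from the original source): exercising _block2df with one
# fabricated satellite record, assuming five observation types so the record
# fits a single 80-column line (five 14-char values, each followed by a 1-char
# LLI and a 1-char SSI flag). The values and SV name below are made up.
import numpy as np
from io import BytesIO

obstypes = ['C1', 'L1', 'L2', 'P1', 'P2']
block = ("  23619095.450" + "1" + "7") * 5  # one fabricated 80-column line
arr = _block2df(block, obstypes, svnames=['G01'], svnum=1)
print(arr.shape)  # (1, 5, 3): SV x observable x (value, LLI, SSI)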
# Python vstack() usage examples (collected source code)
def _block2df(block,obstypes,svnames,svnum):
"""
    input: block of text corresponding to one time increment (INTERVAL) of a RINEX file
    output: 3-D array (observable, SV, {value, LLI, SSI}) of float64 data from the block.
"""
nobs = len(obstypes)
stride=3
strio = BytesIO(block.encode())
barr = np.genfromtxt(strio, delimiter=(14,1,1)*5).reshape((svnum,-1), order='C')
data = barr[:,0:nobs*stride:stride]
lli = barr[:,1:nobs*stride:stride]
ssi = barr[:,2:nobs*stride:stride]
    data = np.vstack(([data],[lli],[ssi])).T  # 3-D array: (observable, SV, {value, LLI, SSI})
return data
def get_score_bounds_from_range(Z_min, Z_max, rho_lb, rho_ub, L0_max = None):
"global variables: L0_reg_ind"
edge_values = np.vstack([Z_min * rho_lb,
Z_max * rho_lb,
Z_min * rho_ub,
Z_max * rho_ub])
if L0_max is None or L0_max == Z_min.shape[0]:
s_min = np.sum(np.min(edge_values, axis = 0))
s_max = np.sum(np.max(edge_values, axis = 0))
else:
min_values = np.min(edge_values, axis = 0)
s_min_reg = np.sum(np.sort(min_values[L0_reg_ind])[0:L0_max])
s_min_no_reg = np.sum(min_values[~L0_reg_ind])
s_min = s_min_reg + s_min_no_reg
max_values = np.max(edge_values, axis = 0)
s_max_reg = np.sum(-np.sort(-max_values[L0_reg_ind])[0:L0_max])
s_max_no_reg = np.sum(max_values[~L0_reg_ind])
s_max = s_max_reg + s_max_no_reg
return s_min, s_max
def get_score_bounds(Z_min, Z_max, rho_lb, rho_ub, L0_reg_ind = None, L0_max = None):
edge_values = np.vstack([Z_min * rho_lb,
Z_max * rho_lb,
Z_min * rho_ub,
Z_max * rho_ub])
if (L0_max is None) or (L0_reg_ind is None) or (L0_max == Z_min.shape[0]):
s_min = np.sum(np.min(edge_values, axis=0))
s_max = np.sum(np.max(edge_values, axis=0))
else:
min_values = np.min(edge_values, axis=0)
s_min_reg = np.sum(np.sort(min_values[L0_reg_ind])[0:L0_max])
s_min_no_reg = np.sum(min_values[~L0_reg_ind])
s_min = s_min_reg + s_min_no_reg
max_values = np.max(edge_values, axis=0)
s_max_reg = np.sum(-np.sort(-max_values[L0_reg_ind])[0:L0_max])
s_max_no_reg = np.sum(max_values[~L0_reg_ind])
s_max = s_max_reg + s_max_no_reg
return s_min, s_max
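# Usage sketch (not from the original source) with made-up data: two features,
# only the first L0-regularized, and at most one regularized coefficient allowed.
import numpy as np

Z_min = np.array([0.0, -1.0])
Z_max = np.array([1.0, 2.0])
rho_lb = np.array([-5.0, -5.0])
rho_ub = np.array([5.0, 5.0])
L0_reg_ind = np.array([True, False])
s_min, s_max = get_score_bounds(Z_min, Z_max, rho_lb, rho_ub, L0_reg_ind, L0_max=1)
print(s_min, s_max)  # -15.0 15.0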
def _makeflat(self, start=None, end=None, groups = False):
eeg = list()
for sub in self.data[start:end]:
if len(sub) % self.chunk_len == 0:
eeg.append(sub.reshape([-1, self.chunk_len,3]))
else:
            print('ERROR: please choose a chunk length that evenly divides {}. Current len = {}'.format(self.samples_per_epoch, len(sub)))
return [0,0]
hypno = list()
group = list()
    hypno_repeat = self.samples_per_epoch // self.chunk_len  # integer division: np.repeat needs an int
idx = 0
for sub in self.hypno[start:end]:
hypno.append(np.repeat(sub, hypno_repeat))
group.append(np.repeat(idx, len(hypno[-1])))
idx += 1
if groups:
return np.vstack(eeg), np.hstack(hypno), np.hstack(group)
else:
return np.vstack(eeg), np.hstack(hypno)
def natural_key(string_):
"""See http://www.codinghorror.com/blog/archives/001018.html"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)]
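# Usage sketch (not from the original source): natural ordering of numbered names.
print(sorted(['file10', 'file2', 'file1'], key=natural_key))
# -> ['file1', 'file2', 'file10']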
#%%
#l=a['feat_eeg']
#val_acc = [y[0] for y in [x for x in l]]
#val_f1 = [y[1] for y in [x for x in l]]
#test_acc = [y[2] for y in [x for x in l]]
#test_f1 = [y[3] for y in [x for x in l]]
#
#val = np.vstack([val_acc, val_f1]).T
#test = np.vstack([test_acc, test_f1]).T
#a = pickle.load(open('./results_dataset_feat_edfx','rb'))
def enroll(self, enroll_features):
"""enroll(enroll_features) -> model
Enrolls the model by storing all given input vectors.
**Parameters:**
enroll_features : [1D :py:class:`numpy.ndarray`]
The list of projected features to enroll the model from.
**Returns:**
model : 2D :py:class:`numpy.ndarray`
The enrolled model.
"""
assert len(enroll_features)
[self._check_feature(feature, True) for feature in enroll_features]
# just store all the features
return numpy.vstack(enroll_features)
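# Standalone sketch (not from the original source): the model returned by
# enroll is just numpy.vstack of the enrolled features. With three fabricated
# 4-D feature vectors, the enrolled model is a 3x4 matrix.
import numpy
features = [numpy.random.rand(4) for _ in range(3)]
print(numpy.vstack(features).shape)  # (3, 4)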
def enroll(self, enroll_features):
"""enroll(enroll_features) -> model
Enrolls the model by storing all given input vectors.
**Parameters:**
``enroll_features`` : [:py:class:`numpy.ndarray`]
The list of projected features to enroll the model from.
**Returns:**
``model`` : 2D :py:class:`numpy.ndarray`
The enrolled model.
"""
assert len(enroll_features)
[self._check_feature(feature) for feature in enroll_features]
# just store all the features
    return numpy.vstack([f.flatten() for f in enroll_features])
def train_projector(self, training_features, projector_file):
"""Generates the PCA covariance matrix and writes it into the given projector_file.
**Parameters:**
training_features : [1D :py:class:`numpy.ndarray`]
A list of 1D training arrays (vectors) to train the PCA projection matrix with.
projector_file : str
A writable file, into which the PCA projection matrix (as a :py:class:`bob.learn.linear.Machine`) and the eigenvalues will be written.
"""
# Assure that all data are 1D
[self._check_feature(feature) for feature in training_features]
# Initializes the data
data = numpy.vstack(training_features)
logger.info(" -> Training LinearMachine using PCA")
t = bob.learn.linear.PCATrainer()
self.machine, self.variances = t.train(data)
# For re-shaping, we need to copy...
self.variances = self.variances.copy()
# compute variance percentage, if desired
if isinstance(self.subspace_dim, float):
        cumulated = numpy.cumsum(self.variances) / numpy.sum(self.variances)
        for index in range(len(cumulated)):
            if cumulated[index] > self.subspace_dim:
self.subspace_dim = index
break
self.subspace_dim = index
logger.info(" ... Keeping %d PCA dimensions", self.subspace_dim)
# re-shape machine
self.machine.resize(self.machine.shape[0], self.subspace_dim)
self.variances.resize(self.subspace_dim)
f = bob.io.base.HDF5File(projector_file, "w")
f.set("Eigenvalues", self.variances)
f.create_group("Machine")
f.cd("/Machine")
self.machine.save(f)
def enroll(self, enroll_features):
"""enroll(enroll_features) -> model
Enrolls the model by storing all given input vectors.
**Parameters:**
enroll_features : [1D :py:class:`numpy.ndarray`]
The list of projected features to enroll the model from.
**Returns:**
model : 2D :py:class:`numpy.ndarray`
The enrolled model.
"""
assert len(enroll_features)
[self._check_feature(feature, True) for feature in enroll_features]
# just store all the features
return numpy.vstack(enroll_features)
def _scores_d_normalize(t_model_ids, group):
"""Compute normalized D scores for the given T-model ids"""
# the file selector object
fs = FileSelector.instance()
# initialize D and D_same_value matrices
d_for_all = None
d_same_value = None
for t_model_id in t_model_ids:
tmp = bob.io.base.load(fs.d_file(t_model_id, group))
tmp2 = bob.io.base.load(fs.d_same_value_file(t_model_id, group))
if d_for_all is None and d_same_value is None:
d_for_all = tmp
d_same_value = tmp2
else:
d_for_all = numpy.vstack((d_for_all, tmp))
d_same_value = numpy.vstack((d_same_value, tmp2))
# Saves to files
bob.io.base.save(d_for_all, fs.d_matrix_file(group))
bob.io.base.save(d_same_value, fs.d_same_value_matrix_file(group))
def x_frame2D(X, plot_limits=None, resolution=None):
"""
Internal helper function for making plots, returns a set of input values to plot as well as lower and upper limits
"""
assert X.shape[1] == 2, \
'x_frame2D is defined for two-dimensional inputs'
    if plot_limits is None:
        xmin, xmax = X.min(0), X.max(0)
        xmin, xmax = xmin - 0.2 * (xmax - xmin), xmax + 0.2 * (xmax - xmin)
    elif len(plot_limits) == 2:
        xmin, xmax = plot_limits
    else:
        raise ValueError('Bad limits for plotting')
    resolution = resolution or 50
    xx, yy = np.mgrid[xmin[0]:xmax[0]:1j * resolution,
                      xmin[1]:xmax[1]:1j * resolution]
    Xnew = np.vstack((xx.flatten(), yy.flatten())).T
return (Xnew, xx, yy, xmin, xmax)
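# Usage sketch (not from the original source): build a 50x50 evaluation grid
# over fabricated 2-D inputs, padded 20% beyond the data range.
import numpy as np
X = np.random.rand(10, 2)
Xnew, xx, yy, xmin, xmax = x_frame2D(X, resolution=50)
print(Xnew.shape)  # (2500, 2)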
def loadX(fname):
'''
Read data records into a data matrix.
Also return vocabulary.
'''
events = []
words_keys = set()
for e in load(fname):
events.append(e)
words_keys = words_keys | set(e[5].keys())
words_keys = sorted(list(words_keys))
for (eidx, e) in enumerate(events):
events[eidx] = list(e[:5]) + [e[5].get(word_key, 0)
for word_key in words_keys]
X = np.vstack(events)
return (X, words_keys)
def get_document_batch(self, doc_id):
"""builds batch of all mention pairs in one document
Args:
doc_id: id of document
    Returns:
        feature representations (A, A_f, B, B_f, AB_f) for all mention pairs in the document
"""
mentions = self.dl.get_all_mentions_from_doc(doc_id)
if len(mentions) == 0:
return None, None
A, B = [], []
for a in mentions:
for b in mentions:
A.append(a)
B.append(b)
A_f = [self._mention_to_features(m) for m in A]
B_f = [self._mention_to_features(m) for m in B]
AB_f = self._pair_features(A, B)
A = [self.dl.mention_features[m] for m in A]
B = [self.dl.mention_features[m] for m in B]
return np.vstack(A), np.stack(A_f), np.vstack(B), np.stack(B_f), np.stack(AB_f)
def log(p):
q = p.rot
t = p.trans
r = quat.log(q)
D = quat.dlog(r)
return np.vstack((r, D * t))
def plotFields(layer,fieldShape=None,channel=None,figOffset=1,cmap=None,padding=0.01):
# Receptive Fields Summary
    try:
        W = layer.W
    except AttributeError:  # no .W attribute; assume raw weights were passed
        W = layer
    wp = W.eval().transpose()
if len(np.shape(wp)) < 4: # Fully connected layer, has no shape
fields = np.reshape(wp,list(wp.shape[0:-1])+fieldShape)
else: # Convolutional layer already has shape
features, channels, iy, ix = np.shape(wp)
if channel is not None:
fields = wp[:,channel,:,:]
else:
fields = np.reshape(wp,[features*channels,iy,ix])
perRow = int(math.floor(math.sqrt(fields.shape[0])))
perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
fig = mpl.figure(figOffset); mpl.clf()
# Using image grid
from mpl_toolkits.axes_grid1 import ImageGrid
grid = ImageGrid(fig,111,nrows_ncols=(perRow,perColumn),axes_pad=padding,cbar_mode='single')
for i in range(0,np.shape(fields)[0]):
im = grid[i].imshow(fields[i],cmap=cmap);
grid.cbar_axes[0].colorbar(im)
mpl.title('%s Receptive Fields' % layer.name)
# old way
# fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
# tiled = []
# for i in range(0,perColumn*perRow,perColumn):
# tiled.append(np.hstack(fields2[i:i+perColumn]))
#
# tiled = np.vstack(tiled)
# mpl.figure(figOffset); mpl.clf(); mpl.imshow(tiled,cmap=cmap); mpl.title('%s Receptive Fields' % layer.name); mpl.colorbar();
mpl.figure(figOffset+1); mpl.clf(); mpl.imshow(np.sum(np.abs(fields),0),cmap=cmap); mpl.title('%s Total Absolute Input Dependency' % layer.name); mpl.colorbar()
def plotOutput(layer,feed_dict,fieldShape=None,channel=None,figOffset=1,cmap=None):
# Output summary
    try:
        W = layer.output
    except AttributeError:  # no .output attribute; assume a raw tensor was passed
        W = layer
    wp = W.eval(feed_dict=feed_dict)
if len(np.shape(wp)) < 4: # Fully connected layer, has no shape
        temp = np.zeros(np.prod(fieldShape))
        temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
fields = np.reshape(temp,[1]+fieldShape)
else: # Convolutional layer already has shape
wp = np.rollaxis(wp,3,0)
features, channels, iy,ix = np.shape(wp)
if channel is not None:
fields = wp[:,channel,:,:]
else:
fields = np.reshape(wp,[features*channels,iy,ix])
perRow = int(math.floor(math.sqrt(fields.shape[0])))
perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
tiled = []
for i in range(0,perColumn*perRow,perColumn):
tiled.append(np.hstack(fields2[i:i+perColumn]))
tiled = np.vstack(tiled)
if figOffset is not None:
mpl.figure(figOffset); mpl.clf();
mpl.imshow(tiled,cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar();
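# Standalone sketch (not from the original source) of the tiling idiom above:
# pad the stack of maps with zeros to fill a perRow x perColumn grid, hstack
# each grid row, then vstack the rows into a single image.
import math
import numpy as np

fields = np.random.rand(5, 8, 8)                              # five fabricated 8x8 maps
perRow = int(math.floor(math.sqrt(fields.shape[0])))          # 2
perColumn = int(math.ceil(fields.shape[0] / float(perRow)))   # 3
pad = np.zeros([perRow * perColumn - fields.shape[0]] + list(fields.shape[1:]))
fields2 = np.vstack([fields, pad])                            # pad to a full 2x3 grid
tiled = np.vstack([np.hstack(fields2[i:i + perColumn])
                   for i in range(0, perColumn * perRow, perColumn)])
print(tiled.shape)  # (16, 24)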
def plotFields(layer,fieldShape=None,channel=None,maxFields=25,figName='ReceptiveFields',cmap=None,padding=0.01):
# Receptive Fields Summary
W = layer.W
wp = W.eval().transpose();
if len(np.shape(wp)) < 4: # Fully connected layer, has no shape
fields = np.reshape(wp,list(wp.shape[0:-1])+fieldShape)
else: # Convolutional layer already has shape
features, channels, iy, ix = np.shape(wp)
if channel is not None:
fields = wp[:,channel,:,:]
else:
fields = np.reshape(wp,[features*channels,iy,ix])
fieldsN = min(fields.shape[0],maxFields)
perRow = int(math.floor(math.sqrt(fieldsN)))
perColumn = int(math.ceil(fieldsN/float(perRow)))
fig = mpl.figure(figName); mpl.clf()
# Using image grid
from mpl_toolkits.axes_grid1 import ImageGrid
grid = ImageGrid(fig,111,nrows_ncols=(perRow,perColumn),axes_pad=padding,cbar_mode='single')
for i in range(0,fieldsN):
im = grid[i].imshow(fields[i],cmap=cmap);
grid.cbar_axes[0].colorbar(im)
mpl.title('%s Receptive Fields' % layer.name)
# old way
# fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
# tiled = []
# for i in range(0,perColumn*perRow,perColumn):
# tiled.append(np.hstack(fields2[i:i+perColumn]))
#
# tiled = np.vstack(tiled)
# mpl.figure(figOffset); mpl.clf(); mpl.imshow(tiled,cmap=cmap); mpl.title('%s Receptive Fields' % layer.name); mpl.colorbar();
mpl.figure(figName+' Total'); mpl.clf(); mpl.imshow(np.sum(np.abs(fields),0),cmap=cmap); mpl.title('%s Total Absolute Input Dependency' % layer.name); mpl.colorbar()
def plotOutput(layer,feed_dict,fieldShape=None,channel=None,figOffset=1,cmap=None):
# Output summary
W = layer.output
wp = W.eval(feed_dict=feed_dict);
if len(np.shape(wp)) < 4: # Fully connected layer, has no shape
        temp = np.zeros(np.prod(fieldShape))
        temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
fields = np.reshape(temp,[1]+fieldShape)
else: # Convolutional layer already has shape
wp = np.rollaxis(wp,3,0)
features, channels, iy,ix = np.shape(wp)
if channel is not None:
fields = wp[:,channel,:,:]
else:
fields = np.reshape(wp,[features*channels,iy,ix])
perRow = int(math.floor(math.sqrt(fields.shape[0])))
perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
tiled = []
for i in range(0,perColumn*perRow,perColumn):
tiled.append(np.hstack(fields2[i:i+perColumn]))
tiled = np.vstack(tiled)
if figOffset is not None:
mpl.figure(figOffset); mpl.clf();
mpl.imshow(tiled,cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar();
def get_tm_opp(pts1, pts2):
    # Transformation matrix (translation + scaling + rotation)
    # via Procrustes analysis. Expects np.matrix inputs: '*' below is a matrix product.
    pts1 = pts1.astype(np.float64)  # astype keeps the np.matrix subclass
    pts2 = pts2.astype(np.float64)
m1 = np.mean(pts1, axis = 0)
m2 = np.mean(pts2, axis = 0)
# Removing translation
pts1 -= m1
pts2 -= m2
std1 = np.std(pts1)
std2 = np.std(pts2)
std_r = std2/std1
# Removing scaling
pts1 /= std1
pts2 /= std2
U, S, V = np.linalg.svd(np.transpose(pts1) * pts2)
# Finding the rotation matrix
R = np.transpose(U * V)
    return np.vstack([np.hstack((std_r * R,
                                 np.transpose(m2) - std_r * R * np.transpose(m1))),
                      np.matrix([0.0, 0.0, 1.0])])
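# Usage sketch (not from the original source): aligning two fabricated point
# sets. np.matrix inputs are assumed, since the function uses '*' as a matrix
# product; the returned 3x3 matrix has the form [[s*R | t], [0 0 1]].
import numpy as np
pts1 = np.matrix([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
pts2 = np.matrix([[1.0, 1.0], [3.0, 1.0], [1.0, 3.0]])  # pts1 scaled by 2, shifted by (1, 1)
M = get_tm_opp(pts1, pts2)
print(M.shape)  # (3, 3), approximately [[2, 0, 1], [0, 2, 1], [0, 0, 1]]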