def add(self, data):
    """
    Append data to the already fitted data.
    Parameters
    ----------
    data : list, numpy.ndarray, pandas.Series
        data to append
    """
    if isinstance(data, list):
        data = np.array(data)
    elif isinstance(data, pd.Series):
        data = data.values
    elif not isinstance(data, np.ndarray):
        print('This data format (%s) is not supported' % type(data))
        return
    self.data = np.append(self.data, data)
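# A minimal usage sketch for the method above, assuming a host class whose
# `data` attribute is a 1-D numpy array (the class name `Fitter` is hypothetical):
import numpy as np
import pandas as pd

class Fitter:
    def __init__(self, data):
        self.data = np.asarray(data)

Fitter.add = add  # attach the method defined above

f = Fitter([1.0, 2.0])
f.add([3.0, 4.0])             # list
f.add(np.array([5.0]))        # numpy array
f.add(pd.Series([6.0, 7.0]))  # pandas Series
print(f.data)                 # [1. 2. 3. 4. 5. 6. 7.]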
def _get_image_blob(roidb, scale_inds):
    """Builds an input blob from the images in the roidb at the specified
    scales.
    """
    num_images = len(roidb)
    processed_ims = []
    im_scales = []
    for i in range(num_images):
        im = cv2.imread(roidb[i]['image'])
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        target_size = cfg.TRAIN.SCALES[scale_inds[i]]
        im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
                                        cfg.TRAIN.MAX_SIZE)
        im_scales.append(im_scale)
        processed_ims.append(im)
    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)
    return blob, im_scales
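# A sketch of how this blob builder is typically driven, following Fast R-CNN
# style minibatch sampling; `roidb`, `cfg`, `prep_im_for_blob` and
# `im_list_to_blob` come from the surrounding detection codebase and are
# assumed here, so this fragment is not runnable on its own:
num_images = len(roidb)
scale_inds = np.random.randint(0, high=len(cfg.TRAIN.SCALES), size=num_images)
im_blob, im_scales = _get_image_blob(roidb, scale_inds)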
def plotArc(start_angle, stop_angle, radius, width, **kwargs):
    """Return a matplotlib Polygon patch approximating an annular arc from
    start_angle to stop_angle (in degrees) with the given inner radius and
    radial width, plus the inner and outer edge coordinates."""
    numsegments = 100
    theta = np.radians(np.linspace(start_angle + 90, stop_angle + 90, numsegments))
    x1 = -np.cos(theta) * radius
    y1 = np.sin(theta) * radius
    stack1 = np.column_stack([x1, y1])
    x2 = -np.cos(theta) * (radius + width)
    y2 = np.sin(theta) * (radius + width)
    stack2 = np.column_stack([np.flip(x2, axis=0), np.flip(y2, axis=0)])
    # add the first point of the inner edge to close the polygon
    # (np.append returns a new array, so the result must be assigned)
    stack2 = np.append(stack2, [[x1[0], y1[0]]], axis=0)
    arcArray = np.concatenate((stack1, stack2), axis=0)
    return patches.Polygon(arcArray, True, **kwargs), ((x1, y1), (x2, y2))
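# Example: draw one arc. Self-contained, assuming `patches` is imported as
# matplotlib.patches in the module that defines plotArc:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches

fig, ax = plt.subplots()
arc, _ = plotArc(30, 150, radius=1.0, width=0.25, facecolor='steelblue')
ax.add_patch(arc)
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(-1.5, 1.5)
ax.set_aspect('equal')
plt.show()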
def eval(self, t):
    # given a time vector t, return the design matrix column vector(s)
    if self.type is None:
        return np.array([])
    hl = np.zeros((t.shape[0],))
    ht = np.zeros((t.shape[0],))
    if self.type in (0, 2):
        hl[t >= self.year] = np.log10(1 + (t[t >= self.year] - self.year) / self.T)
    if self.type in (1, 2):
        ht[t >= self.year] = 1
    return np.append(ht, hl) if np.any(hl) else ht
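# What the logarithmic transient column looks like, evaluated standalone with
# stand-in values for the object's `year` and `T` attributes (illustrative only):
import numpy as np

year, T = 2010.5, 0.5  # hypothetical event epoch and relaxation time
t = np.arange(2009.0, 2013.0, 0.05)
hl = np.zeros((t.shape[0],))
hl[t >= year] = np.log10(1 + (t[t >= year] - year) / T)
# hl is zero before the event and grows logarithmically afterwards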
def LoadParameters(self, C):
    s = 0
    for jump in self.table:
        if jump.type is not None:
            if jump.params == 1 and jump.T != 0:
                jump.a = np.append(jump.a, C[s:s + 1])
            elif jump.params == 1 and jump.T == 0:
                jump.b = np.append(jump.b, C[s:s + 1])
            elif jump.params == 2:
                jump.b = np.append(jump.b, C[s:s + 1])
                jump.a = np.append(jump.a, C[s + 1:s + 2])
            s = s + jump.params
def trainepoch(self, X, y, epoch_size=1):
    self.model.train()
    for _ in range(self.nepoch, self.nepoch + epoch_size):
        permutation = np.random.permutation(len(X))
        all_costs = []
        for i in range(0, len(X), self.batch_size):
            # forward
            idx = torch.LongTensor(permutation[i:i + self.batch_size])
            if isinstance(X, torch.cuda.FloatTensor):
                idx = idx.cuda()
            Xbatch = Variable(X.index_select(0, idx))
            ybatch = Variable(y.index_select(0, idx))
            if self.cudaEfficient:
                Xbatch = Xbatch.cuda()
                ybatch = ybatch.cuda()
            output = self.model(Xbatch)
            # loss
            loss = self.loss_fn(output, ybatch)
            all_costs.append(loss.data[0])
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            # Update parameters
            self.optimizer.step()
    self.nepoch += epoch_size
def add(self, output, target):
    if torch.is_tensor(output):
        output = output.cpu().squeeze().numpy()
    if torch.is_tensor(target):
        target = target.cpu().squeeze().numpy()
    elif isinstance(target, numbers.Number):
        target = np.asarray([target])
    assert np.ndim(output) == 1, \
        'wrong output size (1D expected)'
    assert np.ndim(target) == 1, \
        'wrong target size (1D expected)'
    assert output.shape[0] == target.shape[0], \
        'number of outputs and targets does not match'
    assert np.all(np.add(np.equal(target, 1), np.equal(target, 0))), \
        'targets should be binary (0, 1)'
    self.scores = np.append(self.scores, output)
    self.targets = np.append(self.targets, target)
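# A sketch of how this accumulator is used, with a minimal stand-in class
# (hypothetical) so the snippet runs on its own:
import numbers
import numpy as np
import torch

class ScoreMeter:
    def __init__(self):
        self.scores = np.array([])
        self.targets = np.array([])

ScoreMeter.add = add  # attach the method defined above

m = ScoreMeter()
m.add(torch.tensor([0.9, 0.2, 0.7]), torch.tensor([1, 0, 1]))
print(m.scores, m.targets)  # accumulated scores and binary targets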
def set_replay_buffer(self, record):
    """After getting the reward from the environment, the agent adds the new
    record to the replay buffer.
    Args:
        record: dict with at least the following keys:
            'observation', 'action', 'reward', 'terminal', 'target_ob'
    """
    new_state = self.observation2state(record['observation'])
    if isinstance(self.current_state, dict):
        raise Exception("current state type error")
    self.replay_buffer.add(self.current_state, record['action'], record['reward'], new_state,
                           float(record['terminal']), self.current_feature, record['target_ob'])
    # self.replayMemory.append([self.current_state, record['action'], record['reward'],
    #                           new_state, record['terminal'], record['feature']])
    # if len(self.replayMemory) > REPLAY_MEMORY:
    #     self.replayMemory.popleft()
    self.current_state = new_state
    self.current_feature = list_to_dic(record['observation'])
def plot_trace(n=0, lg=False):
    plt.plot(trueC[n], c=col[2], clip_on=False, zorder=5, label='Truth')
    plt.plot(solution, c=col[0], clip_on=False, zorder=7, label='Estimate')
    plt.plot(y, c=col[7], alpha=.7, lw=1, clip_on=False, zorder=-10, label='Data')
    if lg:
        plt.legend(frameon=False, ncol=3, loc=(.1, .62), columnspacing=.8)
    spks = np.append(0, solution[1:] - g * solution[:-1])
    plt.text(800, 2.2, 'Correlation: %.3f' % (np.corrcoef(trueSpikes[n], spks)[0, 1]), size=24)
    plt.gca().set_xticklabels([])
    simpleaxis(plt.gca())
    plt.ylim(0, 2.85)
    plt.xlim(0, 1500)
    plt.yticks([0, 2], [0, 2])
    plt.xticks([300, 600, 900, 1200], ['', '', '', ''])  # four ticks need four (blank) labels
def longestrunones8(binin):
    ''' The focus of the test is the longest run of ones within M-bit blocks. The purpose of this test is to determine whether the length of the longest run of ones within the tested sequence is consistent with the length of the longest run of ones that would be expected in a random sequence. Note that an irregularity in the expected length of the longest run of ones implies that there is also an irregularity in the expected length of the longest run of zeroes. Long runs of zeroes were not evaluated separately due to a concern about statistical independence among the tests.'''
    m = 8
    k = 3
    pik = [0.2148, 0.3672, 0.2305, 0.1875]
    blocks = [binin[xs * m:m + xs * m] for xs in range(len(binin) // m)]
    n = len(blocks)
    counts1 = [xs + '01' for xs in blocks]  # append '01' so the final run is terminated and every block has at least one run of ones
    counts = [xs.replace('0', ' ').split() for xs in counts1]  # isolate the runs of ones
    counts2 = [list(map(len, xx)) for xx in counts]  # run lengths per block
    counts4 = [(4 if xx > 4 else xx) for xx in map(max, counts2)]  # cap the longest run at 4
    freqs = [counts4.count(spi) for spi in [1, 2, 3, 4]]
    chisqr1 = [(freqs[xx] - n * pik[xx]) ** 2 / (n * pik[xx]) for xx in range(4)]
    chisqr = sum(chisqr1)  # was reduce(su, chisqr1), with `su` an external addition helper
    pval = spc.gammaincc(k / 2.0, chisqr / 2.0)
    return pval
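# Quick usage check (self-contained apart from `spc`, which the function
# expects as scipy.special): a random bit string should give a non-extreme p-value.
import random
import scipy.special as spc

bits = ''.join(random.choice('01') for _ in range(2048))
print('longest-run-of-ones p-value: %.4f' % longestrunones8(bits))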
def SExtractorCat2fits(sextractorfiles, stringcols=[1], header=73, verbose=True):
    """
    Convert an ascii catalog with columns defined in a header in the SExtractor format, i.e., one column
    name per row preceded by a "#" and a column number, and followed by a description (or any ascii file
    with the given setup), to a fits binary table
    --- INPUT ---
    sextractorfiles  List of ascii files to convert to fits
    stringcols       Columns to use a string format for (all other columns will be set to double float)
    header           Header containing the column names of the catalogs following the "SExtractor notation"
    verbose          Toggle verbosity
    --- EXAMPLE OF USE ---
    import glob
    import tdose_utilities as tu
    catalogs = glob.glob('/Volumes/DATABCKUP2/MUSE-Wide/catalogs_photometry/catalog_photometry_candels-cdfs-*.cat')
    tu.SExtractorCat2fits(catalogs,stringcols=[1],header=73,verbose=True)
    """
    for sexcat_ascii in sextractorfiles:
        asciiinfo = open(sexcat_ascii, 'r')
        photcols = []
        for line in asciiinfo:
            if line.startswith('#'):
                colname = line.split()[2]
                photcols.append(colname)
        photfmt = ['D'] * len(photcols)
        for stringcol in stringcols:
            photfmt[stringcol] = 'A60'
        sexcat_fits = tu.ascii2fits(sexcat_ascii, asciinames=photcols, skip_header=header, fitsformat=photfmt, verbose=verbose)
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def layers(net):
    out = []
    for elem in net['layer']:
        out.append(elem['name'])
        for x in elem['top']:
            out.append(x)
    return set(out)
def get_layer(net, layer_name):
    out = []
    for elem in net['layer']:
        out.append(elem['name'])
    out_ind = []
    for i, elem in enumerate(out):
        if elem == layer_name:
            out_ind.append(i)
    return out_ind
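# Toy example for the two helpers above, using a dict in the caffe-style shape
# they expect (names and values are hypothetical):
net = {'layer': [
    {'name': 'conv1', 'top': ['conv1']},
    {'name': 'relu1', 'top': ['conv1']},
    {'name': 'fc1',   'top': ['fc1']},
]}
print(layers(net))            # {'conv1', 'relu1', 'fc1'}
print(get_layer(net, 'fc1'))  # [2]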
def warpCloud(xyc, sourceGridPoints, targetGridPoints, warpQuality=9):
    # inverse-distance-weighted warp: each point moves with its k nearest
    # source grid points, mapped to the corresponding target grid points
    sourceTree = KDTree(sourceGridPoints, leafsize=10)
    warpedXYC = []
    for c in xyc:
        nearestEdge = sourceTree.query(c, k=warpQuality)
        nx = 0.0
        ny = 0.0
        ws = 0.0
        for i in range(warpQuality):
            p = targetGridPoints[nearestEdge[1][i]]
            w = nearestEdge[0][i]
            if w == 0.0:
                # exact hit on a grid point: take its target position verbatim
                nx = p[0]
                ny = p[1]
                ws = 1.0
                break
            else:
                w = 1.0 / w
                nx += w * p[0]
                ny += w * p[1]
                ws += w
        warpedXYC.append([nx / ws, ny / ws])
    warpedXYC = np.array(warpedXYC)
    return warpedXYC
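# Usage sketch: warp a random cloud from a regular grid to the same grid
# shifted right (KDTree here is scipy.spatial.KDTree; grids are illustrative):
import numpy as np
from scipy.spatial import KDTree

xs, ys = np.meshgrid(np.linspace(0, 1, 10), np.linspace(0, 1, 10))
sourceGridPoints = np.column_stack([xs.ravel(), ys.ravel()])
targetGridPoints = sourceGridPoints + [0.5, 0.0]
cloud = np.random.rand(20, 2)
warped = warpCloud(cloud, sourceGridPoints, targetGridPoints)
# the whole cloud shifts by roughly +0.5 in x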
def getCoonsGrid(bounds, width=64, height=64, densities=None, paddingScale=1.0):
    targets = []
    for yi in range(height):
        for xi in range(width):
            targets.append(getCoonsPatchPointBez(bounds, xi, yi, width, height, densities=densities))
    targets = np.array(targets)
    tmean = [np.mean(targets[:, 0]), np.mean(targets[:, 1])]
    targets -= tmean
    targets *= paddingScale
    targets += tmean
    return targets
def __repr__(self):
    statements = []
    for metric in self.METRIC_NAMES:
        value = getattr(self, metric)[-1]
        if isinstance(value, list):
            if len(value) == 0:
                value = np.nan
            else:
                value = value[-1]
        statements.append("{m}:{v}".format(m=metric, v=value))
    return '\n'.join(statements)
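# Minimal illustration with a stand-in metrics holder (hypothetical; the real
# class tracks per-epoch metric histories):
import numpy as np

class History:
    METRIC_NAMES = ('loss', 'acc')
    def __init__(self):
        self.loss = [[0.9, 0.7], [0.6]]
        self.acc = [[0.4, 0.5], []]

History.__repr__ = __repr__  # attach the method defined above
print(History())  # prints "loss:0.6" then "acc:nan" on the next line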
def load_chunk(group, col_start, col_end):
    ''' Load a submatrix specified by the given column (barcode) range from an h5 group
    Args: col_start, col_end - half-open interval of column indices to load'''
    # Check bounds
    shape = getattr(group, cr_constants.H5_MATRIX_SHAPE_ATTR).read()
    assert col_start >= 0 and col_start < shape[1]
    assert col_end >= 0 and col_end <= shape[1]
    # Load genes and barcodes
    genes = GeneBCMatrix.load_genes_from_h5_group(group)
    bcs = GeneBCMatrix.load_bcs_from_h5_group(group)[col_start:col_end]
    matrix = GeneBCMatrix(genes, bcs)
    # Get views into full matrix
    data = getattr(group, cr_constants.H5_MATRIX_DATA_ATTR)
    indices = getattr(group, cr_constants.H5_MATRIX_INDICES_ATTR)
    indptr = getattr(group, cr_constants.H5_MATRIX_INDPTR_ATTR)
    # Determine extents of selected columns
    ind_start = indptr[col_start]
    if col_end < len(indptr) - 1:
        # Last index (end-exclusive) is the start of the next column
        ind_end = indptr[col_end]
    else:
        # Last index is the last index in the matrix
        ind_end = len(data)
    chunk_data = data[ind_start:ind_end]
    chunk_indices = indices[ind_start:ind_end]
    chunk_indptr = np.append(indptr[col_start:col_end], ind_end) - ind_start
    chunk_shape = (shape[0], col_end - col_start)
    matrix.m = sp_sparse.csc_matrix((chunk_data, chunk_indices, chunk_indptr), shape=chunk_shape)
    return matrix
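# The indptr arithmetic is the core trick: subtracting ind_start rebases the
# column pointers of the slice. A self-contained check with a small scipy matrix:
import numpy as np
import scipy.sparse as sp_sparse

m = sp_sparse.random(5, 8, density=0.4, format='csc', random_state=0)
col_start, col_end = 2, 6
ind_start, ind_end = m.indptr[col_start], m.indptr[col_end]
chunk = sp_sparse.csc_matrix(
    (m.data[ind_start:ind_end],
     m.indices[ind_start:ind_end],
     np.append(m.indptr[col_start:col_end], ind_end) - ind_start),
    shape=(m.shape[0], col_end - col_start))
assert (chunk.toarray() == m[:, col_start:col_end].toarray()).all()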
def load_genomes_from_h5(filename):
    genomes = []
    with tables.open_file(filename, 'r') as f:
        for group in f.list_nodes(f.root):
            genome = group._v_name
            genomes.append(genome)
    return genomes