def gen_samples(self, z0=None, n=32, batch_size=32, use_transform=True):
    assert n % batch_size == 0
    samples = []
    if z0 is None:
        z0 = np_rng.uniform(-1., 1., size=(n, self.nz))
    else:
        n = len(z0)
        batch_size = max(n, 64)
    n_batches = int(np.ceil(n / float(batch_size)))
    for i in range(n_batches):
        zmb = floatX(z0[batch_size * i:min(n, batch_size * (i + 1)), :])
        xmb = self._gen(zmb)
        samples.append(xmb)
    samples = np.concatenate(samples, axis=0)
    if use_transform:
        samples = self.inverse_transform(samples, npx=self.npx, nc=self.nc)
        samples = (samples * 255).astype(np.uint8)
    return samples
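# A minimal, self-contained sketch of the ceil-based batching used above, with
# hypothetical sizes (np_rng, floatX and self._gen are project-specific and omitted):
import numpy as np

n, batch_size = 100, 32
n_batches = int(np.ceil(n / float(batch_size)))   # 4 batches: 32 + 32 + 32 + 4
for i in range(n_batches):
    lo, hi = batch_size * i, min(n, batch_size * (i + 1))
    # slice z0[lo:hi] here and generate that batch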
def __init__(self, opt_engine, topK=16, grid_size=None, nps=320, model_name='tmp'):
    QWidget.__init__(self)
    self.topK = topK
    if grid_size is None:
        self.n_grid = int(np.ceil(np.sqrt(self.topK)))
        self.grid_size = (self.n_grid, self.n_grid)  # (width, height)
    else:
        self.grid_size = grid_size
    self.select_id = 0
    self.ims = None
    self.vis_results = None
    self.width = int(np.round(nps / (4 * float(self.grid_size[1])))) * 4
    self.winWidth = self.width * self.grid_size[0]
    self.winHeight = self.width * self.grid_size[1]
    self.setFixedSize(self.winWidth, self.winHeight)
    self.opt_engine = opt_engine
    self.frame_id = -1
    self.sr = save_result.SaveResult(model_name=model_name)
def rasta_plp_extractor(x, sr, plp_order=0, do_rasta=True):
    spec = log_power_spectrum_extractor(x, int(sr * 0.02), int(sr * 0.01), 'hamming', False)
    bark_filters = int(np.ceil(freq2bark(sr // 2)))
    wts = get_fft_bark_mat(sr, int(sr * 0.02), bark_filters)
    bark_spec = np.matmul(wts, spec)
    if do_rasta:
        bark_spec = np.where(bark_spec == 0.0, np.finfo(float).eps, bark_spec)
        log_bark_spec = np.log(bark_spec)
        rasta_log_bark_spec = rasta_filt(log_bark_spec)
        bark_spec = np.exp(rasta_log_bark_spec)
    post_spec = postaud(bark_spec, sr / 2.)
    if plp_order > 0:
        lpcas = do_lpc(post_spec, plp_order)
    else:
        lpcas = post_spec
    return lpcas
def _wav_to_framed_samples(wav_audio, hparams):
    """Transforms the contents of a wav file into a series of framed samples."""
    y = audio_io.wav_data_to_samples(wav_audio, hparams.sample_rate)
    hl = hparams.spec_hop_length
    n_frames = int(np.ceil(y.shape[0] / hl))
    frames = np.zeros((n_frames, hl), dtype=np.float32)
    # Fill in everything but the last frame, which may not be the full length
    cutoff = (n_frames - 1) * hl
    frames[:n_frames - 1, :] = np.reshape(y[:cutoff], (n_frames - 1, hl))
    # Fill the last frame
    remain_len = len(y[cutoff:])
    frames[n_frames - 1, :remain_len] = y[cutoff:]
    return frames
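# A self-contained sketch of the same ceil-based framing, using a hypothetical
# 10-sample signal and hop length 4 (audio_io and hparams are assumed above):
import numpy as np

y = np.arange(10, dtype=np.float32)              # stand-in for the decoded samples
hl = 4                                           # stand-in for hparams.spec_hop_length
n_frames = int(np.ceil(y.shape[0] / float(hl)))  # ceil(10 / 4) == 3 frames
frames = np.zeros((n_frames, hl), dtype=np.float32)
cutoff = (n_frames - 1) * hl                     # 8 samples fill the first two frames
frames[:n_frames - 1, :] = np.reshape(y[:cutoff], (n_frames - 1, hl))
frames[n_frames - 1, :len(y[cutoff:])] = y[cutoff:]  # last frame is zero-padded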
def comp_ola_deconv(fs_gpu, ys_gpu, L_gpu, alpha, beta):
    """
    Computes the division in Fourier space needed for direct deconvolution
    """
    sfft = fs_gpu.shape
    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(np.float32(sfft[0] * sfft[1]) / block_size[0])),
                 int(np.ceil(np.float32(sfft[2]) / block_size[1])))
    mod = cu.module_from_buffer(cubin)
    comp_ola_deconv_Kernel = mod.get_function("comp_ola_deconv_Kernel")
    z_gpu = cua.zeros(sfft, np.complex64)
    comp_ola_deconv_Kernel(z_gpu.gpudata,
                           np.int32(sfft[0]), np.int32(sfft[1]), np.int32(sfft[2]),
                           fs_gpu.gpudata, ys_gpu.gpudata, L_gpu.gpudata,
                           np.float32(alpha), np.float32(beta),
                           block=block_size, grid=grid_size)
    return z_gpu
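# A minimal sketch of the ceil-based launch configuration used above: np.ceil rounds
# the block count up so every element is covered even when the FFT dimensions are
# not multiples of the 16x16 block (hypothetical shape; pycuda itself is not needed):
import numpy as np

sfft, block_size = (30, 40, 7), (16, 16, 1)
grid_size = (int(np.ceil(np.float32(sfft[0] * sfft[1]) / block_size[0])),
             int(np.ceil(np.float32(sfft[2]) / block_size[1])))
assert grid_size == (75, 1)   # 75 * 16 >= 1200 elements, 1 * 16 >= 7 planes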
def crop_gpu2cpu(x_gpu, sz, offset=(0, 0)):
    sfft = x_gpu.shape
    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(np.float32(sfft[1]) / block_size[1])),
                 int(np.ceil(np.float32(sfft[0]) / block_size[0])))
    if x_gpu.dtype == np.float32:
        mod = cu.module_from_buffer(cubin)
        cropKernel = mod.get_function("crop_Kernel")
    elif x_gpu.dtype == np.complex64:
        mod = cu.module_from_buffer(cubin)
        cropKernel = mod.get_function("crop_ComplexKernel")
    x_cropped_gpu = cua.empty(tuple((int(sz[0]), int(sz[1]))), np.float32)
    cropKernel(x_cropped_gpu.gpudata, np.int32(sz[0]), np.int32(sz[1]),
               x_gpu.gpudata, np.int32(sfft[0]), np.int32(sfft[1]),
               np.int32(offset[0]), np.int32(offset[1]),
               block=block_size, grid=grid_size)
    return x_cropped_gpu
def comp_ola_sdeconv(gx_gpu, gy_gpu, xx_gpu, xy_gpu, Ftpy_gpu, f_gpu, L_gpu, alpha, beta, gamma=0):
    """
    Computes the division in Fourier space needed for sparse deconvolution
    """
    sfft = xx_gpu.shape
    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(np.float32(sfft[0] * sfft[1]) / block_size[0])),
                 int(np.ceil(np.float32(sfft[2]) / block_size[1])))
    mod = cu.module_from_buffer(cubin)
    comp_ola_sdeconv_Kernel = mod.get_function("comp_ola_sdeconv_Kernel")
    z_gpu = cua.zeros(sfft, np.complex64)
    comp_ola_sdeconv_Kernel(z_gpu.gpudata,
                            np.int32(sfft[0]), np.int32(sfft[1]), np.int32(sfft[2]),
                            gx_gpu.gpudata, gy_gpu.gpudata,
                            xx_gpu.gpudata, xy_gpu.gpudata,
                            Ftpy_gpu.gpudata, f_gpu.gpudata, L_gpu.gpudata,
                            np.float32(alpha), np.float32(beta),
                            np.float32(gamma),
                            block=block_size, grid=grid_size)
    return z_gpu
def impad_gpu(y_gpu, sf):
    sf = np.array(sf)
    shape = (np.array(y_gpu.shape) + sf).astype(np.uint32)
    dtype = y_gpu.dtype
    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(float(shape[1]) / block_size[0])),
                 int(np.ceil(float(shape[0]) / block_size[1])))
    preproc = _generate_preproc(dtype, shape)
    mod = SourceModule(preproc + kernel_code, keep=True)
    padded_gpu = cua.empty((int(shape[0]), int(shape[1])), dtype)
    impad_fun = mod.get_function("impad")
    upper_left = np.uint32(np.floor(sf / 2.))
    original_size = np.uint32(np.array(y_gpu.shape))
    impad_fun(padded_gpu.gpudata, y_gpu.gpudata,
              upper_left[1], upper_left[0],
              original_size[0], original_size[1],
              block=block_size, grid=grid_size)
    return padded_gpu
def laplace_stack_gpu(y_gpu, mode='valid'):
    """
    This function computes the Laplacian of each slice of a stack of images
    """
    shape = np.array(y_gpu.shape).astype(np.uint32)
    dtype = y_gpu.dtype
    block_size = (6, int(np.floor(512. / 6. / float(shape[0]))), int(shape[0]))
    grid_size = (int(np.ceil(float(shape[1]) / block_size[0])),
                 int(np.ceil(float(shape[0]) / block_size[1])))
    shared_size = int((2 + block_size[0]) * (2 + block_size[1]) * (2 + block_size[2])
                      * dtype.itemsize)
    preproc = _generate_preproc(dtype, (shape[1], shape[2]))
    mod = SourceModule(preproc + kernel_code, keep=True)
    laplace_fun_gpu = mod.get_function("laplace_stack_same")
    laplace_gpu = cua.empty((y_gpu.shape[0], y_gpu.shape[1], y_gpu.shape[2]),
                            y_gpu.dtype)
    laplace_fun_gpu(laplace_gpu.gpudata, y_gpu.gpudata,
                    block=block_size, grid=grid_size, shared=shared_size)
    return laplace_gpu
def morph(roi):
    ratio = min(28. / np.size(roi, 0), 28. / np.size(roi, 1))
    roi = cv2.resize(roi, None, fx=ratio, fy=ratio,
                     interpolation=cv2.INTER_NEAREST)
    dx = 28 - np.size(roi, 1)
    dy = 28 - np.size(roi, 0)
    px = (int(dx / 2.), int(np.ceil(dx / 2.)))
    py = (int(dy / 2.), int(np.ceil(dy / 2.)))
    squared = np.pad(roi, (py, px), 'constant', constant_values=0)
    return squared
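# A quick numeric check of the symmetric padding above, with a hypothetical 20x17
# ROI already resized to fit inside 28x28 (cv2 is not needed for this part):
import numpy as np

roi = np.ones((20, 17), dtype=np.uint8)
dx, dy = 28 - roi.shape[1], 28 - roi.shape[0]
px = (int(dx / 2.), int(np.ceil(dx / 2.)))   # (5, 6): floor on the left, ceil on the right
py = (int(dy / 2.), int(np.ceil(dy / 2.)))   # (4, 4)
assert np.pad(roi, (py, px), 'constant', constant_values=0).shape == (28, 28)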
def computePad(dims, depth):
    y1 = y2 = x1 = x2 = 0
    y, x = [numpy.ceil(dims[i] / float(2 ** depth)) * (2 ** depth) for i in range(-2, 0)]
    x = float(x); y = float(y)
    y1 = int(numpy.floor((y - dims[-2]) / 2)); y2 = int(numpy.ceil((y - dims[-2]) / 2))
    x1 = int(numpy.floor((x - dims[-1]) / 2)); x2 = int(numpy.ceil((x - dims[-1]) / 2))
    return y1, y2, x1, x2
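# Hypothetical usage of computePad: pad a 100x75 image so both spatial dimensions
# become multiples of 2**4 = 16 (assumes the function above is in scope):
y1, y2, x1, x2 = computePad((1, 100, 75), 4)
# y1, y2 == 6, 6  -> 100 + 12 == 112 == 7 * 16
# x1, x2 == 2, 3  ->  75 +  5 ==  80 == 5 * 16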
def view_waveforms_clusters(data, halo, threshold, templates, amps_lim, n_curves=200, save=False):
    nb_templates = templates.shape[1]
    n_panels = numpy.ceil(numpy.sqrt(nb_templates))
    mask = numpy.where(halo > -1)[0]
    clust_idx = numpy.unique(halo[mask])
    fig = pylab.figure()
    square = True
    center = len(data[0] - 1)//2
    for count, i in enumerate(xrange(nb_templates)):
        if square:
            pylab.subplot(n_panels, n_panels, count + 1)
            if (numpy.mod(count, n_panels) != 0):
                pylab.setp(pylab.gca(), yticks=[])
            if (count < n_panels*(n_panels - 1)):
                pylab.setp(pylab.gca(), xticks=[])
        subcurves = numpy.where(halo == clust_idx[count])[0]
        for k in numpy.random.permutation(subcurves)[:n_curves]:
            pylab.plot(data[k], '0.5')
        pylab.plot(templates[:, count], 'r')
        pylab.plot(amps_lim[count][0]*templates[:, count], 'b', alpha=0.5)
        pylab.plot(amps_lim[count][1]*templates[:, count], 'b', alpha=0.5)
        xmin, xmax = pylab.xlim()
        pylab.plot([xmin, xmax], [-threshold, -threshold], 'k--')
        pylab.plot([xmin, xmax], [threshold, threshold], 'k--')
        #pylab.ylim(-1.5*threshold, 1.5*threshold)
        ymin, ymax = pylab.ylim()
        pylab.plot([center, center], [ymin, ymax], 'k--')
        pylab.title('Cluster %d' %i)
    if nb_templates > 0:
        pylab.tight_layout()
    if save:
        pylab.savefig(os.path.join(save[0], 'waveforms_%s' %save[1]))
        pylab.close()
    else:
        pylab.show()
    del fig
def draw_circles(image, cands, origin, spacing):
    # make an empty matrix, which will be filled with the mask
    image_mask = np.zeros(image.shape, dtype=np.int16)
    # run over all the nodules in the lungs
    for ca in cands.values:
        # get the middle x-, y-, and z-world coordinate of the nodule
        #radius = np.ceil(ca[4])/2   ## original: replaced the ceil with a very minor increase of 1%
        radius = (ca[4]) / 2 + 0.51 * spacing[0]  # increase by roughly half the z spacing to capture a wider region/border for learning and to address the coarse net
        coord_x = ca[1]
        coord_y = ca[2]
        coord_z = ca[3]
        image_coord = np.array((coord_z, coord_y, coord_x))
        # determine the voxel coordinate given the world coordinate
        image_coord = world_2_voxel(image_coord, origin, spacing)
        # determine the range of the nodule
        #noduleRange = seq(-radius, radius, RESIZE_SPACING[0])  # original, uniform spacing
        noduleRange_z = seq(-radius, radius, spacing[0])
        noduleRange_y = seq(-radius, radius, spacing[1])
        noduleRange_x = seq(-radius, radius, spacing[2])
        # create the mask
        for x in noduleRange_x:
            for y in noduleRange_y:
                for z in noduleRange_z:
                    coords = world_2_voxel(np.array((coord_z + z, coord_y + y, coord_x + x)), origin, spacing)
                    #if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius:  ### original (constrained to a uniform RESIZE)
                    if (np.linalg.norm((image_coord - coords) * spacing)) < radius:
                        image_mask[int(np.round(coords[0])), int(np.round(coords[1])), int(np.round(coords[2]))] = int(1)
    return image_mask
def vis_square(visu_path, data, type):
    """Take an array of shape (n, height, width) or (n, height, width, 3)
    and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)"""
    # normalize data for display
    data = (data - data.min()) / (data.max() - data.min())
    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = (((0, n ** 2 - data.shape[0]),
                (0, 1), (0, 1))                 # add some space between filters
               + ((0, 0),) * (data.ndim - 3))   # don't pad the last dimension (if there is one)
    data = np.pad(data, padding, mode='constant', constant_values=1)  # pad with ones (white)
    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    plt.imshow(data[:, :, 0])
    plt.axis('off')
    if type:
        plt.savefig('./{}/weights.png'.format(visu_path), format='png')
    else:
        plt.savefig('./{}/activation.png'.format(visu_path), format='png')
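# Hypothetical usage sketch: tile ten random 8x8x3 "filters" into a 4x4 grid
# (ceil(sqrt(10)) == 4); unused cells are padded with white. Assumes the target
# directory exists and matplotlib/numpy are imported as above.
# vis_square('visu', np.random.rand(10, 8, 8, 3), type=True)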
def get_irlb_mem_gb_from_matrix_dim(nonzero_entries):
    irlba_mem_gb = round(np.ceil(1.0 * nonzero_entries / cr_constants.NUM_IRLB_MATRIX_ENTRIES_PER_MEM_GB)) + cr_constants.IRLB_BASE_MEM_GB
    return cr_constants.MATRIX_MEM_GB_MULTIPLIER * max(cr_constants.MIN_MEM_GB, irlba_mem_gb)
def compute_percentile_from_distribution(counter, percentile):
    """ Takes a Counter object (or value:frequency dict) and computes a single percentile.
    Uses Type 7 interpolation from:
      Hyndman, R.J.; Fan, Y. (1996). "Sample Quantiles in Statistical Packages".
    """
    assert 0 <= percentile <= 100
    n = np.sum(counter.values())
    h = (n - 1) * (percentile / 100.0)
    lower_value = None
    cum_sum = 0
    for value, freq in sorted(counter.items()):
        cum_sum += freq
        if cum_sum > np.floor(h) and lower_value is None:
            lower_value = value
        if cum_sum > np.ceil(h):
            return lower_value + (h - np.floor(h)) * (value - lower_value)
# Test for compute_percentile_from_distribution()
#def test_percentile(x, p):
#    c = Counter()
#    for xi in x:
#        c[xi] += 1
#    my_res = np.array([compute_percentile_from_distribution(c, p_i) for p_i in p], dtype=float)
#    numpy_res = np.percentile(x, p)
#    print np.sum(np.abs(numpy_res - my_res))
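# A small worked example of the Type-7 interpolation above (hypothetical counts):
#   counter = {1: 2, 4: 1, 7: 1}  -> n = 4, 75th percentile -> h = 3 * 0.75 = 2.25
#   cumulative counts: 2 (value 1), 3 (value 4), 4 (value 7)
#   lower_value = 4 (first cum_sum > floor(h) = 2); the return triggers at value 7
#   result = 4 + 0.25 * (7 - 4) = 4.75, matching np.percentile([1, 1, 4, 7], 75)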
def get_mem_gb_from_matrix_dim(nonzero_entries):
    ''' Estimate memory usage of loading a matrix. '''
    matrix_mem_gb = round(np.ceil(1.0 * nonzero_entries / cr_constants.NUM_MATRIX_ENTRIES_PER_MEM_GB))
    return cr_constants.MATRIX_MEM_GB_MULTIPLIER * max(cr_constants.MIN_MEM_GB, matrix_mem_gb)
def split(args):
    # Need to store umi_info and a json with a dict containing 1 key per barcode
    umi_info_mem_gb = 2 * int(np.ceil(vdj_umi_info.get_mem_gb(args.umi_info)))
    bc_diversity = len(cr_utils.load_barcode_whitelist(args.barcode_whitelist))
    assemble_summary_mem_gb = tk_stats.robust_divide(bc_diversity, DICT_BCS_PER_MEM_GB)
    return {
        'chunks': [{
            '__mem_gb': int(np.ceil(max(cr_constants.MIN_MEM_GB, umi_info_mem_gb + assemble_summary_mem_gb))),
        }]
    }
def split(args):
    chunks = []
    for reads_per_bc_file, bam, gem_group in itertools.izip(args.reads_per_bc,
                                                            args.barcode_chunked_bams,
                                                            args.chunk_gem_groups):
        subsample_rate = args.subsample_rate[str(gem_group)]
        with open(reads_per_bc_file) as f:
            reads_per_bc = []
            for line in f:
                _, reads = line.strip().split()
                reads_per_bc.append(float(reads) * subsample_rate)
        max_reads = np.max(reads_per_bc + [0.0])
        # vdj_asm is hard-coded to use a maximum of 200k reads / BC.
        max_reads = min(MAX_READS_PER_BC, max_reads)
        # The assembly step takes roughly num_reads * MEM_BYTES_PER_READ bytes of memory to complete each BC.
        mem_gb = max(2.0, int(np.ceil(MEM_BYTES_PER_READ * max_reads / 1e9)))
        chunks.append({
            'chunked_bam': bam,
            'gem_group': gem_group,
            '__mem_gb': mem_gb,
        })
    # If there were no input reads, create a dummy chunk
    if not chunks:
        chunks.append({'chunked_bam': None})
    return {'chunks': chunks, 'join': {'__threads': 4}}