# Common imports assumed across these snippets (each was extracted from a
# different project; only stdlib/numpy/matplotlib names are restored here,
# project-specific names like cr_constants or QtGui are left as in the source):
import math

import numpy as np
import matplotlib.pyplot as plt


def make_grid(I, ncols=8):
    assert isinstance(I, np.ndarray), 'plugin error, should pass numpy array here'
    assert I.ndim == 4 and I.shape[1] == 3
    nimg = I.shape[0]
    H = I.shape[2]
    W = I.shape[3]
    ncols = min(nimg, ncols)
    nrows = int(np.ceil(float(nimg) / ncols))
    canvas = np.zeros((3, H * nrows, W * ncols))
    i = 0
    for y in range(nrows):
        for x in range(ncols):
            if i >= nimg:
                break
            canvas[:, y * H:(y + 1) * H, x * W:(x + 1) * W] = I[i]
            i = i + 1
    return canvas
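# Hypothetical usage sketch (not part of the original source): tile ten
# random CHW images into a 4-column grid and check the canvas shape.
def _demo_make_grid():
    imgs = np.random.rand(10, 3, 32, 32)
    grid = make_grid(imgs, ncols=4)
    # 10 images at 4 columns -> ceil(10/4) = 3 rows, so (3, 96, 128).
    assert grid.shape == (3, 32 * 3, 32 * 4)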
def saveHintonPlot(self, matrix, num_tests, max_weight=None, ax=None):
    """Draw Hinton diagram for visualizing a weight matrix."""
    fig, ax = plt.subplots(1, 1)
    if not max_weight:
        max_weight = 2**np.ceil(np.log(np.abs(matrix).max()) / np.log(2))
    ax.patch.set_facecolor('gray')
    ax.set_aspect('equal', 'box')
    ax.xaxis.set_major_locator(plt.NullLocator())
    ax.yaxis.set_major_locator(plt.NullLocator())
    for (x, y), w in np.ndenumerate(matrix):
        color = 'white' if w > 0 else 'black'
        size = np.sqrt(np.abs(0.5 * w / num_tests))  # Need to scale so that it is between 0 and 0.5
        rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
                             facecolor=color, edgecolor=color)
        ax.add_patch(rect)
    ax.autoscale_view()
    ax.invert_yaxis()
    plt.savefig(self.figures_path + self.save_prefix + '-Hinton.eps')
    plt.close()
def fftfilt(b, x, *n):
    """Filter x with FIR taps b via the overlap-add FFT method.

    The optional *n (a caller-supplied FFT length) is accepted for interface
    compatibility but ignored in this trimmed version; the FFT length is
    chosen below to minimize the estimated cost.
    """
    N_x = len(x)
    N_b = len(b)
    # Candidate FFT lengths: powers of two between the filter and signal sizes.
    N = 2**np.arange(np.ceil(np.log2(N_b)), np.floor(np.log2(N_x)))
    cost = np.ceil(N_x / (N - N_b + 1)) * N * (np.log2(N) + 1)
    N_fft = int(N[np.argmin(cost)])
    # Compute the block length:
    L = int(N_fft - N_b + 1)
    # Compute the transform of the filter:
    H = np.fft.fft(b, N_fft)
    y = np.zeros(N_x, x.dtype)
    i = 0
    while i <= N_x:
        il = np.min([i + L, N_x])
        k = np.min([i + N_fft, N_x])
        yt = np.fft.ifft(np.fft.fft(x[i:il], N_fft) * H, N_fft)  # Overlap..
        y[i:k] = y[i:k] + yt[:k - i]                              # and add
        i += L
    return y
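# Hypothetical check (not from the original source): overlap-add filtering
# should agree with direct FIR filtering via np.convolve, truncated to the
# input length. Complex input dtype matches how `process` below calls it
# and avoids a ComplexWarning when the IFFT result is stored.
def _demo_fftfilt():
    rng = np.random.RandomState(0)
    b = rng.randn(32)                    # FIR filter taps
    x = rng.randn(4096).astype(complex)  # input signal
    y_fft = fftfilt(b, x)
    y_ref = np.convolve(b, x)[:len(x)]
    assert np.allclose(y_fft, y_ref)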
def laplace_gpu(y_gpu, mode='valid'):
    shape = np.array(y_gpu.shape).astype(np.uint32)
    dtype = y_gpu.dtype
    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(float(shape[1]) / block_size[0])),
                 int(np.ceil(float(shape[0]) / block_size[1])))
    shared_size = int((2 + block_size[0]) * (2 + block_size[1]) * dtype.itemsize)
    preproc = _generate_preproc(dtype, shape)
    mod = SourceModule(preproc + kernel_code, keep=True)
    # Output array renamed from `laplace_gpu` to avoid shadowing this function.
    if mode == 'valid':
        laplace_fun_gpu = mod.get_function("laplace_valid")
        out_gpu = cua.empty((y_gpu.shape[0] - 2, y_gpu.shape[1] - 2), y_gpu.dtype)
    if mode == 'same':
        laplace_fun_gpu = mod.get_function("laplace_same")
        out_gpu = cua.empty((y_gpu.shape[0], y_gpu.shape[1]), y_gpu.dtype)
    laplace_fun_gpu(out_gpu.gpudata, y_gpu.gpudata,
                    block=block_size, grid=grid_size, shared=shared_size)
    return out_gpu
def split(args):
    if args.skip or args.is_multi_genome:
        return {'chunks': [{'__mem_gb': cr_constants.MIN_MEM_GB}]}
    chunks = []
    min_clusters = cr_constants.MIN_N_CLUSTERS
    max_clusters = args.max_clusters if args.max_clusters is not None else cr_constants.MAX_N_CLUSTERS_DEFAULT
    matrix_mem_gb = np.ceil(MEM_FACTOR * cr_matrix.GeneBCMatrix.get_mem_gb_from_matrix_h5(args.matrix_h5))
    for n_clusters in range(min_clusters, max_clusters + 1):  # xrange in the Python 2 original
        chunk_mem_gb = max(matrix_mem_gb, cr_constants.MIN_MEM_GB)
        chunks.append({
            'n_clusters': n_clusters,
            '__mem_gb': chunk_mem_gb,
        })
    return {'chunks': chunks}
def expand_to_chunk_size(self, chunk_size, offset=Vec(0, 0, 0, dtype=int)):
    """
    Align a potentially non-axis aligned bbox to the grid by growing it
    to the nearest grid lines.

    Required:
        chunk_size: arraylike (x,y,z), the size of chunks in the
            dataset e.g. (64,64,64)
    Optional:
        offset: arraylike (x,y,z), the starting coordinate of the dataset
    """
    chunk_size = np.array(chunk_size, dtype=np.float32)
    result = self.clone()
    result = result - offset
    result.minpt = np.floor(result.minpt / chunk_size) * chunk_size
    result.maxpt = np.ceil(result.maxpt / chunk_size) * chunk_size
    return result + offset
def shrink_to_chunk_size(self, chunk_size, offset=Vec(0, 0, 0, dtype=int)):
    """
    Align a potentially non-axis aligned bbox to the grid by shrinking it
    to the nearest grid lines.

    Required:
        chunk_size: arraylike (x,y,z), the size of chunks in the
            dataset e.g. (64,64,64)
    Optional:
        offset: arraylike (x,y,z), the starting coordinate of the dataset
    """
    chunk_size = np.array(chunk_size, dtype=np.float32)
    result = self.clone()
    result = result - offset
    result.minpt = np.ceil(result.minpt / chunk_size) * chunk_size
    result.maxpt = np.floor(result.maxpt / chunk_size) * chunk_size
    return result + offset
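# Plain-numpy sketch of the alignment math shared by the two methods above
# (the Vec/bbox machinery is omitted; values are illustrative). Growing snaps
# minpt down and maxpt up to the chunk grid; shrinking does the opposite.
def _demo_chunk_alignment():
    chunk = np.array([64.0, 64.0, 64.0])
    minpt = np.array([10.0, 70.0, 0.0])
    maxpt = np.array([100.0, 130.0, 64.0])
    grown_min = np.floor(minpt / chunk) * chunk  # -> [  0.,  64.,   0.]
    grown_max = np.ceil(maxpt / chunk) * chunk   # -> [128., 192.,  64.]
    assert (grown_min <= minpt).all() and (grown_max >= maxpt).all()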
def _draw_single_box(image, xmin, ymin, xmax, ymax, display_str, font, color='black', thickness=4):
    draw = ImageDraw.Draw(image)
    (left, right, top, bottom) = (xmin, xmax, ymin, ymax)
    draw.line([(left, top), (left, bottom), (right, bottom),
               (right, top), (left, top)], width=thickness, fill=color)
    text_bottom = bottom
    # Reverse list and print from bottom to top.
    text_width, text_height = font.getsize(display_str)
    margin = np.ceil(0.05 * text_height)
    draw.rectangle(
        [(left, text_bottom - text_height - 2 * margin),
         (left + text_width, text_bottom)],
        fill=color)
    draw.text(
        (left + margin, text_bottom - text_height - margin),
        display_str,
        fill='black',
        font=font)
    return image
def resize_image(image, target_shape, pad_value=0):
    assert isinstance(target_shape, list) or isinstance(target_shape, tuple)
    add_shape, subs_shape = [], []
    image_shape = image.shape
    shape_difference = np.asarray(target_shape, dtype=int) - np.asarray(image_shape, dtype=int)
    for diff in shape_difference:
        if diff < 0:
            # Crop: drop |ceil(diff/2)| from the front and |floor(diff/2)| from the back.
            subs_shape.append(np.s_[int(np.abs(np.ceil(diff / 2))):int(np.floor(diff / 2))])
            add_shape.append((0, 0))
        else:
            # Pad: split the difference between the two ends.
            subs_shape.append(np.s_[:])
            add_shape.append((int(np.ceil(1.0 * diff / 2)), int(np.floor(1.0 * diff / 2))))
    output = np.pad(image, tuple(add_shape), 'constant', constant_values=(pad_value, pad_value))
    # Index with a tuple; indexing with a plain list of slices is deprecated in numpy.
    output = output[tuple(subs_shape)]
    return output
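# Hypothetical round-trip check (not in the original source): one axis is
# padded up and the other cropped down so the output matches the target.
def _demo_resize_image():
    img = np.ones((5, 10))
    out = resize_image(img, target_shape=(8, 6), pad_value=0)
    # Axis 0 grew 5 -> 8 (padded), axis 1 shrank 10 -> 6 (cropped).
    assert out.shape == (8, 6)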
def logTickValues(self, minVal, maxVal, size, stdTicks):
    ## start with the tick spacing given by tickValues().
    ## Any level whose spacing is < 1 needs to be converted to log scale
    ticks = []
    for (spacing, t) in stdTicks:
        if spacing >= 1.0:
            ticks.append((spacing, t))
    if len(ticks) < 3:
        v1 = int(np.floor(minVal))
        v2 = int(np.ceil(maxVal))
        #major = list(range(v1+1, v2))
        minor = []
        for v in range(v1, v2):
            minor.extend(v + np.log10(np.arange(1, 10)))
        minor = [x for x in minor if x > minVal and x < maxVal]
        ticks.append((None, minor))
    return ticks
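# Standalone illustration of the minor-tick expression above (hypothetical,
# not from the source): within one decade on a log axis, minor ticks land at
# log10 of 1..9 times the decade start, e.g. log10(200) = 2 + log10(2).
def _demo_log_minor_ticks():
    v = 2
    minor = v + np.log10(np.arange(1, 10))
    assert np.isclose(minor[1], np.log10(200))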
def renderSymbol(symbol, size, pen, brush, device=None):
    """
    Render a symbol specification to QImage.
    Symbol may be either a QPainterPath or one of the keys in the Symbols dict.
    If *device* is None, a new QImage will be returned. Otherwise,
    the symbol will be rendered into the device specified (See QPainter documentation
    for more information).
    """
    ## Render a spot with the given parameters to a pixmap
    penPxWidth = max(np.ceil(pen.widthF()), 1)
    if device is None:
        device = QtGui.QImage(int(size + penPxWidth), int(size + penPxWidth), QtGui.QImage.Format_ARGB32)
        device.fill(0)
    p = QtGui.QPainter(device)
    try:
        p.setRenderHint(p.Antialiasing)
        p.translate(device.width() * 0.5, device.height() * 0.5)
        drawSymbol(p, symbol, size, pen, brush)
    finally:
        p.end()
    return device
def processBlocks(lines, header, obstimes, svset, headlines, sats):
    obstypes = header['# / TYPES OF OBSERV'][1:]
    blocks = Panel4D(labels=obstimes,
                     items=list(svset),
                     major_axis=obstypes,
                     minor_axis=['data', 'lli', 'ssi'])
    ttime1 = 0
    ttime2 = 0
    for i in range(len(headlines)):
        linesinblock = len(sats[i]) * int(np.ceil(header['# / TYPES OF OBSERV'][0] / 5))
        block = ''.join(lines[headlines[i] + 1:headlines[i] + linesinblock + 1])
        t1 = time.time()
        bdf = _block2df(block, obstypes, sats[i], len(sats[i]))
        ttime1 += (time.time() - t1)
        t2 = time.time()
        blocks.loc[obstimes[i], sats[i]] = bdf
        ttime2 += (time.time() - t2)
    print("{0:.2f} seconds for _block2df".format(ttime1))
    print("{0:.2f} seconds for panel assignments".format(ttime2))
    return blocks
def reset(self):
    """ Resets the state of the generator"""
    self.step = 0
    Y = np.argmax(self.Y, 1)
    labels = np.unique(Y)
    idx = []
    smallest = len(Y)
    for i, label in enumerate(labels):
        where = np.where(Y == label)[0]
        if smallest > len(where):
            self.slabel = i
            smallest = len(where)
        idx.append(where)
    self.idx = idx
    self.labels = labels
    self.n_per_class = int(self.batch_size // len(labels))
    self.n_batches = int(np.ceil((smallest // self.n_per_class))) + 1
    self.update_probabilities()
def __init__(self, X, Y, batch_size, cropsize=0, truncate=False, sequential=False,
             random=True, val=False, class_weights=None):
    assert len(X) == len(Y), 'X and Y must be the same length {}!={}'.format(len(X), len(Y))
    if sequential: print('Using sequential mode')
    print('starting normal generator')
    self.X = X
    self.Y = Y
    self.rnd_idx = np.arange(len(Y))
    self.Y_last_epoch = []
    self.val = val
    self.step = 0
    self.i = 0
    self.cropsize = cropsize
    self.truncate = truncate
    self.random = False if sequential or val else random
    self.batch_size = int(batch_size)
    self.sequential = sequential
    self.c_weights = class_weights if class_weights else dict(
        zip(np.unique(np.argmax(Y, 1)), np.ones(len(np.argmax(Y, 1)))))
    assert set(np.argmax(Y, 1)) == set([int(x) for x in self.c_weights.keys()]), \
        'not all labels in class weights'
    self.n_batches = int(len(X) // batch_size if truncate else np.ceil(len(X) / batch_size))
    if self.random: self.randomize()
def calc_row_col(self, num_ex, num_items):
    num_rows_per_ex = int(np.ceil(num_items / self.max_num_col))
    if num_items > self.max_num_col:
        num_col = self.max_num_col
        num_row = num_rows_per_ex * num_ex
    else:
        num_row = num_ex
        num_col = num_items

    def calc(ii, jj):
        col = jj % self.max_num_col
        row = num_rows_per_ex * ii + jj // self.max_num_col
        return row, col

    return num_row, num_col, calc
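# Hypothetical usage (the method only reads self.max_num_col, so a stub
# object suffices here): 2 examples of 5 items each, at most 3 columns.
def _demo_calc_row_col():
    class _Stub(object):
        max_num_col = 3
    num_row, num_col, calc = calc_row_col(_Stub(), num_ex=2, num_items=5)
    assert (num_row, num_col) == (4, 3)  # ceil(5/3) = 2 rows per example
    assert calc(1, 4) == (3, 1)          # example 1, item 4 -> row 3, col 1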
def card_strength(self, include_gem=True):
    # Base attribute value from naked card
    base_attr = np.array([getattr(self, attr.lower()) for attr in attr_list], dtype=float)
    # Bonus from bond
    bond_bonus = np.array([self.bond * (attr == self.main_attr) for attr in attr_list], dtype=float)
    # Compute card-only attribute: base+bond
    card_only_attr = base_attr + bond_bonus
    if not include_gem:
        strength = np.array(card_only_attr, dtype=int).tolist()
    else:
        gem_type_list = ['Kiss', 'Perfume', 'Ring', 'Cross']
        gem_matrix = {gem_type: np.zeros(3) for gem_type in gem_type_list}
        for gem in self.equipped_gems:
            gem_type = gem.name.split()[1]
            if gem_type in gem_type_list:
                gem_matrix[gem_type][attr_list.index(gem.attribute)] = gem.value / 100**(gem.effect == 'attr_boost')
        strength = card_only_attr.copy()
        for gem_type in gem_type_list:
            if gem_type in ['Kiss', 'Perfume']:
                strength += gem_matrix[gem_type]
            elif gem_type in ['Ring', 'Cross']:
                strength += np.ceil(card_only_attr * gem_matrix[gem_type])
        strength = np.array(strength, dtype=int)
    return {k.lower() + '*': v for k, v in zip(attr_list, strength)}
def vis_square(data):
    """Take an array of shape (n, height, width) or (n, height, width, 3)
    and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)"""
    # normalize data for display
    data = (data - data.min()) / (data.max() - data.min())
    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = (((0, n ** 2 - data.shape[0]),
                (0, 1), (0, 1))                 # add some space between filters
               + ((0, 0),) * (data.ndim - 3))   # don't pad the last dimension (if there is one)
    data = np.pad(data, padding, mode='constant', constant_values=1)  # pad with ones (white)
    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    plt.imshow(data, interpolation='nearest')
    plt.axis('off')
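# Illustrative call (hypothetical data; assumes matplotlib imported as plt,
# as above): 11 random filters are padded up to a 4x4 mosaic, since
# ceil(sqrt(11)) == 4, with white one-pixel separators between tiles.
def _demo_vis_square():
    filters = np.random.rand(11, 8, 8)
    vis_square(filters)
    plt.show()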
def process(self, wave):
    wave.check_mono()
    if wave.sample_rate != self.sr:
        raise Exception("Wrong sample rate")
    n = int(np.ceil(2 * wave.num_frames / float(self.w_len)))
    m = (n + 1) * self.w_len // 2
    swindow = self.make_signal_window(n)
    # Half-window hop; // keeps the slice indices integral under Python 3
    # (the original relied on Python 2 integer division).
    hop = self.w_len // 2
    win_ratios = [self.window / swindow[t * hop: t * hop + self.w_len]
                  for t in range(n)]
    wave = wave.zero_pad(0, int(m - wave.num_frames))
    wave = audio.Wave(signal.hilbert(wave), wave.sample_rate)
    result = np.zeros((self.n_bins, n))
    for b in range(self.n_bins):
        w = self.widths[b]
        wc = 1 / np.square(w + 1)
        filt = self.filters[b]  # renamed from `filter` to avoid shadowing the builtin
        band = fftfilt(filt, wave.zero_pad(0, int(2 * w))[:, 0])
        band = band[int(w): int(w + m), np.newaxis]
        for t in range(n):
            frame = band[t * hop: t * hop + self.w_len, :] * win_ratios[t]
            result[b, t] = wc * np.real(np.conj(np.dot(frame.conj().T, frame)))
    return audio.Spectrogram(result, self.sr, self.w_len, hop)
def n2mfrow(nr_plots):
    """
    Compute the rows and columns given the number
    of plots.

    This is a port of grDevices::n2mfrow from R
    """
    if nr_plots <= 3:
        nrow, ncol = nr_plots, 1
    elif nr_plots <= 6:
        nrow, ncol = (nr_plots + 1) // 2, 2
    elif nr_plots <= 12:
        nrow, ncol = (nr_plots + 2) // 3, 3
    else:
        nrow = int(np.ceil(np.sqrt(nr_plots)))
        ncol = int(np.ceil(nr_plots / nrow))
    return (nrow, ncol)
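# Quick check of the layout rules above (hypothetical, not from the source):
def _demo_n2mfrow():
    assert n2mfrow(3) == (3, 1)
    assert n2mfrow(5) == (3, 2)    # (5 + 1) // 2 rows, 2 cols
    assert n2mfrow(13) == (4, 4)   # ceil(sqrt(13)) = 4, ceil(13/4) = 4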
def get_padding_type(kernel_params, input_shape, output_shape):
    '''Translates Caffe's numeric padding to one of ('SAME', 'VALID').
    Caffe supports arbitrary padding values, while TensorFlow only
    supports 'SAME' and 'VALID' modes. So, not all Caffe paddings
    can be translated to TensorFlow. There are some subtleties to
    how the padding edge-cases are handled. These are described here:
    https://github.com/Yangqing/caffe2/blob/master/caffe2/proto/caffe2_legacy.proto
    '''
    k_h, k_w, s_h, s_w, p_h, p_w = kernel_params
    s_o_h = np.ceil(input_shape.height / float(s_h))
    s_o_w = np.ceil(input_shape.width / float(s_w))
    if (output_shape.height == s_o_h) and (output_shape.width == s_o_w):
        return 'SAME'
    v_o_h = np.ceil((input_shape.height - k_h + 1.0) / float(s_h))
    v_o_w = np.ceil((input_shape.width - k_w + 1.0) / float(s_w))
    if (output_shape.height == v_o_h) and (output_shape.width == v_o_w):
        return 'VALID'
    return None
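# Worked example of the two output-shape formulas (hypothetical shapes; the
# original callers pass Caffe layer descriptors exposing .height/.width):
# with a 3x3 kernel and stride 2 on a 7x7 input,
# SAME gives ceil(7/2) = 4 and VALID gives ceil((7-3+1)/2) = 3.
def _demo_get_padding_type():
    from collections import namedtuple
    Shape = namedtuple('Shape', ['height', 'width'])
    kernel_params = (3, 3, 2, 2, 0, 0)  # k_h, k_w, s_h, s_w, p_h, p_w
    assert get_padding_type(kernel_params, Shape(7, 7), Shape(4, 4)) == 'SAME'
    assert get_padding_type(kernel_params, Shape(7, 7), Shape(3, 3)) == 'VALID'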
def plot_weight_matrix(Z, outname, save=True):
    num = Z.shape[0]
    fig = plt.figure(1, (80, 80))
    fig.subplots_adjust(left=0.05, right=0.95)
    grid = AxesGrid(fig, (1, 4, 2),  # similar to subplot(142)
                    nrows_ncols=(int(np.ceil(num / 10.)), 10),
                    axes_pad=0.04,
                    share_all=True,
                    label_mode="L",
                    )
    for i in range(num):
        im = grid[i].imshow(Z[i, :, :, :].mean(axis=0), cmap='gray')
    for i in range(grid.ngrids):
        grid[i].axis('off')
    for cax in grid.cbar_axes:
        cax.toggle_label(False)
    if save:
        fig.savefig(outname, bbox_inches='tight')
    fig.clear()
def __init__(self, h, x0=None, **kwargs):
    assert type(h) is list, 'h must be a list'
    assert len(h) in [2, 3], "TreeMesh is only in 2D or 3D."
    if '_levels' in kwargs.keys():
        self._levels = kwargs.pop('_levels')
    BaseTensorMesh.__init__(self, h, x0, **kwargs)
    if self._levels is None:
        self._levels = int(np.log2(len(self.h[0])))
    # self._levels = levels
    self._levelBits = int(np.ceil(np.sqrt(self._levels))) + 1
    self.__dirty__ = True  #: The numbering is dirty!
    if '_cells' in kwargs.keys():
        self._cells = kwargs.pop('_cells')
    else:
        self._cells.add(0)
def _optim(self, xys):
    idx = np.arange(len(xys))
    # Cast to int so np.arange below gets an integral step.
    self.batch_size = int(np.ceil(len(xys) / self.nbatches))
    batch_idx = np.arange(self.batch_size, len(xys), self.batch_size)
    for self.epoch in range(1, self.max_epochs + 1):
        # shuffle training examples
        self._pre_epoch()
        shuffle(idx)
        # store epoch for callback
        self.epoch_start = timeit.default_timer()
        # process mini-batches
        for batch in np.split(idx, batch_idx):
            # select indices for current batch
            bxys = [xys[z] for z in batch]
            self._process_batch(bxys)
        # check callback functions; note the break only exits the callback
        # loop, not the epoch loop
        for f in self.post_epoch:
            if not f(self):
                break
def dec_round(num, dprec=4, rnd='down', rto_zero=False):
    """
    Round up/down numeric ``num`` at specified decimal ``dprec``.

    Parameters
    ----------
    num: float
    dprec: int
        Decimal position for truncation.
    rnd: str (default: 'down')
        Set as 'up' or 'down' to return a rounded-up or rounded-down value.
    rto_zero: bool (default: False)
        Use a *round-towards-zero* method, e.g., ``floor(-3.5) == -3``.

    Returns
    ----------
    float (default: rounded-up)
    """
    # Keep the decimal position separate from the scale factor; the original
    # overwrote ``dprec`` and then passed the scale factor to np.round().
    scale = 10**dprec
    if rnd == 'up' or (rnd == 'down' and rto_zero and num < 0.):
        return np.ceil(num * scale) / scale
    elif rnd == 'down' or (rnd == 'up' and rto_zero and num < 0.):
        return np.floor(num * scale) / scale
    return np.round(num, dprec)
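# Behaviour check for the fixed rounding helper (values chosen to hit each
# branch; hypothetical, not from the original source):
def _demo_dec_round():
    assert dec_round(3.14159, dprec=2, rnd='up') == 3.15
    assert dec_round(3.14159, dprec=2, rnd='down') == 3.14
    # round-towards-zero: a negative value rounds toward 0 when going 'down'
    assert dec_round(-3.14159, dprec=2, rnd='down', rto_zero=True) == -3.14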
def update(self, es, **kwargs):
    if es.countiter < 2:
        self.initialize(es)
        self.fit = es.fit.fit
    else:
        ft1, ft2 = self.fit[int(self.index_to_compare)], self.fit[int(np.ceil(self.index_to_compare))]
        ftt1, ftt2 = es.fit.fit[(es.popsize - 1) // 2], es.fit.fit[int(np.ceil((es.popsize - 1) / 2))]
        pt2 = self.index_to_compare - int(self.index_to_compare)
        # ptt2 = (es.popsize - 1) / 2 - (es.popsize - 1) // 2  # not in use
        s = 0
        if 1 < 3:  # always true; kept from the upstream cma coding style
            s += pt2 * sum(es.fit.fit <= self.fit[int(np.ceil(self.index_to_compare))])
            s += (1 - pt2) * sum(es.fit.fit < self.fit[int(self.index_to_compare)])
            s -= es.popsize / 2.
            s *= 2. / es.popsize  # the range was popsize, is 2
        self.s = (1 - self.c) * self.s + self.c * s
        es.sigma *= exp(self.s / self.damp)
        # es.more_to_write.append(10**(self.s))
        # es.more_to_write.append(10**((2 / es.popsize) * (sum(es.fit.fit < self.fit[int(self.index_to_compare)]) - (es.popsize + 1) / 2)))
        # # es.more_to_write.append(10**(self.index_to_compare - sum(self.fit <= es.fit.fit[es.popsize // 2])))
        # # es.more_to_write.append(10**(np.sign(self.fit[int(self.index_to_compare)] - es.fit.fit[es.popsize // 2])))
        self.fit = es.fit.fit
def __init__(self, env, n, max_path_length, scope=None):
    if scope is None:
        # initialize random scope
        scope = str(uuid.uuid4())
    envs_per_worker = int(np.ceil(n * 1.0 / singleton_pool.n_parallel))
    alloc_env_ids = []
    rest_alloc = n
    start_id = 0
    for _ in range(singleton_pool.n_parallel):
        n_allocs = min(envs_per_worker, rest_alloc)
        alloc_env_ids.append(list(range(start_id, start_id + n_allocs)))
        start_id += n_allocs
        rest_alloc = max(0, rest_alloc - envs_per_worker)
    singleton_pool.run_each(worker_init_envs, [(alloc, scope, env) for alloc in alloc_env_ids])
    self._alloc_env_ids = alloc_env_ids
    self._action_space = env.action_space
    self._observation_space = env.observation_space
    self._num_envs = n
    self.scope = scope
    self.ts = np.zeros(n, dtype='int')
    self.max_path_length = max_path_length
def view_samples(self, show=True):
    """Displays the samples."""
    if not self.samples:
        return  # Nothing to show...
    plt.figure("Sample views")
    num = len(self.samples)
    rows = math.floor(num ** .5)
    cols = math.ceil(num / rows)
    for idx, img in enumerate(self.samples):
        plt.subplot(rows, cols, idx + 1)
        plt.imshow(img, interpolation='nearest')
    if show:
        plt.show()
# EXPERIMENT: Try breaking out each output encoder by type instead of
# concatenating them all together. Each type of sensor would then get its own
# HTM. Maybe keep the derivatives with their source?
#
def make_train_test_split(prms):
    '''
    I will just make one split and consider the last 5% of the images as the
    val images. Randomly sampling this data is a bad idea, because many images
    appear together as pairs. Selecting from the end will maximize the chances
    of using unique and different images in the train and test splits.
    '''
    # Read the source pairs.
    fid = open(prms['paths']['pairList']['raw'], 'r')
    lines = fid.readlines()
    fid.close()
    numIm, numPairs = int(lines[0].split()[0]), int(lines[0].split()[1])
    lines = lines[1:]
    # Make train and val splits
    N = len(lines)
    trainNum = int(np.ceil(0.95 * N))
    trainLines = lines[0:trainNum]
    testLines = lines[trainNum:]
    _write_pairs(prms['paths']['pairList']['train'], trainLines, numIm)
    _write_pairs(prms['paths']['pairList']['test'], testLines, numIm)
##
# Get the list of tar files for downloading the image data