def torus_faces(x, y):
    """Build the quad faces of an x-by-y torus grid as an SvPolygon."""
    faces = np.empty((x * y, 4), dtype=np.uint32)
    tmp = np.arange(0, x * y)
    faces[:, 0] = tmp                  # this vertex
    faces[:, 1] = np.roll(tmp, -y)     # same position on the next ring (wraps)
    tmp += 1                           # next vertex along the ring ...
    tmp.shape = (x, y)
    tmp[:, y - 1] -= y                 # ... wrapping each ring back to its start
    tmp.shape = -1
    faces[:, 3] = tmp
    faces[:, 2] = np.roll(tmp, -y)
    faces.shape = -1                   # SvPolygon takes a flat index array
    l_total = np.empty(x * y, dtype=np.uint32)
    l_total[:] = 4                     # every face is a quad
    l_start = np.arange(0, (x * y) * 4, 4, dtype=np.uint32)
    return SvPolygon(l_start, l_total, faces)
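SvPolygon comes from Sverchok, but the index arithmetic is self-contained. A minimal sketch of the same wrap-around quad indexing in plain numpy (the names here are illustrative only):

import numpy as np

x, y = 3, 4                                  # 3 rings of 4 vertices each
this_vert = np.arange(x * y)
next_ring = np.roll(this_vert, -y)           # same position, next ring (wraps)
next_in_ring = this_vert.reshape(x, y)
next_in_ring = np.roll(next_in_ring, -1, axis=1).reshape(-1)  # next vertex, same ring

quads = np.stack([this_vert, next_ring,
                  np.roll(next_in_ring, -y), next_in_ring], axis=1)
assert quads.min() >= 0 and quads.max() < x * y   # every index is a valid vertex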
Python uint32() usage examples
def gen_file(fname, nbytes):
    nelems = int(nbytes / 4)
    data = np.random.randint(0, 2 ** 32, nelems, dtype=np.uint32)
    # zero out ~10% so there is something to compress
    zeros = np.random.randint(0, nelems, int(nelems / 10.0))
    data[zeros] = 0
    data = data.tobytes()  # tostring() is deprecated in favour of tobytes()
    # write 1GB max at a time - the gzip
    # module doesn't like writing >= 4GB
    # in one go.
    chunksize = 1073741824
    while len(data) > 0:
        chunk = data[:chunksize]
        data = data[chunksize:]
        with gzip.open(fname, 'ab') as outf:
            outf.write(chunk)
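A quick round-trip check for gen_file might look like this (path and size are arbitrary test values; note the function appends, so the target file must not already exist):

import gzip
import numpy as np

gen_file('/tmp/test_u32.gz', 4096)           # 1024 uint32 values
with gzip.open('/tmp/test_u32.gz', 'rb') as f:
    restored = np.frombuffer(f.read(), dtype=np.uint32)
assert restored.size == 1024
print((restored == 0).mean())                # roughly 0.1, from the zeroed slots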
def get_risk_free_rate(self, start_date, end_date):
    mongo_dates = self._yield_curve['dates'].find({}, {"_id": 0}).sort('date', pymongo.ASCENDING)
    _dates = np.array([np.uint32(d['date']) for d in mongo_dates])
    tenor = risk_free_helper.get_tenor_for(start_date, end_date)
    tenor = tenor[-1] + tenor[:-1]
    mongo_data = self._yield_curve[tenor].find({}, {"_id": 0})
    _table = np.array([d['data'] for d in mongo_data])
    d = start_date.year * 10000 + start_date.month * 100 + start_date.day
    pos = _dates.searchsorted(d)
    if pos > 0 and (pos == len(_dates) or _dates[pos] != d):
        pos -= 1
    while pos >= 0 and np.isnan(_table[pos]):
        # data is missing ...
        pos -= 1
    return _table[pos]
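The searchsorted fallback above implements "latest known date not after d"; a standalone illustration with toy yyyymmdd integers:

import numpy as np

_dates = np.array([20160104, 20160105, 20160108], dtype=np.uint32)
d = 20160107                                 # absent: fall back to 20160105
pos = _dates.searchsorted(d)                 # insertion point, here 2
if pos > 0 and (pos == len(_dates) or _dates[pos] != d):
    pos -= 1                                 # step back to the previous date
print(_dates[pos])                           # 20160105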
def available_data_range(self, frequency):
    """
    Return the available data range.

    :param str frequency: bar frequency; `1d` for daily bars, `1m` for minute bars
    :return: (earliest, latest)
    """
    if frequency == '1d':
        mongo_data = self._day_bars[self.INSTRUMENT_TYPE_MAP['INDX']]['000001.XSHG'].find({}, {"_id": 0}).sort('date', pymongo.ASCENDING)
        mongo_data = list(mongo_data)
        s, e = np.uint32(mongo_data[0]['date']), np.uint32(mongo_data[-1]['date'])
        return convert_int_to_date(s).date(), convert_int_to_date(e).date()
    if frequency == '1m':
        raise NotImplementedError
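convert_int_to_date is a project helper; a plausible minimal version for the yyyymmdd integers used here (an assumption, not the project's actual implementation):

import datetime

def convert_int_to_date(d):
    d = int(d)
    return datetime.datetime(d // 10000, d // 100 % 100, d % 100)

print(convert_int_to_date(20160108).date())  # 2016-01-08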
def laplace_stack_gpu(y_gpu, mode='valid'):
    """
    This function computes the Laplacian of each slice of a stack of images
    """
    shape = np.array(y_gpu.shape).astype(np.uint32)
    dtype = y_gpu.dtype
    block_size = (6, int(np.floor(512. / 6. / float(shape[0]))), int(shape[0]))
    grid_size = (int(np.ceil(float(shape[1]) / block_size[0])),
                 int(np.ceil(float(shape[0]) / block_size[1])))
    shared_size = int((2 + block_size[0]) * (2 + block_size[1]) * (2 + block_size[2])
                      * dtype.itemsize)
    preproc = _generate_preproc(dtype, (shape[1], shape[2]))
    mod = SourceModule(preproc + kernel_code, keep=True)
    laplace_fun_gpu = mod.get_function("laplace_stack_same")
    laplace_gpu = cua.empty((y_gpu.shape[0], y_gpu.shape[1], y_gpu.shape[2]),
                            y_gpu.dtype)
    laplace_fun_gpu(laplace_gpu.gpudata, y_gpu.gpudata,
                    block=block_size, grid=grid_size, shared=shared_size)
    return laplace_gpu
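For sanity-checking the GPU output, a CPU reference of a per-slice 5-point Laplacian (assuming that is what the laplace_stack_same kernel computes; border handling may differ):

import numpy as np

def laplace_stack_cpu(y):
    out = np.zeros_like(y)
    out[:, 1:-1, 1:-1] = (y[:, :-2, 1:-1] + y[:, 2:, 1:-1] +
                          y[:, 1:-1, :-2] + y[:, 1:-1, 2:]
                          - 4.0 * y[:, 1:-1, 1:-1])
    return out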
def laplace3d_gpu(y_gpu):
    shape = np.array(y_gpu.shape).astype(np.uint32)
    dtype = y_gpu.dtype
    block_size = (6, int(np.floor(512. / 6. / float(shape[0]))), int(shape[0]))
    grid_size = (int(np.ceil(float(shape[1]) / block_size[0])),
                 int(np.ceil(float(shape[0]) / block_size[1])))
    shared_size = int((2 + block_size[0]) * (2 + block_size[1]) * (2 + block_size[2])
                      * dtype.itemsize)
    preproc = _generate_preproc(dtype, (shape[1], shape[2]))
    mod = SourceModule(preproc + kernel_code, keep=True)
    laplace_fun_gpu = mod.get_function("laplace3d_same")
    laplace_gpu = cua.empty((y_gpu.shape[0], y_gpu.shape[1], y_gpu.shape[2]),
                            y_gpu.dtype)
    laplace_fun_gpu(laplace_gpu.gpudata, y_gpu.gpudata,
                    block=block_size, grid=grid_size, shared=shared_size)
    return laplace_gpu
def modify_sparse23_gpu(y_gpu, beta):
    shape = np.array(y_gpu.shape).astype(np.uint32)
    gpu_shape = np.array([np.prod(shape), np.prod(shape)])
    gpu_shape = np.uint32(np.ceil(np.sqrt(gpu_shape)))
    dtype = y_gpu.dtype
    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(float(gpu_shape[1]) / block_size[0])),
                 int(np.ceil(float(gpu_shape[0]) / block_size[1])))
    preproc = _generate_preproc(dtype, np.array(grid_size)
                                * np.array(block_size)[0:1])
    mod = SourceModule(preproc + kernel_code, keep=True)
    modify_alpha23_fun = mod.get_function("modify_alpha23")
    modify_alpha23_fun(y_gpu.gpudata, np.float32(beta), np.uint32(np.prod(shape)),
                       block=block_size, grid=grid_size)
def modify_sparse_gpu(y_gpu, beta, alpha=2/3):
    shape = np.array(y_gpu.shape).astype(np.uint32)
    gpu_shape = np.array([np.prod(shape), np.prod(shape)])
    gpu_shape = np.uint32(np.ceil(np.sqrt(gpu_shape)))
    dtype = y_gpu.dtype
    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(float(gpu_shape[1]) / block_size[0])),
                 int(np.ceil(float(gpu_shape[0]) / block_size[1])))
    preproc = _generate_preproc(dtype, np.array(grid_size)
                                * np.array(block_size)[0:1])
    mod = SourceModule(preproc + kernel_code, keep=True)
    modify_alpha_fun = mod.get_function("modify_alpha")
    modify_alpha_fun(y_gpu.gpudata, np.float32(beta),
                     np.float32(alpha), np.uint32(np.prod(shape)),
                     block=block_size, grid=grid_size)
def project_on_basis_gpu(fs_gpu, basis_gpu):
    basis_length = basis_gpu.shape[0]
    shape = np.array(fs_gpu.shape).astype(np.uint32)
    dtype = fs_gpu.dtype
    block_size = (16, 16, 1)
    grid_size = (1, int(np.ceil(float(basis_length) / block_size[1])))
    weights_gpu = cua.empty(basis_length, dtype=dtype)
    preproc = _generate_preproc(dtype, shape)
    preproc += '#define BLOCK_SIZE %d\n' % (block_size[0] * block_size[1])
    mod = SourceModule(preproc + projection_code, keep=True)
    projection_fun = mod.get_function("projection")
    projection_fun(weights_gpu.gpudata, fs_gpu.gpudata, basis_gpu.gpudata,
                   np.uint32(basis_length),
                   block=block_size, grid=grid_size)
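Each weight is presumably the inner product of fs with one basis element; a CPU reference under that assumption (the flattening order must match the GPU memory layout):

import numpy as np

def project_on_basis_cpu(fs, basis):
    # weights[i] = <basis[i], fs>, reducing over all non-leading axes
    return basis.reshape(basis.shape[0], -1) @ fs.ravel()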
color_utils.py (project: house-of-enlightenment, author: house-of-enlightenment)
def hsv2rgb(hsv):
    hsv = np.array(hsv)
    input_shape = hsv.shape
    hsv = hsv.reshape(-1, 3)
    h, s, v = hsv[:, 0] / 255, hsv[:, 1] / 255, hsv[:, 2]
    i = np.uint32(h * 6.0)  # pylint: disable=no-member
    f = (h * 6.0) - i
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))
    i = i % 6
    rgb = np.zeros_like(hsv, np.uint8)
    v, t, p, q = v.reshape(-1, 1), t.reshape(-1, 1), p.reshape(-1, 1), q.reshape(-1, 1)
    # This could probably be much faster if replaced with np.choose
    rgb[i == 0] = np.hstack([v, t, p])[i == 0]
    rgb[i == 1] = np.hstack([q, v, p])[i == 1]
    rgb[i == 2] = np.hstack([p, v, t])[i == 2]
    rgb[i == 3] = np.hstack([p, q, v])[i == 3]
    rgb[i == 4] = np.hstack([t, p, v])[i == 4]
    rgb[i == 5] = np.hstack([v, p, q])[i == 5]
    rgb[s == 0.0] = np.hstack([v, v, v])[s == 0.0]
    return rgb.reshape(input_shape)
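Note the scaling convention: h and s are expected in 0..255, while v passes through unscaled and so acts directly as the output brightness in 0..255. A quick cross-check against the standard library, assuming hsv2rgb above is importable:

import colorsys
import numpy as np

print(hsv2rgb(np.array([128, 255, 255])))            # approx [  0 252 255]
r, g, b = colorsys.hsv_to_rgb(128 / 255, 1.0, 1.0)
print(np.round(np.array([r, g, b]) * 255))           # approx [  0. 252. 255.]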
def table(self):
    """Return a 2D array of UInt32 representing the sequence"""
    table = np.zeros((len(self.sequence_items), 6), dtype=np.uint32)
    for ct, entry in enumerate(self.sequence_items):
        control_word = SequenceControlWord(
            init_marker_sequence=1 if ct == 0 else 0,
            end_marker_sequence=1 if ct == (len(self.sequence_items) - 1) else 0,
            marker_enable=1 if isinstance(entry, WaveformEntry) else 0,
            data_cmd_sel=1 if isinstance(entry, IdleEntry) else 0,
        )
        table[ct, 0] = control_word.packed
        table[ct, 1] = self.sequence_loop_ct if ct == 0 else 0
        if isinstance(entry, WaveformEntry):
            table[ct, 2] = entry.loop_ct
            table[ct, 3] = entry.segment_id
            table[ct, 5] = 0xffffffff
        elif isinstance(entry, IdleEntry):
            table[ct, 3] = entry.dac_level()
            table[ct, 4] = entry.length
        else:
            raise TypeError("Unhandled sequence entry type")
    return table
def generate_ds2_data(max_length, str_w, nout, nbands, batch_size, num_iter):
    frame_stride = 0.01  # seconds, hard-coded value in make_aeon_dataloaders
    max_utt_len = ((int(max_length / frame_stride) - 1) // str_w) + 1
    max_lbl_len = (max_utt_len - 1) // 2
    train_set, eval_set = make_fake_dataloader(nbands, max_lbl_len, max_utt_len,
                                               nout, batch_size, num_iter)
    inputs = train_set.make_placeholders()
    if "audio_length" not in inputs:
        inputs["audio_length"] = ng.placeholder([ax.N], dtype=np.uint32)
    if "trans_length" not in inputs:
        inputs["trans_length"] = ng.placeholder([ax.N], dtype=np.uint32)
    return inputs, train_set, eval_set
def _reads(self, addr, length):
    if length > 65535:
        length = 65535
        self.logger.warning("Maximum read-length is %d", length)
    header = b'r' + bytes(bytearray([0,
                                     length & 0xFF, (length >> 8) & 0xFF,
                                     addr & 0xFF, (addr >> 8) & 0xFF,
                                     (addr >> 16) & 0xFF, (addr >> 24) & 0xFF]))
    self.socket.send(header)
    data = self.socket.recv(length * 4 + 8)
    while len(data) < length * 4 + 8:
        data += self.socket.recv(length * 4 - len(data) + 8)
    if data[:8] == header:  # check for in-sync transmission
        return np.frombuffer(data[8:], dtype=np.uint32)
    else:  # error handling
        self.logger.error("Wrong control sequence from server: %s", data[:8])
        self.emptybuffer()
        return None
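The hand-assembled headers in _reads and _writes below follow the same fixed little-endian layout; an equivalent using struct (a sketch for clarity, not part of the original class):

import struct

def make_header(cmd, addr, length):
    # cmd byte, pad byte, u16 word count, u32 start address, all little-endian
    return cmd + struct.pack('<BHI', 0, length, addr)

assert make_header(b'r', 0x40000000, 100) == \
    b'r' + bytes(bytearray([0, 100, 0, 0x00, 0x00, 0x00, 0x40]))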
def _writes(self, addr, values):
    values = values[:65535 - 2]
    length = len(values)
    header = b'w' + bytes(bytearray([0,
                                     length & 0xFF,
                                     (length >> 8) & 0xFF,
                                     addr & 0xFF,
                                     (addr >> 8) & 0xFF,
                                     (addr >> 16) & 0xFF,
                                     (addr >> 24) & 0xFF]))
    # send header+body
    self.socket.send(header +
                     np.array(values, dtype=np.uint32).tobytes())
    if self.socket.recv(8) == header:  # check for in-sync transmission
        return True  # indicate successful write
    else:  # error handling
        self.logger.error("Error: wrong control sequence from server")
        self.emptybuffer()
        return None
def generate_trainig_data(self, num_points):
    '''
    Generate a training dataset. Produce random (integer) sequences X and the
    corresponding expected output sequences Y = generate_output_sequence(X).
    Return xy_data, y_data (both of type uint32):
    xy_data = numpy array of shape [num_points, in_seq_len + out_seq_len], each point being X + Y
    y_data  = numpy array of shape [num_points, out_seq_len]
    '''
    x_data = np.random.randint(0, self.in_max_int, size=(num_points, self.in_seq_len))  # shape [num_points, in_seq_len]
    x_data = x_data.astype(np.uint32)  # ensure integer type
    y_data = [self.sequence_pattern.generate_output_sequence(x) for x in x_data]
    y_data = np.array(y_data)
    xy_data = np.append(x_data, y_data, axis=1)  # shape [num_points, in_seq_len + out_seq_len]
    return xy_data, y_data
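A toy driver, assuming a minimal host object (the reverse-the-input pattern here is purely illustrative, not the project's actual pattern):

import numpy as np

class _ReversePattern:
    def generate_output_sequence(self, x):
        return x[::-1]

class _Toy:
    in_max_int, in_seq_len = 10, 4
    sequence_pattern = _ReversePattern()
    generate_trainig_data = generate_trainig_data   # bind the function above

xy_data, y_data = _Toy().generate_trainig_data(num_points=2)
print(xy_data.shape, y_data.shape)                  # (2, 8) (2, 4)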
def numpy2bifrost(dtype):
    if   dtype == np.int8:       return _bf.BF_DTYPE_I8
    elif dtype == np.int16:      return _bf.BF_DTYPE_I16
    elif dtype == np.int32:      return _bf.BF_DTYPE_I32
    elif dtype == np.uint8:      return _bf.BF_DTYPE_U8
    elif dtype == np.uint16:     return _bf.BF_DTYPE_U16
    elif dtype == np.uint32:     return _bf.BF_DTYPE_U32
    elif dtype == np.float16:    return _bf.BF_DTYPE_F16
    elif dtype == np.float32:    return _bf.BF_DTYPE_F32
    elif dtype == np.float64:    return _bf.BF_DTYPE_F64
    elif dtype == np.float128:   return _bf.BF_DTYPE_F128
    elif dtype == ci8:           return _bf.BF_DTYPE_CI8
    elif dtype == ci16:          return _bf.BF_DTYPE_CI16
    elif dtype == ci32:          return _bf.BF_DTYPE_CI32
    elif dtype == cf16:          return _bf.BF_DTYPE_CF16
    elif dtype == np.complex64:  return _bf.BF_DTYPE_CF32
    elif dtype == np.complex128: return _bf.BF_DTYPE_CF64
    elif dtype == np.complex256: return _bf.BF_DTYPE_CF128
    else: raise ValueError("Unsupported dtype: " + str(dtype))
def numpy2string(dtype):
    if   dtype == np.int8:       return 'i8'
    elif dtype == np.int16:      return 'i16'
    elif dtype == np.int32:      return 'i32'
    elif dtype == np.int64:      return 'i64'
    elif dtype == np.uint8:      return 'u8'
    elif dtype == np.uint16:     return 'u16'
    elif dtype == np.uint32:     return 'u32'
    elif dtype == np.uint64:     return 'u64'
    elif dtype == np.float16:    return 'f16'
    elif dtype == np.float32:    return 'f32'
    elif dtype == np.float64:    return 'f64'
    elif dtype == np.float128:   return 'f128'
    elif dtype == np.complex64:  return 'cf32'
    elif dtype == np.complex128: return 'cf64'
    elif dtype == np.complex256: return 'cf128'
    else: raise TypeError("Unsupported dtype: " + str(dtype))
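Both elif chains could equally be table lookups; a sketch of numpy2string as a dict (the platform-dependent float128/complex256 entries are omitted here):

import numpy as np

_NUMPY2STRING = {np.dtype(k): v for k, v in [
    (np.int8, 'i8'), (np.int16, 'i16'), (np.int32, 'i32'), (np.int64, 'i64'),
    (np.uint8, 'u8'), (np.uint16, 'u16'), (np.uint32, 'u32'), (np.uint64, 'u64'),
    (np.float16, 'f16'), (np.float32, 'f32'), (np.float64, 'f64'),
    (np.complex64, 'cf32'), (np.complex128, 'cf64')]}

def numpy2string_dict(dtype):
    try:
        return _NUMPY2STRING[np.dtype(dtype)]
    except KeyError:
        raise TypeError("Unsupported dtype: " + str(dtype))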
Evolution Strategy with Neural Nets.py (project: Evolutionary-Algorithm, author: MorvanZhou)
def train(net_shapes, net_params, optimizer, utility, pool):
    # pass a seed instead of the whole noise matrix to each worker to save time
    noise_seed = np.random.randint(0, 2 ** 32 - 1, size=N_KID, dtype=np.uint32).repeat(2)    # mirrored sampling
    # distribute training in parallel
    jobs = [pool.apply_async(get_reward, (net_shapes, net_params, env, CONFIG['ep_max_step'], CONFIG['continuous_a'],
                                          [noise_seed[k_id], k_id],)) for k_id in range(N_KID * 2)]
    rewards = np.array([j.get() for j in jobs])
    kids_rank = np.argsort(rewards)[::-1]           # rank kid id by reward
    cumulative_update = np.zeros_like(net_params)   # initialize update values
    for ui, k_id in enumerate(kids_rank):
        np.random.seed(noise_seed[k_id])            # reconstruct noise using seed
        cumulative_update += utility[ui] * sign(k_id) * np.random.randn(net_params.size)
    gradients = optimizer.get_gradients(cumulative_update / (2 * N_KID * SIGMA))
    return net_params + gradients, rewards
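sign() is defined elsewhere in the script; a plausible definition consistent with the .repeat(2) mirrored sampling above (paired kids share a seed and apply the noise with opposite signs):

def sign(k_id):
    # even/odd members of each mirrored pair get -1/+1
    return -1.0 if k_id % 2 == 0 else 1.0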