def _get_data_dims(self, input_fname):
"""Briefly scan the data file for info"""
# raw data formatting is nsamps by nchans + counter
data = np.genfromtxt(input_fname, delimiter=',', comments='%',
skip_footer=1)
diff = np.abs(np.diff(data[:, 0]))
diff = np.mod(diff, 254) - 1
missing_idx = np.where(diff != 0)[0]
missing_samps = diff[missing_idx].astype(int)
nsamps, nchan = data.shape
# add the missing samples
nsamps += sum(missing_samps)
# remove the tracker column
nchan -= 1
del data
return nsamps, nchan
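# A minimal sketch (hypothetical data, not from the original project) of the
# dropped-sample logic above: the first column is a wrapping sample counter,
# and any counter jump larger than one marks missing samples.
import numpy as np
counter = np.array([251., 252., 253., 254., 255., 0., 1., 3.])  # counter value 2 was dropped
diff = np.mod(np.abs(np.diff(counter)), 254) - 1  # 0 for consecutive samples, even across the wrap
print(int(diff.sum()))  # -> 1 missing sample, so nsamps would be bumped by 1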
def laplace_gpu(y_gpu, mode='valid'):
shape = np.array(y_gpu.shape).astype(np.uint32)
dtype = y_gpu.dtype
block_size = (16,16,1)
grid_size = (int(np.ceil(float(shape[1])/block_size[0])),
int(np.ceil(float(shape[0])/block_size[1])))
shared_size = int((2+block_size[0])*(2+block_size[1])*dtype.itemsize)
preproc = _generate_preproc(dtype, shape)
mod = SourceModule(preproc + kernel_code, keep=True)
if mode == 'valid':
laplace_fun_gpu = mod.get_function("laplace_valid")
laplace_gpu = cua.empty((y_gpu.shape[0]-2, y_gpu.shape[1]-2), y_gpu.dtype)
if mode == 'same':
laplace_fun_gpu = mod.get_function("laplace_same")
laplace_gpu = cua.empty((y_gpu.shape[0], y_gpu.shape[1]), y_gpu.dtype)
laplace_fun_gpu(laplace_gpu.gpudata, y_gpu.gpudata,
block=block_size, grid=grid_size, shared=shared_size)
return laplace_gpu
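# A hedged CPU reference for the 'valid' branch above, assuming "laplace_valid"
# implements the standard 5-point stencil (the CUDA kernel_code is not shown here):
import numpy as np
def laplace_valid_cpu(y):
    # interior points only, so the output is 2 rows/cols smaller than the input
    return (y[:-2, 1:-1] + y[2:, 1:-1] + y[1:-1, :-2] + y[1:-1, 2:]
            - 4.0 * y[1:-1, 1:-1])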
def paintGL(self):
"""Paint the scene.
"""
self.update_buffer()
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
if (np.mod(self.enctime,4)==0 or ENC==0 or 1):
if (1):
gl.glBindTexture(gl.GL_TEXTURE_2D, self.idtexgl)
gl.glEnable(gl.GL_TEXTURE_2D)
gl.glBegin(gl.GL_QUADS)
gl.glTexCoord2f(0.0, 0.0)
gl.glVertex2f(0, 0);
gl.glTexCoord2f(1.0, 0.0)
gl.glVertex2f( 1.0, 0);
gl.glTexCoord2f(1.0, 1.0)
gl.glVertex2f( 1.0, 1.0);
gl.glTexCoord2f(0.0, 1.0)
gl.glVertex2f(0, 1.0);
gl.glEnd()
else:
gl.glColor4d(0.5,0.7,0.8,0.04)
gl.glEnable(gl.GL_BLEND)
gl.glBlendEquationSeparate( gl.GL_FUNC_ADD, gl.GL_FUNC_ADD);
gl.glBlendFuncSeparate(gl.GL_SRC_ALPHA,gl.GL_ONE_MINUS_SRC_ALPHA, gl.GL_ONE, gl.GL_ONE, gl.GL_ZERO);
# bind the VBO
self.glbuf.bind()
# tell OpenGL that the VBO contains an array of vertices
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
# these vertices contain 2 simple precision coordinates
gl.glVertexPointer(2, gl.GL_FLOAT, 0, self.glbuf)
# draw "count" points from the VBO
gl.glDrawArrays(gl.GL_POINTS, 0, self.count)
self.update()
def _get_slice_(self, t_start, t_stop):
x_beg = numpy.int64(t_start // self.SAMPLES_PER_RECORD)
r_beg = numpy.mod(t_start, self.SAMPLES_PER_RECORD)
x_end = numpy.int64(t_stop // self.SAMPLES_PER_RECORD)
r_end = numpy.mod(t_stop, self.SAMPLES_PER_RECORD)
if x_beg == x_end:
g_offset = x_beg * self.bytes_per_block_div + self.block_offset_div
data_slice = numpy.arange(g_offset + r_beg * self.nb_channels, g_offset + r_end * self.nb_channels, dtype=numpy.int64)
yield data_slice
else:
for count, nb_blocks in enumerate(numpy.arange(x_beg, x_end + 1, dtype=numpy.int64)):
g_offset = nb_blocks * self.bytes_per_block_div + self.block_offset_div
if count == 0:
data_slice = numpy.arange(g_offset + r_beg * self.nb_channels, g_offset + self.block_size_div, dtype=numpy.int64)
elif (count == (x_end - x_beg)):
data_slice = numpy.arange(g_offset, g_offset + r_end * self.nb_channels, dtype=numpy.int64)
else:
data_slice = numpy.arange(g_offset, g_offset + self.block_size_div, dtype=numpy.int64)
yield data_slice
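# A small sketch (hypothetical numbers) of the record/offset arithmetic above:
# a flat sample index is split into a record index with // and an in-record
# offset with mod, and a [t_start, t_stop) range may span several records.
import numpy
SAMPLES_PER_RECORD = 1024
t_start, t_stop = 1000, 3000
x_beg, r_beg = t_start // SAMPLES_PER_RECORD, numpy.mod(t_start, SAMPLES_PER_RECORD)
x_end, r_end = t_stop // SAMPLES_PER_RECORD, numpy.mod(t_stop, SAMPLES_PER_RECORD)
print(x_beg, r_beg, x_end, r_end)  # -> 0 1000 2 952: three records are touched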
def _get_slice_(self, t_start, t_stop):
x_beg = numpy.int64(t_start // self.SAMPLES_PER_RECORD)
r_beg = numpy.mod(t_start, self.SAMPLES_PER_RECORD)
x_end = numpy.int64(t_stop // self.SAMPLES_PER_RECORD)
r_end = numpy.mod(t_stop, self.SAMPLES_PER_RECORD)
data_slice = []
if x_beg == x_end:
g_offset = x_beg * self.SAMPLES_PER_RECORD + self.OFFSET_PER_BLOCK[0]*(x_beg + 1) + self.OFFSET_PER_BLOCK[1]*x_beg
data_slice = numpy.arange(g_offset + r_beg, g_offset + r_end, dtype=numpy.int64)
else:
for count, nb_blocks in enumerate(numpy.arange(x_beg, x_end + 1, dtype=numpy.int64)):
g_offset = nb_blocks * self.SAMPLES_PER_RECORD + self.OFFSET_PER_BLOCK[0]*(nb_blocks + 1) + self.OFFSET_PER_BLOCK[1]*nb_blocks
if count == 0:
data_slice += numpy.arange(g_offset + r_beg, g_offset + self.SAMPLES_PER_RECORD, dtype=numpy.int64).tolist()
elif (count == (x_end - x_beg)):
data_slice += numpy.arange(g_offset, g_offset + r_end, dtype=numpy.int64).tolist()
else:
data_slice += numpy.arange(g_offset, g_offset + self.SAMPLES_PER_RECORD, dtype=numpy.int64).tolist()
return data_slice
def train_step(self,sess,counter):
'''
This is a generic function that will be called by the Trainer class
once per iteration. The simplest body for this part would simply be
"sess.run(self.train_op)", but you may have more complications.
Running self.summary_op is handled by Trainer.Supervisor and doesn't
need to be addressed here.
Only counters, not epochs, are explicitly kept track of.
'''
### You can wait until counter > N to do stuff, for example:
if self.config.pretrain_LabelerR and counter < self.config.pretrain_LabelerR_no_of_iters:
sess.run(self.d_label_optim)
else:
if np.mod(counter, 3) == 0:
sess.run(self.g_optim)
sess.run([self.train_op,self.k_t_update,self.inc_step])#all ops
else:
sess.run([self.g_optim, self.k_t_update ,self.inc_step])
sess.run(self.g_optim)
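# A quick illustration (plain Python, no TensorFlow session) of the counter
# schedule above: every third iteration takes the "all ops" branch, the other
# iterations take the branch that runs the generator update twice.
import numpy as np
for counter in range(6):
    print(counter, "all ops" if np.mod(counter, 3) == 0 else "g_optim twice")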
def get_min_pos_kinect():
(depth,_) = get_depth()
minVal = np.min(depth) #This is the minimum value from the depth image
minPos = np.argmin(depth) #This is the raw index of the minimum value above
xPos = np.mod(minPos, xSize) #This is the x component of the raw index
yPos = minPos//xSize #This is the y component of the raw index
xList.append(xPos)
del xList[0]
xPos = int(np.mean(xList))
yList.append(yPos)
del yList[0]
yPos = int(np.mean(yList))
return (xSize - xPos-10, yPos, minVal)
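# A minimal sketch (synthetic array, hypothetical width) of the argmin -> (x, y)
# conversion used above: np.argmin returns a flat index, mod gives the column
# and integer division gives the row.
import numpy as np
xSize = 4
depth = np.array([[9, 9, 9, 9],
                  [9, 9, 1, 9]])
minPos = np.argmin(depth)
print(np.mod(minPos, xSize), minPos // xSize)  # -> 2 1 (column 2, row 1)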
def extract_top_plane_nodes(nodefile, top_face):
"""
:param nodefile:
:param top_face:
:return: planeNodeIDs
"""
import numpy as np
import fem_mesh
top_face = np.array(top_face)
nodeIDcoords = fem_mesh.load_nodeIDs_coords(nodefile)
[snic, axes] = fem_mesh.SortNodeIDs(nodeIDcoords)
# extract spatially-sorted node IDs on the top z plane
axis = int(np.floor(np.divide(top_face.nonzero(), 2)))
if np.mod(top_face.nonzero(), 2) == 1:
plane = (axis, axes[axis].max())
else:
plane = (axis, axes[axis].min())
planeNodeIDs = fem_mesh.extractPlane(snic, axes, plane)
return planeNodeIDs
def test_NotImplemented_not_returned(self):
# See gh-5964 and gh-2091. Some of these functions are not operator
# related and were fixed for other reasons in the past.
binary_funcs = [
np.power, np.add, np.subtract, np.multiply, np.divide,
np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
np.logical_and, np.logical_or, np.logical_xor, np.maximum,
np.minimum, np.mod
]
# These functions still return NotImplemented. Will be fixed in
# future.
# bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]
a = np.array('1')
b = 1
for f in binary_funcs:
assert_raises(TypeError, f, a, b)
def get_op(self):
"""Returns all symmetry operations (including inversions and
subtranslations), but unlike get_symop(), they are returned as
two ndarrays."""
if self.centrosymmetric:
rot = np.tile(np.vstack((self.rotations, -self.rotations)),
(self.nsubtrans, 1, 1))
trans = np.tile(np.vstack((self.translations, -self.translations)),
(self.nsubtrans, 1))
trans += np.repeat(self.subtrans, 2 * len(self.rotations), axis=0)
trans = np.mod(trans, 1)
else:
rot = np.tile(self.rotations, (self.nsubtrans, 1, 1))
trans = np.tile(self.translations, (self.nsubtrans, 1))
trans += np.repeat(self.subtrans, len(self.rotations), axis=0)
trans = np.mod(trans, 1)
return rot, trans
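# np.mod(..., 1) above wraps the symmetry translations back into the unit cell;
# a one-line illustration with made-up fractional coordinates:
import numpy as np
print(np.mod(np.array([1.25, -0.25, 0.5]), 1))  # wrapped into [0, 1): 0.25, 0.75, 0.5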
def ecliptic_longitude(hUTC, dayofyear, year):
""" Ecliptic longitude
Args:
hUTC: fractional hour (UTC time)
dayofyear (int):
year (int):
Returns:
(float) the ecliptic longitude (degrees)
Details:
World Meteorological Organization (2006).Guide to meteorological
instruments and methods of observation. Geneva, Switzerland.
"""
jd = julian_date(hUTC, dayofyear, year)
n = jd - 2451545
# mean longitude (deg)
L = numpy.mod(280.46 + 0.9856474 * n, 360)
# mean anomaly (deg)
g = numpy.mod(357.528 + 0.9856003 * n, 360)
return L + 1.915 * numpy.sin(numpy.radians(g)) + 0.02 * numpy.sin(
numpy.radians(2 * g))
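# numpy.mod(..., 360) above keeps the mean longitude and mean anomaly in
# [0, 360) even when n spans many years; for example, with n = 10000 days:
import numpy
n = 10000
print(numpy.mod(280.46 + 0.9856474 * n, 360))  # -> about 56.93 degrees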
def hour_angle(hUTC, dayofyear, year, longitude):
""" Sun hour angle
Args:
hUTC: fractional hour (UTC time)
dayofyear (int):
year (int):
longitude (float): the location longitude (degrees, east positive)
Returns:
(float) the hour angle (hour)
Details:
World Meteorological Organization (2006).Guide to meteorological
instruments and methods of observation. Geneva, Switzerland.
"""
jd = julian_date(hUTC, dayofyear, year)
n = jd - 2451545
gmst = numpy.mod(6.697375 + 0.0657098242 * n + hUTC, 24)
lmst = numpy.mod(gmst + longitude / 15., 24)
ra = right_ascension(hUTC, dayofyear, year)
ha = numpy.mod(lmst - ra / 15. + 12, 24) - 12
return ha
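# The final numpy.mod(... + 12, 24) - 12 above folds the hour angle into
# [-12, 12); a quick check with a few raw values:
import numpy
raw = numpy.array([-13., 5., 13.])
print(numpy.mod(raw + 12, 24) - 12)  # values folded into [-12, 12): 11, 5, -11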
def eot(hUTC, dayofyear, year):
"""Equation of time, i.e. the discrepancy between true solar time and
mean solar time
Args:
hUTC: fractional hour (UTC time)
dayofyear (int): the day of year
year (int):
Returns:
(float) the eot discrepancy (in hour)
Details:
Michalsky, J. J. "The Astronomical Almanac's Algorithm for Approximate
Solar Position (1950-2050)". Solar Energy. Vol. 40, No. 3, 1988;
pp. 227-235, USA
"""
jd = julian_date(hUTC, dayofyear, year)
n = jd - 2451545
# mean longitude (deg)
L = numpy.mod(280.46 + 0.9856474 * n, 360)
ra = right_ascension(hUTC, dayofyear, year)
return (L - ra) / 15.
def comp_ola_gdeconv(xx_gpu, xy_gpu, yx_gpu, yy_gpu, L_gpu, alpha, beta):
"""
Computes the division in Fourier space needed for direct deconvolution
"""
sfft = xx_gpu.shape
block_size = (16,16,1)
grid_size = (int(np.ceil(np.float32(sfft[0]*sfft[1])/block_size[0])),
int(np.ceil(np.float32(sfft[2])/block_size[1])))
mod = cu.module_from_buffer(cubin)
comp_ola_gdeconv_Kernel = mod.get_function("comp_ola_gdeconv_Kernel")
z_gpu = cua.zeros(sfft, np.complex64)
comp_ola_gdeconv_Kernel(z_gpu.gpudata,
np.int32(sfft[0]), np.int32(sfft[1]), np.int32(sfft[2]),
xx_gpu, xy_gpu, yx_gpu, yy_gpu, L_gpu.gpudata,
np.float32(alpha), np.float32(beta),
block=block_size, grid=grid_size)
return z_gpu
def crop_gpu2cpu(x_gpu, sz, offset=(0,0)):
sfft = x_gpu.shape
block_size = (16, 16 ,1)
grid_size = (int(np.ceil(np.float32(sfft[1])/block_size[1])),
int(np.ceil(np.float32(sfft[0])/block_size[0])))
if x_gpu.dtype == np.float32:
mod = cu.module_from_buffer(cubin)
cropKernel = mod.get_function("crop_Kernel")
elif x_gpu.dtype == np.complex64:
mod = cu.module_from_buffer(cubin)
cropKernel = mod.get_function("crop_ComplexKernel")
x_cropped_gpu = cua.empty(tuple((int(sz[0]),int(sz[1]))), np.float32)
cropKernel(x_cropped_gpu.gpudata, np.int32(sz[0]), np.int32(sz[1]),
x_gpu.gpudata, np.int32(sfft[0]), np.int32(sfft[1]),
np.int32(offset[0]), np.int32(offset[1]),
block=block_size , grid=grid_size)
return x_cropped_gpu
def comp_ola_sdeconv(gx_gpu, gy_gpu, xx_gpu, xy_gpu, Ftpy_gpu, f_gpu, L_gpu, alpha, beta, gamma=0):
"""
Computes the division in Fourier space needed for sparse deconvolution
"""
sfft = xx_gpu.shape
block_size = (16,16,1)
grid_size = (int(np.ceil(np.float32(sfft[0]*sfft[1])/block_size[0])),
int(np.ceil(np.float32(sfft[2])/block_size[1])))
mod = cu.module_from_buffer(cubin)
comp_ola_sdeconv_Kernel = mod.get_function("comp_ola_sdeconv_Kernel")
z_gpu = cua.zeros(sfft, np.complex64)
comp_ola_sdeconv_Kernel(z_gpu.gpudata,
np.int32(sfft[0]), np.int32(sfft[1]), np.int32(sfft[2]),
gx_gpu.gpudata, gy_gpu.gpudata,
xx_gpu.gpudata, xy_gpu.gpudata,
Ftpy_gpu.gpudata, f_gpu.gpudata, L_gpu.gpudata,
np.float32(alpha), np.float32(beta),
np.float32(gamma),
block=block_size, grid=grid_size)
return z_gpu
def impad_gpu(y_gpu, sf):
sf = np.array(sf)
shape = (np.array(y_gpu.shape) + sf).astype(np.uint32)
dtype = y_gpu.dtype
block_size = (16,16,1)
grid_size = (int(np.ceil(float(shape[1])/block_size[0])),
int(np.ceil(float(shape[0])/block_size[1])))
preproc = _generate_preproc(dtype, shape)
mod = SourceModule(preproc + kernel_code, keep=True)
padded_gpu = cua.empty((int(shape[0]), int(shape[1])), dtype)
impad_fun = mod.get_function("impad")
upper_left = np.uint32(np.floor(sf / 2.))
original_size = np.uint32(np.array(y_gpu.shape))
impad_fun(padded_gpu.gpudata, y_gpu.gpudata,
upper_left[1], upper_left[0],
original_size[0], original_size[1],
block=block_size, grid=grid_size)
return padded_gpu
def laplace_stack_gpu(y_gpu, mode='valid'):
"""
This function computes the Laplacian of each slice of a stack of images
"""
shape = np.array(y_gpu.shape).astype(np.uint32)
dtype = y_gpu.dtype
block_size = (6,int(np.floor(512./6./float(shape[0]))),int(shape[0]))
grid_size = (int(np.ceil(float(shape[1])/block_size[0])),
int(np.ceil(float(shape[0])/block_size[1])))
shared_size = int((2+block_size[0])*(2+block_size[1])*(2+block_size[2])
*dtype.itemsize)
preproc = _generate_preproc(dtype, (shape[1],shape[2]))
mod = SourceModule(preproc + kernel_code, keep=True)
laplace_fun_gpu = mod.get_function("laplace_stack_same")
laplace_gpu = cua.empty((y_gpu.shape[0], y_gpu.shape[1], y_gpu.shape[2]),
y_gpu.dtype)
laplace_fun_gpu(laplace_gpu.gpudata, y_gpu.gpudata,
block=block_size, grid=grid_size, shared=shared_size)
return laplace_gpu
def modify_sparse23_gpu(y_gpu, beta):
shape = np.array(y_gpu.shape).astype(np.uint32)
gpu_shape = np.array([np.prod(shape),np.prod(shape)])
gpu_shape = np.uint32(np.ceil(np.sqrt(gpu_shape)))
dtype = y_gpu.dtype
block_size = (16,16,1)
grid_size = (int(np.ceil(float(gpu_shape[1])/block_size[0])),
int(np.ceil(float(gpu_shape[0])/block_size[1])))
preproc = _generate_preproc(dtype, np.array(grid_size)
* np.array(block_size)[0:1])
mod = SourceModule(preproc + kernel_code, keep=True)
modify_alpha23_fun = mod.get_function("modify_alpha23")
modify_alpha23_fun(y_gpu.gpudata, np.float32(beta), np.uint32(np.prod(shape)),
block=block_size, grid=grid_size)
def modify_sparse_gpu(y_gpu, beta, alpha=2/3):
shape = np.array(y_gpu.shape).astype(np.uint32)
gpu_shape = np.array([np.prod(shape),np.prod(shape)])
gpu_shape = np.uint32(np.ceil(np.sqrt(gpu_shape)))
dtype = y_gpu.dtype
block_size = (16,16,1)
grid_size = (int(np.ceil(float(gpu_shape[1])/block_size[0])),
int(np.ceil(float(gpu_shape[0])/block_size[1])))
preproc = _generate_preproc(dtype, np.array(grid_size)
* np.array(block_size)[0:1])
mod = SourceModule(preproc + kernel_code, keep=True)
modify_alpha_fun = mod.get_function("modify_alpha")
modify_alpha_fun(y_gpu.gpudata, np.float32(beta),
np.float32(alpha), np.uint32(np.prod(shape)),
block=block_size, grid=grid_size)
def ola_GPU(xs_gpu, sy, csf, hop):
y_gpu = cua.empty(sy, np.float32)
block_size = (16,16,1)
grid_size = (int(np.ceil(np.float32(sx[0]*sz[0])/block_size[1])),
int(np.ceil(np.float32(sz[1])/block_size[0])))
mod = cu.module_from_buffer(cubin)
copy_Kernel = mod.get_function("copy_Kernel")
for i in range(csf[0]):
for j in range(csf[1]):
copy_Kernel(y_gpu, np.uint32(sy[0]), np.uint32(sy[0]),
xs_gpu, np.uint32(sx[0]), np.uint32(sx[1]), np.uint32(sx[2]),
np.uint32(offset[0]), np.uint32(offset[1]), np.uint32(startrow),
block=block_size, grid=grid_size)
return np.real(y_gpu.get())
def project_on_basis_gpu(fs_gpu, basis_gpu):
basis_length = basis_gpu.shape[0]
shape = np.array(fs_gpu.shape).astype(np.uint32)
dtype = fs_gpu.dtype
block_size = (16,16,1)
grid_size = (1,int(np.ceil(float(basis_length)/block_size[1])))
weights_gpu = cua.empty(basis_length, dtype=dtype)
preproc = _generate_preproc(dtype, shape)
preproc += '#define BLOCK_SIZE %d\n' % (block_size[0]*block_size[1])
mod = SourceModule(preproc + projection_code, keep=True)
projection_fun = mod.get_function("projection")
projection_fun(weights_gpu.gpudata, fs_gpu.gpudata, basis_gpu.gpudata,
np.uint32(basis_length),
block=block_size, grid=grid_size)
def encode(msg):
""" passed a list of bits (integers, 1 or 0), returns a Hamming(8,4)-coded
list of bits """
while len(msg) % 4 != 0:
# pad the message to length
msg.append(0)
msg = np.reshape(np.array(msg), (-1, 4))
# create parity bits using transition matrix
transition = np.mat('1,0,0,0,0,1,1,1;\
0,1,0,0,1,0,1,1;\
0,0,1,0,1,1,0,1;\
0,0,0,1,1,1,1,0')
result = np.dot(msg, transition)
# mod 2 the matrix multiplication
return np.mod(result, 2)
def syndrome(msg):
""" passed a list of Hamming(8,4)-encoded bits (integers, 1 or 0),
returns an error syndrome for that list """
msg = np.reshape(np.array(msg), (-1, 8)).T
# syndrome generation matrix
transition = np.mat('0,1,1,1,1,0,0,0;\
1,0,1,1,0,1,0,0;\
1,1,0,1,0,0,1,0;\
1,1,1,0,0,0,0,1')
result = np.dot(transition, msg)
# mod 2 the matrix multiplication
return np.mod(result, 2)
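# A hedged round-trip check for the two Hamming(8,4) helpers above: the
# syndrome of an unmodified codeword should be all zeros (the data bits
# [1, 0, 1, 1] are an arbitrary example).
import numpy as np
codeword = encode([1, 0, 1, 1])                          # -> [[1 0 1 1 0 1 0 0]]
bits = np.asarray(codeword).ravel().tolist()
print(syndrome(bits))                                    # -> 4x1 zero matrix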
def contributions(in_length, out_length, scale, kernel, k_width):
if scale < 1:
h = lambda x: scale * kernel(scale * x)
kernel_width = 1.0 * k_width / scale
else:
h = kernel
kernel_width = k_width
x = np.arange(1, out_length+1).astype(np.float64)
u = x / scale + 0.5 * (1 - 1 / scale)
left = np.floor(u - kernel_width / 2)
P = int(ceil(kernel_width)) + 2
ind = np.expand_dims(left, axis=1) + np.arange(P) - 1 # -1 because indexing from 0
indices = ind.astype(np.int32)
weights = h(np.expand_dims(u, axis=1) - indices - 1) # -1 because indexing from 0
weights = np.divide(weights, np.expand_dims(np.sum(weights, axis=1), axis=1))
aux = np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))).astype(np.int32)
indices = aux[np.mod(indices, aux.size)]
ind2store = np.nonzero(np.any(weights, axis=0))
weights = weights[:, ind2store]
indices = indices[:, ind2store]
return weights, indices
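# The aux/np.mod trick above mirrors out-of-range sample indices back into the
# signal; a small standalone sketch with in_length = 5:
import numpy as np
in_length = 5
aux = np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1)))
idx = np.array([-2, -1, 0, 5, 6])
print(aux[np.mod(idx, aux.size)])  # -> [1 0 0 4 3], i.e. reflected at the borders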
def calculate_feature_statistics(feature_id):
feature = Feature.objects.get(pk=feature_id)
dataframe = _get_dataframe(feature.dataset.id)
feature_col = dataframe[feature.name]
feature.min = np.amin(feature_col).item()
feature.max = np.amax(feature_col).item()
feature.mean = np.mean(feature_col).item()
feature.variance = np.nanvar(feature_col).item()
unique_values = np.unique(feature_col)
integer_check = (np.mod(unique_values, 1) == 0).all()
feature.is_categorical = integer_check and (unique_values.size < 10)
if feature.is_categorical:
feature.categories = list(unique_values)
feature.save(update_fields=['min', 'max', 'variance', 'mean', 'is_categorical', 'categories'])
del unique_values, feature
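# The categorical check above treats a column as integer-valued when every
# unique entry has zero fractional part; for example:
import numpy as np
print((np.mod(np.array([1.0, 2.0, 3.0]), 1) == 0).all())   # -> True
print((np.mod(np.array([1.0, 2.5, 3.0]), 1) == 0).all())   # -> False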
def screw_axis(self):
""" The rotation, translation and screw axis from the dual quaternion. """
rotation = 2. * np.degrees(np.arccos(self.q_rot.w))
rotation = np.mod(rotation, 360.)
if (rotation > 1.e-12):
translation = -2. * self.q_dual.w / np.sin(rotation / 2. * np.pi / 180.)
screw_axis = self.q_rot.q[0:3] / np.sin(rotation / 2. * np.pi / 180.)
else:
translation = 2. * np.sqrt(np.sum(np.power(self.q_dual.q[0:3], 2.)))
if (translation > 1.e-12):
screw_axis = 2. * self.q_dual.q[0:3] / translation
else:
screw_axis = np.zeros(3)
# TODO(ntonci): Add axis point for completeness
return screw_axis, rotation, translation
def modIdx(i,l):
"""Wraps an index around the length of a list.
Returns the input index modulo the length of the list, so indices
that run past the end of the list map back into range.
Useful if lists refer to loops etc.
Args:
i (int): Index.
l (list): Some list.
Returns:
int: New index.
"""
return np.mod(i,len(l))
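# Example use of modIdx: indices past the end of the list wrap around.
print(modIdx(1, ['a', 'b', 'c']))  # -> 1
print(modIdx(4, ['a', 'b', 'c']))  # -> 1, since 4 mod 3 == 1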
def getPlotVec(self):
"""Returns vectors for plotting arc.
Returns:
tuple: Tuple containing:
* x (numpy.ndarray): x-array.
* y (numpy.ndarray): y-array.
* z (numpy.ndarray): z-array.
"""
self.getNormVec()
if np.mod(self.angle,np.pi/2.)<0.01:
a = np.linspace(0,self.angle,1000)
else:
a = np.linspace(self.angleOffset-self.angle,self.angleOffset,1000)
x,y,z=self.getPointOnArc(a)
return x,y,z