def deriveKernel(self, params, i):
    self.checkParamsI(params, i)
    ell = np.exp(params[0])
    p = np.exp(params[1])
    # compute squared distances d2 (reuse the precomputed matrix if available)
    if self.K_sq is None:
        d2 = sq_dist(self.X_scaled.T / ell)
    else:
        d2 = self.K_sq / ell**2
    # compute the period-scaled distance term dp
    dp = self.dp / p
    K = np.exp(-d2 / 2.0)
    if i == 0:
        return d2 * K * np.cos(2 * np.pi * dp)
    elif i == 1:
        return 2 * np.pi * dp * np.sin(2 * np.pi * dp) * K
    else:
        raise Exception('invalid parameter index: ' + str(i))
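For reference, with d² the ℓ-scaled squared distance and dp the p-scaled distance matrix above, the two branches return (my reading of the code, stated for orientation rather than as an authoritative derivation):

\frac{\partial K}{\partial \log\ell} = d^2\, e^{-d^2/2}\cos(2\pi\,dp), \qquad
\frac{\partial K}{\partial \log p} = 2\pi\, dp\,\sin(2\pi\, dp)\, e^{-d^2/2}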
def gelu(x):
    # tanh approximation of the Gaussian Error Linear Unit (Theano)
    return 0.5 * x * (1 + T.tanh(T.sqrt(2 / np.pi) * (x + 0.044715 * T.pow(x, 3))))
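As a quick sanity check (a minimal NumPy sketch, not part of the original project; it assumes scipy is available), the tanh approximation above tracks the exact erf-based GELU closely:

import numpy as np
from scipy.special import erf

def gelu_exact(x):
    # exact GELU: x * Phi(x), with Phi the standard normal CDF
    return 0.5 * x * (1 + erf(x / np.sqrt(2)))

def gelu_tanh(x):
    # same tanh approximation as above, in plain NumPy
    return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x**3)))

x = np.linspace(-5, 5, 101)
print(np.max(np.abs(gelu_exact(x) - gelu_tanh(x))))  # small (well below 1e-2)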
def get_local_wavenumbermesh(self, scaled=True, broadcast=False,
                             eliminate_highest_freq=False):
    kx = fftfreq(self.N[0], 1./self.N[0])
    ky = rfftfreq(self.N[1], 1./self.N[1])
    if eliminate_highest_freq:
        for i, k in enumerate((kx, ky)):
            if self.N[i] % 2 == 0:
                k[self.N[i]//2] = 0
    Ks = np.meshgrid(kx, ky[self.rank*self.Np[1]//2:(self.rank*self.Np[1]//2+self.Npf)],
                     indexing='ij', sparse=True)
    if scaled is True:
        Lp = 2*np.pi/self.L
        Ks[0] *= Lp[0]
        Ks[1] *= Lp[1]
    K = Ks
    if broadcast is True:
        K = [np.broadcast_to(k, self.complex_shape()) for k in Ks]
    return K
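With scaled=True, the integer FFT mode numbers n_j returned by fftfreq/rfftfreq are converted into physical wavenumbers for a periodic box of side lengths L (this restates the scaling applied above):

k_j = \frac{2\pi}{L_j}\, n_j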
def _generate_data():
    """
    Generate training data.
    Originally y(k) was produced from u(k-1) and y(k-1) (see the
    commented-out recurrence below); the current version returns
    y = sin(u1) + u2.
    """
    # u = np.random.uniform(-1, 1, 200)
    # y = []
    # former_y_value = 0
    # for i in np.arange(0, 200):
    #     y.append(former_y_value)
    #     next_y_value = (29.0 / 40) * np.sin(
    #         (16.0 * u[i] + 8 * former_y_value) / (3.0 + 4.0 * (u[i] ** 2) + 4 * (former_y_value ** 2))) \
    #         + (2.0 / 10) * u[i] + (2.0 / 10) * former_y_value
    #     former_y_value = next_y_value
    # return u, y
    u1 = np.random.uniform(-np.pi, np.pi, 200)
    u2 = np.random.uniform(-1, 1, 200)
    y = np.zeros(200)
    for i in range(200):
        y[i] = np.sin(u1[i]) + u2[i]
    return u1, u2, y
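A minimal usage sketch (assuming numpy is imported as np, as in the snippets above):

u1, u2, y = _generate_data()
print(u1.shape, u2.shape, y.shape)      # (200,) (200,) (200,)
print(np.allclose(y, np.sin(u1) + u2))  # True by construction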
def ae(x):
    if nonlinearity_name == 'relu':
        f = tf.nn.relu
    elif nonlinearity_name == 'elu':
        f = tf.nn.elu
    elif nonlinearity_name == 'gelu':
        # def gelu(x):
        #     return tf.mul(x, tf.erfc(-x / tf.sqrt(2.)) / 2.)
        # f = gelu
        def gelu_fast(_x):
            return 0.5 * _x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (_x + 0.044715 * tf.pow(_x, 3))))
        f = gelu_fast
    elif nonlinearity_name == 'silu':
        def silu(_x):
            return _x * tf.sigmoid(_x)
        f = silu
    # elif nonlinearity_name == 'soi':
    #     def soi_map(x):
    #         u = tf.random_uniform(tf.shape(x))
    #         mask = tf.to_float(tf.less(u, (1 + tf.erf(x / tf.sqrt(2.))) / 2.))
    #         return tf.cond(is_training, lambda: tf.mul(mask, x),
    #                        lambda: tf.mul(x, tf.erfc(-x / tf.sqrt(2.)) / 2.))
    #     f = soi_map
    else:
        raise NameError("Need 'relu', 'elu', 'gelu', or 'silu' for nonlinearity_name")
    h1 = f(tf.matmul(x, W['1']) + b['1'])
    h2 = f(tf.matmul(h1, W['2']) + b['2'])
    h3 = f(tf.matmul(h2, W['3']) + b['3'])
    h4 = f(tf.matmul(h3, W['4']) + b['4'])
    h5 = f(tf.matmul(h4, W['5']) + b['5'])
    h6 = f(tf.matmul(h5, W['6']) + b['6'])
    h7 = f(tf.matmul(h6, W['7']) + b['7'])
    return tf.matmul(h7, W['8']) + b['8']
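ae relies on globals W and b defined elsewhere in the original script; a plausible construction (purely illustrative — the real layer sizes are not shown in this excerpt, and the dims below are hypothetical) would be:

# hypothetical 8-layer autoencoder dims; the original sizes are not shown here
dims = [784, 512, 256, 128, 64, 128, 256, 512, 784]
W, b = {}, {}
for l in range(1, 9):
    W[str(l)] = tf.Variable(tf.random_normal([dims[l-1], dims[l]], stddev=0.1))
    b[str(l)] = tf.Variable(tf.zeros([dims[l]]))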
def score_samples(self, X):
    """Return the log-likelihood of each sample.
    See "Pattern Recognition and Machine Learning"
    by C. Bishop, 12.2.1 p. 574
    or http://www.miketipping.com/papers/met-mppca.pdf
    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        The data.
    Returns
    -------
    ll : array, shape (n_samples,)
        Log-likelihood of each sample under the current model.
    """
    check_is_fitted(self, 'mean_')
    X = check_array(X)
    Xr = X - self.mean_
    n_features = X.shape[1]
    precision = self.get_precision()
    log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
    log_like -= .5 * (n_features * log(2. * np.pi)
                      - fast_logdet(precision))
    return log_like
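This matches scikit-learn's PCA.score_samples; a brief usage sketch through the public API (assuming scikit-learn is installed):

import numpy as np
from sklearn.decomposition import PCA

X = np.random.randn(500, 10)
pca = PCA(n_components=3).fit(X)
ll = pca.score_samples(X)  # shape (500,), log-likelihood per sample
print(ll.shape, ll.mean())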
def getTrainTestKernel(self, params, Xtest):
    self.checkParams(params)
    ell = np.exp(params[0])
    p = np.exp(params[1])
    Xtest_scaled = Xtest / np.sqrt(Xtest.shape[1])
    d2 = sq_dist(self.X_scaled.T/ell, Xtest_scaled.T/ell)  # precompute squared distances
    # compute dp: sum of signed coordinate differences, scaled by the period
    dp = np.zeros(d2.shape)
    for d in range(self.X_scaled.shape[1]):
        dp += (np.outer(self.X_scaled[:, d], np.ones((1, Xtest_scaled.shape[0])))
               - np.outer(np.ones((self.X_scaled.shape[0], 1)), Xtest_scaled[:, d]))
    dp /= p
    K = np.exp(-d2 / 2.0)
    return np.cos(2*np.pi*dp) * K
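Putting the pieces together, the train-test kernel being evaluated is (my reading of the code; the ℓ-scaling lives inside d²):

K(x, x') = \exp\!\left(-\tfrac{1}{2}\,\left\|\tfrac{x}{\ell} - \tfrac{x'}{\ell}\right\|^2\right)\cos\!\left(\frac{2\pi}{p}\sum_d (x_d - x'_d)\right)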
def reset(self, random_start_state=False, assign_state=False, n=None, k=None,
          perturb_params=False, p_LINK_LENGTH_1=0, p_LINK_LENGTH_2=0,
          p_LINK_MASS_1=0, p_LINK_MASS_2=0, **kw):
    self.t = 0
    self.state = np.random.uniform(low=-0.1, high=0.1, size=(4,))
    self.LINK_LENGTH_1 = 1.  # [m]
    self.LINK_LENGTH_2 = 1.  # [m]
    self.LINK_MASS_1 = 1.    #: [kg] mass of link 1
    self.LINK_MASS_2 = 1.    #: [kg] mass of link 2
    if perturb_params:
        self.LINK_LENGTH_1 += (self.LINK_LENGTH_1 * p_LINK_LENGTH_1)  # [m]
        self.LINK_LENGTH_2 += (self.LINK_LENGTH_2 * p_LINK_LENGTH_2)  # [m]
        self.LINK_MASS_1 += (self.LINK_MASS_1 * p_LINK_MASS_1)  #: [kg] mass of link 1
        self.LINK_MASS_2 += (self.LINK_MASS_2 * p_LINK_MASS_2)  #: [kg] mass of link 2
    # The idea here is that we can initialize our batch randomly so that we can get
    # more variety in the state space that we attempt to fit a policy to.
    if random_start_state:
        self.state[:2] = np.random.uniform(-np.pi, np.pi, size=2)
    if assign_state:
        self.state[0] = wrap((2*k*np.pi)/(1.0*n), -np.pi, np.pi)
def calc_reward(self, action=None, state=None, **kw):
    '''Calculates the continuous reward based on the height of the foot (y position),
    with a penalty applied if the hinge is moving (we want the acrobot to be upright
    and stationary!), which is then normalized by the combined lengths of the links.'''
    t = self.target
    if state is None:
        s = self.state
    else:
        s = state
    # Make sure that the input state is clipped/wrapped to the given bounds
    # (not guaranteed when coming from the BNN).
    s[0] = wrap(s[0], -np.pi, np.pi)
    s[1] = wrap(s[1], -np.pi, np.pi)
    s[2] = bound(s[2], -self.MAX_VEL_1, self.MAX_VEL_1)
    s[3] = bound(s[3], -self.MAX_VEL_1, self.MAX_VEL_1)
    hinge, foot = self.get_cartesian_points(s)
    reward = -0.05 * (foot[0] - self.LINK_LENGTH_1)**2
    terminal = self.is_terminal(s)
    return 10 if terminal else reward
def EStep(self):
    P = np.zeros((self.M, self.N))
    for i in range(0, self.M):
        diff = self.X - np.tile(self.TY[i, :], (self.N, 1))
        diff = np.multiply(diff, diff)
        P[i, :] = P[i, :] + np.sum(diff, axis=1)
    # uniform-outlier term of the CPD posterior
    c = (2 * np.pi * self.sigma2) ** (self.D / 2)
    c = c * self.w / (1 - self.w)
    c = c * self.M / self.N
    P = np.exp(-P / (2 * self.sigma2))
    den = np.sum(P, axis=0)
    den = np.tile(den, (self.M, 1))
    den[den == 0] = np.finfo(float).eps
    den += c  # c was computed but unused in the original; standard CPD divides by the sum plus c
    self.P = np.divide(P, den)
    self.Pt1 = np.sum(self.P, axis=0)
    self.P1 = np.sum(self.P, axis=1)
    self.Np = np.sum(self.P1)
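For context, this is the E-step of Coherent Point Drift; the posterior being computed is (standard CPD, matching the code above once c enters the denominator):

P(m \mid x_n) = \frac{\exp\!\left(-\frac{\|x_n - T(y_m)\|^2}{2\sigma^2}\right)}{\sum_{k=1}^{M}\exp\!\left(-\frac{\|x_n - T(y_k)\|^2}{2\sigma^2}\right) + (2\pi\sigma^2)^{D/2}\,\frac{w}{1-w}\,\frac{M}{N}}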
def create_reference_image(size, x0=10., y0=-3., sigma_x=50., sigma_y=30., dtype='float64',
                           reverse_xaxis=False, correct_axes=True, sizey=None, **kwargs):
    """
    Creates a reference image: a Gaussian brightness distribution with an
    elliptical cross-section (widths sigma_x, sigma_y, centered at (x0, y0)).
    """
    inc_cos = np.cos(0./180.*np.pi)
    delta_x = 1.
    x = (np.linspace(0., size - 1, size) - size / 2.) * delta_x
    if sizey:
        y = (np.linspace(0., sizey-1, sizey) - sizey/2.) * delta_x
    else:
        y = x.copy()
    if reverse_xaxis:
        xx, yy = np.meshgrid(-x, y/inc_cos)
    elif correct_axes:
        xx, yy = np.meshgrid(-x, -y/inc_cos)
    else:
        xx, yy = np.meshgrid(x, y/inc_cos)
    image = np.exp(-(xx-x0)**2./sigma_x - (yy-y0)**2./sigma_y)
    return image.astype(dtype)
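A minimal usage sketch (assumes only numpy):

img = create_reference_image(size=128)
print(img.shape, img.dtype)     # (128, 128) float64
print(float(img.max()) <= 1.0)  # the Gaussian exponent is non-positive, so the peak is at most 1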
def rotate_point_cloud(batch_data):
    """ Randomly rotate the point clouds to augment the dataset.
        Rotation is per shape, about the up (y) axis.
        Input:
          BxNx3 array, original batch of point clouds
        Return:
          BxNx3 array, rotated batch of point clouds
    """
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        rotation_angle = np.random.uniform() * 2 * np.pi
        cosval = np.cos(rotation_angle)
        sinval = np.sin(rotation_angle)
        rotation_matrix = np.array([[cosval, 0, sinval],
                                    [0, 1, 0],
                                    [-sinval, 0, cosval]])
        shape_pc = batch_data[k, ...]
        rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
    return rotated_data
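Rotations preserve lengths, which gives a cheap correctness check (a sketch, assuming numpy as np):

batch = np.random.rand(4, 1024, 3).astype(np.float32)
rotated = rotate_point_cloud(batch)
# per-point norms are unchanged by a rotation about the y axis
print(np.allclose(np.linalg.norm(batch, axis=2),
                  np.linalg.norm(rotated, axis=2), atol=1e-5))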
def rotate_point_cloud_by_angle(batch_data, rotation_angle):
    """ Rotate the point cloud along up direction with certain angle.
        Input:
          BxNx3 array, original batch of point clouds
        Return:
          BxNx3 array, rotated batch of point clouds
    """
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        # rotation_angle = np.random.uniform() * 2 * np.pi
        cosval = np.cos(rotation_angle)
        sinval = np.sin(rotation_angle)
        rotation_matrix = np.array([[cosval, 0, sinval],
                                    [0, 1, 0],
                                    [-sinval, 0, cosval]])
        shape_pc = batch_data[k, ...]
        rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
    return rotated_data
def ac_solve(net):
    """
    AC analysis: assemble the conductance (G), dynamic (C), and
    right-hand-side matrices, then solve (G + j*2*pi*f*C) x = rhs
    at the analysis frequency.
    :param net: network object carrying the parsed netlist and analysis settings
    :return: None (the solution is stored in net.x)
    """
    net.conductance_matrix()
    net.dynamic_matrix()
    net.rhs_matrix()
    # frequency
    f = float(net.analysis[-1])
    # linear system definition
    net.x = spsolve(net.G + 1j * 2 * np.pi * f * net.C, net.rhs)
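The imports this snippet depends on are not shown in the excerpt; presumably it uses the scipy sparse solver:

# assumed imports for ac_solve (not shown in this excerpt)
import numpy as np
from scipy.sparse.linalg import spsolve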
def thinking(self):
    """Deliberate to avoid obstacles on the path."""
    if self.motion.moveIsActive():
        # Maneuver occurring. Let's finish it
        # before taking any other measure.
        pass
    elif not self.sensors['proximity'][0].imminent_collision:
        # Goes back to moving state.
        self.behavior_ = self.BEHAVIORS.moving
    elif all(s.imminent_collision for s in self.sensors['proximity']):
        # There's nothing left to be done, only flag this is a dead-end.
        self.behavior_ = self.BEHAVIORS.stuck
    else:
        peripheral_sensors = self.sensors['proximity'][1:]
        for maneuver, sensor in zip(range(1, 4), peripheral_sensors):
            if not sensor.imminent_collision:
                # A sensor that indicates no obstacles were found.
                # Move in that direction.
                self.motion.post.moveTo(0, 0, np.pi / 2)
                break
    return self
def gaussian_nll(x, mus, sigmas):
    r"""
    NLL for a multivariate normal with diagonal covariance matrix.
    See:
    wikipedia.org/wiki/Multivariate_normal_distribution#Likelihood_function
    where \Sigma = diag(s_1^2, ..., s_n^2).
    x, mus, sigmas should all have the same shape.
    sigmas (s_1, ..., s_n) should be strictly positive.
    The output has the same shape, but without the last dimension.
    """
    nll = lib.floatX(numpy.log(2. * numpy.pi))
    nll += 2. * T.log(sigmas)
    nll += ((x - mus) / sigmas) ** 2.
    nll = nll.sum(axis=-1)
    nll *= lib.floatX(0.5)
    return nll
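Written out, the quantity returned per sample is the diagonal-Gaussian negative log-likelihood:

-\log p(x) = \frac{1}{2}\sum_{i=1}^{n}\left[\log(2\pi) + 2\log\sigma_i + \left(\frac{x_i-\mu_i}{\sigma_i}\right)^2\right]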
def tsukuba_load_poses(fn):
    """
    Retrieve poses from a Tsukuba-format file, converting
    X Y Z R P Y -> X -Y -Z R -P -Y. The conversion is done by
    conjugating each pose with a 180-degree roll transform,
    which is equivalent to negating Y, Z, pitch, and yaw.
    """
    P = np.loadtxt(os.path.expanduser(fn), dtype=np.float64, delimiter=',')
    return [ RigidTransform.from_rpyxyz(np.pi, 0, 0, 0, 0, 0) * \
             RigidTransform.from_rpyxyz(
                 np.deg2rad(p[3]), np.deg2rad(p[4]), np.deg2rad(p[5]),
                 p[0]*.01, p[1]*.01, p[2]*.01, axes='sxyz') * \
             RigidTransform.from_rpyxyz(np.pi, 0, 0, 0, 0, 0) for p in P ]
    # equivalent direct form:
    # return [ RigidTransform.from_rpyxyz(
    #     np.deg2rad(p[3]), -np.deg2rad(p[4]), -np.deg2rad(p[5]),
    #     p[0]*.01, -p[1]*.01, -p[2]*.01, axes='sxyz') for p in P ]
def __call__(self, z):
    z1 = tf.reshape(tf.slice(z, [0, 0], [-1, 1]), [-1])
    z2 = tf.reshape(tf.slice(z, [0, 1], [-1, 1]), [-1])
    v1 = tf.sqrt((z1 - 5) * (z1 - 5) + z2 * z2) * 2
    v2 = tf.sqrt((z1 + 5) * (z1 + 5) + z2 * z2) * 2
    v3 = tf.sqrt((z1 - 2.5) * (z1 - 2.5) + (z2 - 2.5 * np.sqrt(3)) * (z2 - 2.5 * np.sqrt(3))) * 2
    v4 = tf.sqrt((z1 + 2.5) * (z1 + 2.5) + (z2 + 2.5 * np.sqrt(3)) * (z2 + 2.5 * np.sqrt(3))) * 2
    v5 = tf.sqrt((z1 - 2.5) * (z1 - 2.5) + (z2 + 2.5 * np.sqrt(3)) * (z2 + 2.5 * np.sqrt(3))) * 2
    v6 = tf.sqrt((z1 + 2.5) * (z1 + 2.5) + (z2 - 2.5 * np.sqrt(3)) * (z2 - 2.5 * np.sqrt(3))) * 2
    pdf1 = tf.exp(-0.5 * v1 * v1) / tf.sqrt(2 * np.pi * 0.25)
    pdf2 = tf.exp(-0.5 * v2 * v2) / tf.sqrt(2 * np.pi * 0.25)
    pdf3 = tf.exp(-0.5 * v3 * v3) / tf.sqrt(2 * np.pi * 0.25)
    pdf4 = tf.exp(-0.5 * v4 * v4) / tf.sqrt(2 * np.pi * 0.25)
    pdf5 = tf.exp(-0.5 * v5 * v5) / tf.sqrt(2 * np.pi * 0.25)
    pdf6 = tf.exp(-0.5 * v6 * v6) / tf.sqrt(2 * np.pi * 0.25)
    return -tf.log((pdf1 + pdf2 + pdf3 + pdf4 + pdf5 + pdf6) / 6)
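Geometrically (my reading of the constants above), the six centers (±5, 0) and (±2.5, ±2.5√3) sit evenly on a circle of radius 5, and each v_k is the distance to a center divided by σ = 0.5, so the function returns the energy

U(z) = -\log \frac{1}{6} \sum_{k=1}^{6} \frac{1}{\sqrt{2\pi\sigma^2}} \exp\!\left(-\frac{\|z-\mu_k\|^2}{2\sigma^2}\right), \qquad \sigma^2 = 0.25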
def _evalfull(self, x):
    fadd = self.fopt
    curshape, dim = self.shape_(x)
    # it is assumed x are row vectors
    if self.lastshape != curshape:
        self.initwithsize(curshape, dim)
    # BOUNDARY HANDLING
    # TRANSFORMATION IN SEARCH SPACE
    x = x - self.arrxopt
    x = monotoneTFosc(x)
    idx = (x > 0)
    x[idx] = x[idx] ** (1 + self.arrexpo[idx] * np.sqrt(x[idx]))
    x = self.arrscales * x
    # COMPUTATION core: Rastrigin function
    ftrue = 10 * (self.dim - np.sum(np.cos(2 * np.pi * x), -1)) + np.sum(x ** 2, -1)
    fval = self.noise(ftrue)  # identity for noise-free variants
    # FINALIZE
    ftrue += fadd
    fval += fadd
    return fval, ftrue
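The core line is the classic Rastrigin function, evaluated after the BBOB-style search-space transformations:

f(x) = 10\left(D - \sum_{i=1}^{D} \cos(2\pi x_i)\right) + \sum_{i=1}^{D} x_i^2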
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = compute_xopt(self.rseed, dim)
        self.rotation = compute_rotation(self.rseed + 1e6, dim)
        self.scales = (1. / self.condition ** .5) ** linspace(0, 1, dim)  # CAVE?
        self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
        # decouple scaling from function definition
        self.linearTF = dot(self.linearTF, self.rotation)
        K = np.arange(0, 12)
        self.aK = np.reshape(0.5 ** K, (1, 12))
        self.bK = np.reshape(3. ** K, (1, 12))
        self.f0 = np.sum(self.aK * np.cos(2 * np.pi * self.bK * 0.5))  # optimal value
    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)
# From volume_raycasting_example.py, project ModernGL-Volume-Raycasting-Example by ulricheck
def pan(self, dx, dy, dz, relative=False):
    """
    Moves the center (look-at) position while holding the camera in place.
    If relative=True, then the coordinates are interpreted such that x
    is in the global xy plane and points to the right side of the view, y is
    in the global xy plane and orthogonal to x, and z points in the global z
    direction. Distances are scaled roughly such that a value of 1.0 moves
    by one pixel on screen.
    """
    if not relative:
        self.camera_center += QtGui.QVector3D(dx, dy, dz)
    else:
        cPos = self.cameraPosition()
        cVec = self.camera_center - cPos
        dist = cVec.length()  ## distance from camera to center
        xDist = dist * 2. * np.tan(0.5 * self.camera_fov * np.pi / 180.)  ## approx. width of view at distance of center point
        xScale = xDist / self.width()
        zVec = QtGui.QVector3D(0, 0, 1)
        xVec = QtGui.QVector3D.crossProduct(zVec, cVec).normalized()
        yVec = QtGui.QVector3D.crossProduct(xVec, zVec).normalized()
        self.camera_center = self.camera_center + xVec * xScale * dx + yVec * xScale * dy + zVec * xScale * dz
    self.update()
def test_pitch_estimation(self):
    """
    Test the pitch estimation algorithm with a contrived small example.
    If the estimated pitch is within 5 Hz, call it good (for this small
    example, since the algorithm wasn't made for this type of synthesized signal).
    """
    cfg = ExperimentConfig(pitch_strength_thresh=-np.inf)
    # the next 3 variables are in Hz
    tolerance = 5
    fs = 48000
    f = 150
    # create a sine wave of f Hz freq sampled at fs Hz
    x = np.sin(2*np.pi * f/fs * np.arange(2**10))
    # estimate the pitch, it should be close to f
    p, t, s = pest.pitch_estimation(x, fs, cfg)
    self.assertTrue(np.all(np.abs(p - f) < tolerance))
def setFromQTransform(self, tr):
    p1 = Point(tr.map(0., 0.))
    p2 = Point(tr.map(1., 0.))
    p3 = Point(tr.map(0., 1.))
    dp2 = Point(p2 - p1)
    dp3 = Point(p3 - p1)
    ## detect flipped axes
    if dp2.angle(dp3) > 0:
        # da = 180
        da = 0
        sy = -1.0
    else:
        da = 0
        sy = 1.0
    self._state = {
        'pos': Point(p1),
        'scale': Point(dp2.length(), dp3.length() * sy),
        'angle': (np.arctan2(dp2[1], dp2[0]) * 180. / np.pi) + da
    }
    self.update()
def projectionMatrix(self, region=None):
    # Xw = (Xnd + 1) * width/2 + X0
    if region is None:
        region = (0, 0, self.width(), self.height())
    x0, y0, w, h = self.getViewport()
    dist = self.opts['distance']
    fov = self.opts['fov']
    nearClip = dist * 0.001
    farClip = dist * 1000.
    r = nearClip * np.tan(fov * 0.5 * np.pi / 180.)
    t = r * h / w
    # convert screen coordinates (region) to normalized device coordinates
    # Xnd = (Xw - X0) * 2/width - 1
    ## Note that X0 and width in these equations must be the values used in viewport
    left = r * ((region[0]-x0) * (2.0/w) - 1)
    right = r * ((region[0]+region[2]-x0) * (2.0/w) - 1)
    bottom = t * ((region[1]-y0) * (2.0/h) - 1)
    top = t * ((region[1]+region[3]-y0) * (2.0/h) - 1)
    tr = QtGui.QMatrix4x4()
    tr.frustum(left, right, bottom, top, nearClip, farClip)
    return tr
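The half-extents of the frustum at the near plane follow from the field of view, as computed above; left/right/bottom/top then rescale r and t by the region's position in normalized device coordinates:

r = z_{\mathrm{near}} \tan\!\left(\tfrac{\mathrm{fov}}{2}\right), \qquad t = r\,\frac{h}{w}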
def pan(self, dx, dy, dz, relative=False):
    """
    Moves the center (look-at) position while holding the camera in place.
    If relative=True, then the coordinates are interpreted such that x
    is in the global xy plane and points to the right side of the view, y is
    in the global xy plane and orthogonal to x, and z points in the global z
    direction. Distances are scaled roughly such that a value of 1.0 moves
    by one pixel on screen.
    """
    if not relative:
        self.opts['center'] += QtGui.QVector3D(dx, dy, dz)
    else:
        cPos = self.cameraPosition()
        cVec = self.opts['center'] - cPos
        dist = cVec.length()  ## distance from camera to center
        xDist = dist * 2. * np.tan(0.5 * self.opts['fov'] * np.pi / 180.)  ## approx. width of view at distance of center point
        xScale = xDist / self.width()
        zVec = QtGui.QVector3D(0, 0, 1)
        xVec = QtGui.QVector3D.crossProduct(zVec, cVec).normalized()
        yVec = QtGui.QVector3D.crossProduct(xVec, zVec).normalized()
        self.opts['center'] = self.opts['center'] + xVec * xScale * dx + yVec * xScale * dy + zVec * xScale * dz
    self.update()
def makeArrowPath(headLen=20, tipAngle=20, tailLen=20, tailWidth=3, baseAngle=0):
    """
    Construct a path outlining an arrow with the given dimensions.
    The arrow points in the -x direction with tip positioned at 0,0.
    The head width is computed from *headLen* and *tipAngle* (in degrees).
    If *tailLen* is None, no tail will be drawn.
    """
    headWidth = headLen * np.tan(tipAngle * 0.5 * np.pi/180.)
    path = QtGui.QPainterPath()
    path.moveTo(0, 0)
    path.lineTo(headLen, -headWidth)
    if tailLen is None:
        innerY = headLen - headWidth * np.tan(baseAngle*np.pi/180.)
        path.lineTo(innerY, 0)
    else:
        tailWidth *= 0.5
        innerY = headLen - (headWidth-tailWidth) * np.tan(baseAngle*np.pi/180.)
        path.lineTo(innerY, -tailWidth)
        path.lineTo(headLen + tailLen, -tailWidth)
        path.lineTo(headLen + tailLen, tailWidth)
        path.lineTo(innerY, tailWidth)
        path.lineTo(headLen, headWidth)
    path.lineTo(0, 0)
    return path
def make_wafer(self, wafer_r, frame, label, labelloc, labelwidth):
    """
    Generate a wafer outline with the primary flat on the left. From
    https://coresix.com/products/wafers/ I estimated the angle defining
    the wafer flat as arctan((flat/2) / radius).
    """
    angled = 18
    angle = angled*np.pi/180
    circ = cad.shapes.Circle((0, 0), wafer_r, width=self.boxwidth,
                             initial_angle=180+angled, final_angle=360+180-angled,
                             layer=self.layer_box)
    flat = cad.core.Path([(-wafer_r*np.cos(angle), wafer_r*np.sin(angle)),
                          (-wafer_r*np.cos(angle), -wafer_r*np.sin(angle))],
                         width=self.boxwidth, layer=self.layer_box)
    date = time.strftime("%d/%m/%Y")
    if labelloc == (0, 0):
        labelloc = (-2e3, wafer_r-1e3)
    # The label is added 100 um on top of the main cell
    label_grid_chip = cad.shapes.LineLabel(self.name + " " + date, 500,
                                           position=labelloc,
                                           line_width=labelwidth,
                                           layer=self.layer_label)
    if frame == True:
        self.add(circ)
        self.add(flat)
    if label == True:
        self.add(label_grid_chip)