def __prepare_controls_and_actions(self):
    self.__discrete_controls_to_net = np.array([i for i in range(len(self.__discrete_controls))
                                                if i not in self.__discrete_controls_manual])
    self.__num_manual_controls = len(self.__discrete_controls_manual)
    self.__net_discrete_actions = []
    if not self.__opposite_button_pairs:
        # Enumerate every on/off combination of the network-controlled buttons.
        for perm in it.product([False, True], repeat=len(self.__discrete_controls_to_net)):
            self.__net_discrete_actions.append(list(perm))
    else:
        # Same enumeration, but drop combinations that press both buttons of an opposite pair.
        for perm in it.product([False, True], repeat=len(self.__discrete_controls_to_net)):
            act = list(perm)
            valid = True
            for button_p in self.__opposite_button_pairs:
                if act[button_p[0]] and act[button_p[1]]:
                    valid = False
            if valid:
                self.__net_discrete_actions.append(act)
    self.__num_net_discrete_actions = len(self.__net_discrete_actions)
    self.__action_to_index = {tuple(val): ind for (ind, val) in enumerate(self.__net_discrete_actions)}
    self.__net_discrete_actions = np.array(self.__net_discrete_actions)
    self.__onehot_discrete_actions = np.eye(self.__num_net_discrete_actions)
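# Illustrative sketch (not part of the original class): enumerating valid button
# combinations and mapping each one to a one-hot row of np.eye, as the method above
# does. The three buttons and the single opposite pair below are made-up examples.
import itertools as it
import numpy as np

opposite_button_pairs = [(0, 1)]  # e.g. LEFT and RIGHT cannot be pressed together
actions = [list(p) for p in it.product([False, True], repeat=3)
           if not any(p[a] and p[b] for a, b in opposite_button_pairs)]
action_to_index = {tuple(a): i for i, a in enumerate(actions)}
onehot = np.eye(len(actions))
print(onehot[action_to_index[(False, True, True)]])  # one-hot vector for that action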
def initialize_match_matrix(self):
    """
    Construct the initial match matrix.

    Returns:
    --------
    match_matrix: array
        The match matrix
    """
    # TODO add possibility for slack
    match_matrix = np.zeros((self.reactants_elements.size, self.products_elements.size))
    # set sub blocks of the match matrix to one plus a random perturbation,
    # followed by column normalization
    for indices in self.element_type_subset_indices:
        match_matrix[indices] = 1 + 1e-3 * np.random.random(match_matrix[indices].shape)
        match_matrix[indices] /= match_matrix[indices].sum(0)
    #match_matrix = np.eye(match_matrix.shape[0])
    #for i,j in [(0,0),(0,4),(1,1),(1,3),(4,0),(4,4),(3,3),(3,1),(7,7),(7,11),(8,8),(8,10),(20,20),(20,24),(21,21),(21,23),(11,7),(11,11),(10,8),(10,10),(24,20),(24,24),(23,23),(23,21)]:
    #    match_matrix[i,j] = 0.5
    return match_matrix
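# Minimal standalone sketch of the initialization above, assuming two element-type
# blocks in a 4x4 match matrix; the block indices and sizes here are made up.
import numpy as np

match_matrix = np.zeros((4, 4))
element_type_subset_indices = [np.ix_([0, 1], [0, 1]), np.ix_([2, 3], [2, 3])]
for indices in element_type_subset_indices:
    match_matrix[indices] = 1 + 1e-3 * np.random.random(match_matrix[indices].shape)
    match_matrix[indices] /= match_matrix[indices].sum(0)  # normalize each block column
print(match_matrix.sum(0))  # every column sums to 1 after normalization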
def test_nonzero_twodim(self):
    x = np.array([[0, 1, 0], [2, 0, 3]])
    assert_equal(np.count_nonzero(x), 3)
    assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))

    x = np.eye(3)
    assert_equal(np.count_nonzero(x), 3)
    assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))

    x = np.array([[(0, 1), (0, 0), (1, 11)],
                  [(1, 1), (1, 0), (0, 0)],
                  [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])
    assert_equal(np.count_nonzero(x['a']), 4)
    assert_equal(np.count_nonzero(x['b']), 5)
    assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))
    assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))

    assert_(not x['a'].T.flags.aligned)
    assert_equal(np.count_nonzero(x['a'].T), 4)
    assert_equal(np.count_nonzero(x['b'].T), 5)
    assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))
    assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))
def test_matrix_rank(self):
    # Full rank matrix
    yield assert_equal, 4, matrix_rank(np.eye(4))
    # rank deficient matrix
    I = np.eye(4)
    I[-1, -1] = 0.
    yield assert_equal, matrix_rank(I), 3
    # All zeros - zero rank
    yield assert_equal, matrix_rank(np.zeros((4, 4))), 0
    # 1 dimension - rank 1 unless all 0
    yield assert_equal, matrix_rank([1, 0, 0, 0]), 1
    yield assert_equal, matrix_rank(np.zeros((4,))), 0
    # accepts array-like
    yield assert_equal, matrix_rank([1]), 1
    # greater than 2 dimensions raises error
    yield assert_raises, TypeError, matrix_rank, np.zeros((2, 2, 2))
    # works on scalar
    yield assert_equal, matrix_rank(1), 1
def test_byteorder_check():
    # Byte order check should pass for native order
    if sys.byteorder == 'little':
        native = '<'
    else:
        native = '>'

    for dtt in (np.float32, np.float64):
        arr = np.eye(4, dtype=dtt)
        n_arr = arr.newbyteorder(native)
        sw_arr = arr.newbyteorder('S').byteswap()
        assert_equal(arr.dtype.byteorder, '=')
        for routine in (linalg.inv, linalg.det, linalg.pinv):
            # Normal call
            res = routine(arr)
            # Native but not '='
            assert_array_equal(res, routine(n_arr))
            # Swapped
            assert_array_equal(res, routine(sw_arr))
def test_100(self):
    x, w = leg.leggauss(100)

    # test orthogonality. Note that the results need to be normalized,
    # otherwise the huge values that can arise from fast growing
    # functions like Laguerre can be very confusing.
    v = leg.legvander(x, 99)
    vv = np.dot(v.T * w, v)
    vd = 1/np.sqrt(vv.diagonal())
    vv = vd[:, None] * vv * vd
    assert_almost_equal(vv, np.eye(100))

    # check that the integral of 1 is correct
    tgt = 2.0
    assert_almost_equal(w.sum(), tgt)
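# Quick standalone check of the property the test above relies on: an n-point
# Gauss-Legendre rule integrates polynomials up to degree 2n-1 exactly on [-1, 1].
import numpy as np

x, w = np.polynomial.legendre.leggauss(5)
print(np.sum(w * x**4))   # integral of x^4 over [-1, 1] is 2/5 = 0.4
print(np.sum(w))          # integral of 1 over [-1, 1] is 2.0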
def _fwlinear(self, args, output_size, scope=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]
    assert len(args) == 2
    assert args[0].get_shape().as_list()[1] == output_size

    dtype = [a.dtype for a in args][0]

    with vs.variable_scope(scope or "Linear"):
        matrixW = vs.get_variable(
            "MatrixW", dtype=dtype,
            initializer=tf.convert_to_tensor(np.eye(output_size, dtype=np.float32) * .05))
        matrixC = vs.get_variable(
            "MatrixC", [args[1].get_shape().as_list()[1], output_size], dtype=dtype)
        res = tf.matmul(args[0], matrixW) + tf.matmul(args[1], matrixC)
        return res
def getValuesFromPose(self, P):
    '''return the virtual values of the pots corresponding to the pose P'''
    vals = []
    grads = []
    for i, r, l, placement, attach_p in zip(range(3), self.rs, self.ls, self.placements, self.attach_ps):
        # first pot axis
        a = placement.rot * col([1, 0, 0])
        # second pot axis
        b = placement.rot * col([0, 1, 0])
        # string axis
        c = placement.rot * col([0, 0, 1])

        # attach point on the joystick
        p_joystick = P * attach_p
        v = p_joystick - placement.trans
        va = v - dot(v, a)*a
        vb = v - dot(v, b)*b

        # angles of the pots
        alpha = math.atan2(dot(vb, a), dot(vb, c))
        beta = math.atan2(dot(va, b), dot(va, c))
        vals.append(alpha)
        vals.append(beta)

        # calculation of the derivatives
        dv = np.bmat([-P.rot.mat() * quat.skew(attach_p), P.rot.mat()])
        dva = (np.eye(3) - a*a.T) * dv
        dvb = (np.eye(3) - b*b.T) * dv
        dalpha = (1/dot(vb, vb)) * (dot(vb, c) * a.T - dot(vb, a) * c.T) * dvb
        dbeta = (1/dot(va, va)) * (dot(va, c) * b.T - dot(va, b) * c.T) * dva
        grads.append(dalpha)
        grads.append(dbeta)
    return (col(vals), np.bmat([[grads]]))
def prepare_cholesky(N=100, dtype=np.double):
    N = int(N*2)
    A = np.asarray(np.random.rand(N, N), dtype=dtype)
    return (A*A.transpose() + N*np.eye(N), )
    #return toc/trials, N*N*N/3.0*1e-9, times
    #inv: return toc/trials, 2*N*N*N*1e-9, times

##################################################################################
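# Usage sketch for the benchmark helper above: the returned matrix is symmetric
# (the elementwise product A*A.T is symmetric) and diagonally dominant thanks to
# the N*np.eye(N) term, so np.linalg.cholesky succeeds on it.
import numpy as np

(M,) = prepare_cholesky(N=50)
L = np.linalg.cholesky(M)
print(np.allclose(L @ L.T, M))  # True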
def update_filter(self, timestep, estimate, ranges):
    """Update position filter.

    Args:
        timestep (float): Time elapsed since last update.
        estimate (StateEstimate): Position estimate to update.
        ranges (list of floats): Range measurements.

    Returns:
        new_estimate (StateEstimate): Updated position estimate.
        outlier_flag (bool): Flag indicating whether the measurement was rejected as an outlier.
    """
    num_of_units = len(ranges)
    x = estimate.state
    P = estimate.covariance

    # Compute process matrix and covariance matrices
    F, Q, R = self._compute_process_and_covariance_matrices(timestep)
    # rospy.logdebug('F: {}'.format(F))
    # rospy.logdebug('Q: {}'.format(Q))
    # rospy.logdebug('R: {}'.format(R))

    # Prediction
    x = np.dot(F, x)
    P = np.dot(F, np.dot(P, F.T)) + Q

    # Update
    n = np.copy(x)
    H = np.zeros((num_of_units, x.size))
    z = np.zeros((num_of_units, 1))
    h = np.zeros((num_of_units, 1))
    for i in range(self.ikf_iterations):
        n, K, outlier_flag = self._ikf_iteration(x, n, ranges, h, H, z, estimate, R)

    if outlier_flag:
        new_estimate = estimate
    else:
        new_state = n
        new_covariance = np.dot((np.eye(6) - np.dot(K, H)), P)
        new_estimate = UWBTracker.StateEstimate(new_state, new_covariance)

    return new_estimate, outlier_flag
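# A minimal, generic Kalman update sketch (not the node's _ikf_iteration helper),
# showing the covariance update form used above: P_new = (I - K H) P.
# The state size, measurement model, and noise values here are made up.
import numpy as np

P = np.eye(6)                                     # prior covariance
H = np.zeros((2, 6)); H[0, 0] = H[1, 1] = 1.0     # measure the first two states
R = 0.1 * np.eye(2)                               # measurement noise
S = H @ P @ H.T + R                               # innovation covariance
K = P @ H.T @ np.linalg.inv(S)                    # Kalman gain
P_new = (np.eye(6) - K @ H) @ P                   # posterior covariance
print(np.diag(P_new))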
def get_precision(self):
    """Compute data precision matrix with the generative model.

    Equals the inverse of the covariance but computed with
    the matrix inversion lemma for efficiency.

    Returns
    -------
    precision : array, shape=(n_features, n_features)
        Estimated precision of data.
    """
    n_features = self.components_.shape[1]

    # handle corner cases first
    if self.n_components_ == 0:
        return np.eye(n_features) / self.noise_variance_
    if self.n_components_ == n_features:
        return linalg.inv(self.get_covariance())

    # Get precision using matrix inversion lemma
    components_ = self.components_
    exp_var = self.explained_variance_
    exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
    precision = np.dot(components_, components_.T) / self.noise_variance_
    precision.flat[::len(precision) + 1] += 1. / exp_var_diff
    precision = np.dot(components_.T,
                       np.dot(linalg.inv(precision), components_))
    precision /= -(self.noise_variance_ ** 2)
    precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
    return precision
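# Standalone sketch of the matrix inversion lemma (Woodbury identity) that the
# method above relies on: inverting sigma2*I + W.T diag(v) W via a small k x k
# inverse instead of a full n x n one. Shapes and values here are made up.
import numpy as np

n, k, sigma2 = 6, 2, 0.5
W = np.random.randn(k, n)          # plays the role of components_ (k x n)
v = np.array([3.0, 1.5])           # plays the role of explained_variance_ - noise_variance_
cov = sigma2 * np.eye(n) + W.T @ (v[:, None] * W)

inner = np.linalg.inv(np.diag(1.0 / v) + W @ W.T / sigma2)   # k x k inverse
prec = np.eye(n) / sigma2 - W.T @ inner @ W / sigma2**2      # Woodbury identity
print(np.allclose(prec, np.linalg.inv(cov)))                 # True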
def getTrainKernel(self, params):
    self.checkParams(params)
    return np.eye(self.n)
def AorthogonalityCheck(A, U, d):
    """
    Test the Frobenius norm of D^{-1}(U^T A U) - I_k.
    """
    V = np.zeros(U.shape)
    AV = np.zeros(U.shape)
    Av = Vector()
    v = Vector()
    A.init_vector(Av, 0)
    A.init_vector(v, 1)

    nvec = U.shape[1]
    for i in range(0, nvec):
        v.set_local(U[:, i])
        v *= 1./math.sqrt(d[i])
        A.mult(v, Av)
        AV[:, i] = Av.get_local()
        V[:, i] = v.get_local()

    VtAV = np.dot(V.T, AV)
    err = VtAV - np.eye(nvec, dtype=VtAV.dtype)

    # plt.imshow(np.abs(err))
    # plt.colorbar()
    # plt.show()

    print("i, ||Vt(i,:)AV(:,i) - I_i||_F, V[:,i] = 1/sqrt(lambda_i) U[:,i]")
    for i in range(1, nvec+1):
        print(i, np.linalg.norm(err[0:i, 0:i], 'fro'))
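# Dense-numpy sketch of the same check (without the Vector objects the function
# above assumes): columns U that satisfy U^T A U = diag(d) give an error close to
# zero after rescaling each column by 1/sqrt(d_i).
import numpy as np

A = np.diag([4.0, 9.0, 16.0])          # toy SPD operator
d, U = np.linalg.eigh(A)               # eigenvectors satisfy U^T A U = diag(d)
V = U / np.sqrt(d)                     # rescale column i by 1/sqrt(lambda_i)
err = V.T @ A @ V - np.eye(len(d))
print(np.linalg.norm(err, 'fro'))      # ~0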
def chol_inv(B, lower=True):
    """
    Return the inverse of a matrix A given its Cholesky factor B
    (A = B B.T), by solving A X = I with cho_solve.
    """
    return cho_solve((B, lower), np.eye(B.shape[0]))
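# Usage sketch: pair chol_inv with scipy.linalg.cho_factor on a small SPD matrix.
import numpy as np
from scipy.linalg import cho_factor, cho_solve

A = np.array([[4.0, 1.0], [1.0, 3.0]])
B, lower = cho_factor(A, lower=True)
A_inv = chol_inv(B, lower)
print(np.allclose(A_inv @ A, np.eye(2)))  # True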
def test_theta_0():
    rng.seed(0)
    n_samples = 100
    Y = rng.randn(n_samples, 5)
    X = rng.randn(n_samples, 5)

    sgcrf = SparseGaussianCRF(lamL=0.01, lamT=0.01)
    sgcrf.fit(X, Y)

    assert np.allclose(sgcrf.Lam, np.eye(5), .1, .2)
def init_layers(X, Z, dims, final_mean_function):
    M = Z.shape[0]
    q_mus, q_sqrts, mean_functions, Zs = [], [], [], []
    X_running, Z_running = X.copy(), Z.copy()

    for dim_in, dim_out in zip(dims[:-2], dims[1:-1]):
        if dim_in == dim_out:  # identity for same dims
            W = np.eye(dim_in)
        elif dim_in > dim_out:  # use PCA mf for stepping down
            _, _, V = np.linalg.svd(X_running, full_matrices=False)
            W = V[:dim_out, :].T
        elif dim_in < dim_out:  # identity + pad with zeros for stepping up
            I = np.eye(dim_in)
            zeros = np.zeros((dim_in, dim_out - dim_in))
            W = np.concatenate([I, zeros], 1)

        mean_functions.append(Linear(A=W))
        Zs.append(Z_running.copy())
        q_mus.append(np.zeros((M, dim_out)))
        q_sqrts.append(np.eye(M)[:, :, None] * np.ones((1, 1, dim_out)))

        Z_running = Z_running.dot(W)
        X_running = X_running.dot(W)

    # final layer (as before but no mean function)
    mean_functions.append(final_mean_function)
    Zs.append(Z_running.copy())
    q_mus.append(np.zeros((M, dims[-1])))
    q_sqrts.append(np.eye(M)[:, :, None] * np.ones((1, 1, dims[-1])))

    return q_mus, q_sqrts, Zs, mean_functions
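# Small sketch of the three mean-function weight cases used above (identity for
# equal widths, a PCA projection when stepping down, a zero-padded identity when
# stepping up); the data and layer widths here are made up.
import numpy as np

X = np.random.randn(20, 4)

# same width: identity
W_same = np.eye(4)
print(W_same.shape)                                 # (4, 4)

# stepping down 4 -> 2: top right-singular vectors of the running data
_, _, V = np.linalg.svd(X, full_matrices=False)
W_down = V[:2, :].T
print(W_down.shape)                                 # (4, 2)

# stepping up 2 -> 5: identity padded with zero columns
W_up = np.concatenate([np.eye(2), np.zeros((2, 3))], 1)
print(W_up.shape)                                   # (2, 5)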
def eye_mask(self, shape):
    """
    Build a mask using np.eye.
    """
    return ~np.eye(*shape, dtype=bool)
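# Illustrative use of such a mask: select the off-diagonal entries of a square
# array. The method above takes self, so the same expression is shown standalone.
import numpy as np

shape = (3, 3)
mask = ~np.eye(*shape, dtype=bool)   # True everywhere except the diagonal
A = np.arange(9).reshape(3, 3)
print(A[mask])                        # [1 2 3 5 6 7]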
def gl_update_joint_matrices(self, node, parent_joint=None,
                             parent_joint_matrix=numpy.eye(3, 4, dtype=numpy.float32)):
    for child in node.children:
        if child.node_type == j3d.inf1.NodeType.JOINT:
            joint = self.gl_joints[child.index]
            joint_matrix = self.gl_joint_matrices[child.index]
            joint_matrix[:] = joint.create_matrix(parent_joint, parent_joint_matrix)
            self.gl_update_joint_matrices(child, joint, joint_matrix)
        else:
            self.gl_update_joint_matrices(child, parent_joint, parent_joint_matrix)
def KeyGen_test():
    A, S, Aq_bar, S_bar = KeyGen(q=7, n=5, m=7, alpha=1)
    print(Aq_bar)
    print(S_bar)
    print(A)
    print(S)
    print(np.matmul(A, S) - 7*np.eye(5, dtype=int))
def __init__(self, Y, R=None, t=None, maxIterations=100, gamma=0.1):
    if Y is None:
        raise ValueError('Empty list of point clouds!')

    dimensions = [cloud.shape[1] for cloud in Y]
    if not all(dimension == dimensions[0] for dimension in dimensions):
        raise ValueError('All point clouds must have the same number of dimensions!')

    self.Y = Y
    self.M = [cloud.shape[0] for cloud in self.Y]
    self.D = dimensions[0]

    if R:
        rotations = [rotation.shape for rotation in R]
        if not all(rotation[0] == self.D and rotation[1] == self.D for rotation in rotations):
            raise ValueError('All rotation matrices need to be %d x %d matrices!' % (self.D, self.D))
        self.R = R
    else:
        self.R = [np.eye(self.D) for cloud in Y]

    if t:
        translations = [translation.shape for translation in t]
        if not all(translation[0] == 1 and translation[1] == self.D for translation in translations):
            raise ValueError('All translation vectors need to be 1 x %d matrices!' % self.D)
        self.t = t
    else:
        self.t = [np.atleast_2d(np.zeros((1, self.D))) for cloud in self.Y]