def rel_to_abs_f(vector, cell):
    """
    Convert a position vector in internal (fractional) coordinates to
    absolute coordinates in Angstrom for a film structure (2D).

    Only the in-plane (x, y) components are transformed with the upper-left
    2x2 block of ``cell``; the z component is passed through unchanged
    (assumed to already be absolute, since z has no periodic boundary).

    :param vector: length-3 sequence of fractional coordinates
    :param cell: 3x3 cell matrix (rows are lattice vectors)
    :returns: list ``[x_abs, y_abs, z]``, or ``False`` if ``vector`` does
        not have exactly 3 components
    """
    # TODO this currently only works if the z-coordinate is the one with no pbc
    # Therefore if a structure with x non pbc is given this should also work.
    # maybe write a 'transform film to fleur_film' routine?
    if len(vector) != 3:
        return False
    position = np.array(vector)
    # In-plane 2x2 part of the cell; z row/column is intentionally ignored.
    in_plane_cell = np.array(cell)[0:2, 0:2]
    new_xy = np.matmul(position[:2], in_plane_cell)
    return [new_xy[0], new_xy[1], position[2]]
Example source code listings for Python's matmul()
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_numpy_ufunc_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
def mul(self, matrix):
    '''Multiply this matrix by `matrix`, storing the result in place.

    The order of operation is: `this @ matrix`.

    *Parameters:*
    - `matrix`: `Matrix4`
    '''
    # Expose both flat 16-element value buffers as 4x4 matrices.
    rhs = np.reshape(self._values, (4, 4))
    lhs = np.reshape(matrix.values, (4, 4))
    # Reuse the preallocated scratch buffer as the matmul output.
    self.tmp.shape = (4, 4)
    np.matmul(lhs, rhs, out=self.tmp)
    self.tmp.shape = (16,)
    # Copy the product back into this matrix's storage.
    self._values[:] = self.tmp
    return self
BidirectionNet_tfidf.py — file source
Project: Sohu-LuckData-Image-Text-Matching-Competition
Author: WeitaoVan
Project source code
File source code
Views: 40
Favorites: 0
Likes: 0
Comments: 0
def select_negtive(self, i_feat, s_feat, sess, topN=50):
    '''
    Select the triplets with the largest losses \n
    return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
    '''
    feeds = {self.image_feat: i_feat, self.sentence_feat: s_feat}
    i_embed, s_embed = sess.run([self.image_fc2, self.sentence_fc2], feed_dict=feeds)
    # Similarity of every image embedding against every sentence embedding.
    sim = np.matmul(i_embed, s_embed.T)
    n_pairs = sim.shape[0]
    # Exclude the matching (positive) pairs from negative selection.
    np.fill_diagonal(sim, -2 * np.ones(n_pairs))
    # Hardest topN negatives per row (sentences) and per column (images).
    hard_s_idx = sim.argsort(axis=1)[:, -topN:]
    hard_i_idx = sim.argsort(axis=0)[-topN:, :]
    i_feat_pos = i_feat.repeat(topN, axis=0)
    s_feat_pos = s_feat.repeat(topN, axis=0)
    s_feat_neg = s_feat[hard_s_idx.flatten('C')]
    i_feat_neg = i_feat[hard_i_idx.flatten('F')]
    return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
BidirectionNet_4wtfidf.py — file source
Project: Sohu-LuckData-Image-Text-Matching-Competition
Author: WeitaoVan
Project source code
File source code
Views: 23
Favorites: 0
Likes: 0
Comments: 0
def select_negtive(self, i_feat, s_feat, sess, topN=50):
    '''
    Select the triplets with the largest losses \n
    return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
    '''
    feeds = {self.image_feat: i_feat, self.sentence_feat: s_feat}
    i_embed, s_embed = sess.run([self.image_fc2, self.sentence_fc2], feed_dict=feeds)
    # Similarity of every image embedding against every sentence embedding.
    sim = np.matmul(i_embed, s_embed.T)
    n_pairs = sim.shape[0]
    # Exclude the matching (positive) pairs from negative selection.
    np.fill_diagonal(sim, -2 * np.ones(n_pairs))
    # Hardest topN negatives per row (sentences) and per column (images).
    hard_s_idx = sim.argsort(axis=1)[:, -topN:]
    hard_i_idx = sim.argsort(axis=0)[-topN:, :]
    i_feat_pos = i_feat.repeat(topN, axis=0)
    s_feat_pos = s_feat.repeat(topN, axis=0)
    s_feat_neg = s_feat[hard_s_idx.flatten('C')]
    i_feat_neg = i_feat[hard_i_idx.flatten('F')]
    return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
def get_xyz(interface, xyz_from_camera):
    """Convert a camera-frame XYZ measurement into a robot-base-frame target.

    :param interface: robot interface exposing ``current_status.angles``
    :param xyz_from_camera: target position expressed in the camera frame
    :returns: 3x1 array, target position in the base frame
    """
    joint_angles = interface.current_status.angles[0:3]
    # Current end-effector position in the base frame.
    P0t = DobotModel.forward_kinematics(joint_angles)
    # Fixed camera mounting offset, expressed in the tool frame.
    Pct = np.array(CAMERA_OFFSET)
    # Fixed tool-to-camera rotation (axes swapped, z inverted).
    Rtc = np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1]])
    # Base-to-camera rotation = base-to-tool composed with tool-to-camera.
    R0c = np.matmul(DobotModel.R0T(joint_angles), Rtc)
    # Target relative to the tool, rotated into the base frame.
    Pta = np.matmul(R0c, xyz_from_camera) - np.matmul(R0c, Pct)
    return np.reshape(Pta, (3, 1)) + np.reshape(P0t, (3, 1))
# FUNCTION: Touch - Place the end effector on top of an AR tag
# AR TAGS: DUCKY = 0 DUCKYBOT = 1 OBSTACLE = 2
def __compute_valid_convolution_nd(data, kernel, dimension: int):
    """Valid (no padding) n-D convolution of `data` with `kernel` via im2col.

    Flattens every kernel-sized window of the trailing `dimension` axes of
    `data` into rows of `data_flat`, then performs one matmul against the
    flipped, flattened kernel.  Leading axes of `data` are carried through.

    NOTE(review): `convolution_shape` enumerates trailing axes from last to
    first, and the overflow carry below writes to index `r + 1` (index 0
    when r == -1), which only addresses the next-slower axis when the
    kernel is 2-D — confirm correctness for dimension >= 3 / non-square.
    """
    # Output extent per axis: data - kernel + 1 ("valid" convolution).
    convolution_shape = tuple(data.shape[i] - kernel.shape[i] + 1 for i in range(-1, -dimension - 1, -1))
    # Total number of output positions (windows).
    list_dimension = reduce(lambda a, b: a * b, convolution_shape)
    # Leading (non-convolved) axes, e.g. batch/channel, kept unchanged.
    data_prefix = data.shape[:-dimension]
    kernel_flat = kernel.ravel()
    # im2col buffer: one row of kernel-sized samples per output position.
    data_flat = numpy.zeros(data_prefix + (list_dimension, len(kernel_flat)))
    for i in range(list_dimension):
        # Decode the flat window index i into per-axis start offsets.
        tensor_slice_start = [0] * len(kernel.shape)
        tensor_slice = [slice(None)] * len(data.shape)
        tensor_slice_start[-1] = i
        for r in range(-1, -len(kernel.shape) - 1, -1):
            dimension_scale = data.shape[r] - kernel.shape[r] + 1
            if tensor_slice_start[r] >= dimension_scale:
                # Carry the overflow into the next axis (see NOTE above).
                tensor_slice_start[r + 1] = tensor_slice_start[r] // dimension_scale
                tensor_slice_start[r] %= dimension_scale
            tensor_slice[r] = slice(tensor_slice_start[r], tensor_slice_start[r] + kernel.shape[r])
        sub_convolution_index = (slice(None),) * (len(data.shape) - dimension) + tuple([i, slice(None)])
        # Copy the current window, flattened, into its im2col row.
        data_flat[sub_convolution_index] = data[tensor_slice].reshape(data_prefix + (reduce(lambda a, b: a * b, kernel.shape),))
    # Flip the kernel so this computes convolution rather than correlation.
    convolution_flat = numpy.matmul(data_flat, numpy.flip(kernel_flat, axis=0))
    convolution_nd = convolution_flat.reshape(data_prefix + convolution_shape)
    return convolution_nd
def test_matmul_two_vars():
    """Forward value and both gradients for matmul of two ad Variables."""
    x2 = ad.Variable(name='x2')
    x3 = ad.Variable(name='x3')
    y = ad.matmul(x2, x3)
    grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
    executor = ad.Executor([y, grad_x2, grad_x3])

    lhs_val = np.array([[1, 2], [3, 4], [5, 6]])   # 3x2
    rhs_val = np.array([[7, 8, 9], [10, 11, 12]])  # 2x3
    y_val, grad_x2_val, grad_x3_val = executor.run(feed_shapes={x2: lhs_val, x3: rhs_val})

    # Hand-computed references: dL/dA = 1 . B^T, dL/dB = A^T . 1.
    want_y = lhs_val.dot(rhs_val)
    upstream = np.ones_like(want_y)
    want_grad_lhs = upstream.dot(rhs_val.T)
    want_grad_rhs = lhs_val.T.dot(upstream)

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, want_y)
    assert np.array_equal(grad_x2_val, want_grad_lhs)
    assert np.array_equal(grad_x3_val, want_grad_rhs)
def test_matmul_var_and_param():
    """Forward value and gradients for matmul of a Variable with a Parameter."""
    x2 = ad.Variable(name="x2")
    w2_val = np.array([[7, 8, 9], [10, 11, 12]])  # 2x3
    w2 = ad.Parameter(name="w2", init=w2_val)
    y = ad.matmul(x2, w2)
    grad_x2, grad_w2 = ad.gradients(y, [x2, w2])
    executor = ad.Executor([y, grad_x2, grad_w2])
    x2_val = np.array([[1, 2], [3, 4], [5, 6]])  # 3x2
    y_val, grad_x2_val, grad_w2_val = executor.run(feed_shapes={x2: x2_val})
    expected_yval = np.matmul(x2_val, w2_val)
    expected_grad_x2_val = np.matmul(np.ones_like(expected_yval), np.transpose(w2_val))
    # Renamed from expected_grad_x3_val (copy-paste leftover from the two-var test).
    expected_grad_w2_val = np.matmul(np.transpose(x2_val), np.ones_like(expected_yval))
    assert isinstance(y, ad.Node)
    # NOTE(review): the value checks below were disabled in the original;
    # kept commented out to preserve behaviour — confirm whether they should run.
    # assert np.array_equal(y_val, expected_yval)
    # assert np.array_equal(grad_x2_val, expected_grad_x2_val)
    # assert np.array_equal(grad_w2_val, expected_grad_w2_val)
def output_step_scan(self, dummy, new_state):
    """Map one RNN state to the network output (a tf.scan step function).

    `dummy` is the previous scan output, required by tf.scan's step
    signature but unused here.  Returns relu(new_state) @ W_out^T + b_out,
    with the output weights masked by `output_Connectivity`.
    """
    if self.dale_ratio:
        # Dale's principle: force output weights to a fixed sign via abs(),
        # then apply the Dale sign matrix and the connectivity mask.
        new_output = tf.matmul(
            tf.nn.relu(new_state),
            tf.matmul(
                tf.abs(self.W_out) * self.output_Connectivity,
                self.Dale_out,
                name="in_2"),
            transpose_b=True, name="3")\
            + self.b_out
    else:
        # No Dale constraint: just mask the output weights.
        new_output = tf.matmul(tf.nn.relu(new_state), self.W_out * self.output_Connectivity,
                               transpose_b=True, name="3") + self.b_out
    return new_output
Logistic_Regressor_binary.py — file source
Project: learning-rank-public
Author: andreweskeclarke
Project source code
File source code
Views: 36
Favorites: 0
Likes: 0
Comments: 0
def gradient(x0, X, y, alpha):
    """Gradient of the L2-regularised logistic loss.

    :param x0: parameter vector; ``x0[0]`` is the intercept ``c`` and
        ``x0[1:]`` are the feature weights ``w``
    :param X: (n_samples, n_features) design matrix
    :param y: labels in {-1, +1}
    :param alpha: L2 regularisation strength (applied to ``w`` only)
    :returns: concatenated gradient ``[grad_c, grad_w]`` via ``np.c_``
    """
    # Was x0[1:137] (hard-coded 136 features); x0[1:] handles any width
    # and is identical for the original 137-element parameter vector.
    w, c = x0[1:], x0[0]
    z = X.dot(w) + c
    # phi is the logistic sigmoid (defined elsewhere in this module).
    z = phi(y * z)
    z0 = (z - 1) * y
    grad_w = np.matmul(z0, X) / X.shape[0] + alpha * w
    grad_c = np.array(z0.sum() / X.shape[0])
    return np.c_[([grad_c], grad_w)]
##### Stochastic Gradient Descent Optimiser ######
Logistic_Regressor.py — file source
Project: learning-rank-public
Author: andreweskeclarke
Project source code
File source code
Views: 19
Favorites: 0
Likes: 0
Comments: 0
def gradient(x0, X, y, alpha):
    """Gradient of the L2-regularised logistic loss.

    :param x0: parameter vector; ``x0[0]`` is the intercept ``c`` and
        ``x0[1:]`` are the feature weights ``w``
    :param X: (n_samples, n_features) design matrix
    :param y: labels in {-1, +1}
    :param alpha: L2 regularisation strength (applied to ``w`` only)
    :returns: concatenated gradient ``[grad_c, grad_w]`` via ``np.c_``
    """
    # Was x0[1:137] (hard-coded 136 features); x0[1:] handles any width
    # and is identical for the original 137-element parameter vector.
    w, c = x0[1:], x0[0]
    z = X.dot(w) + c
    # phi is the logistic sigmoid (defined elsewhere in this module).
    z = phi(y * z)
    z0 = (z - 1) * y
    grad_w = np.matmul(z0, X) / X.shape[0] + alpha * w
    grad_c = np.array(z0.sum() / X.shape[0])
    return np.c_[([grad_c], grad_w)]
##### Stochastic Gradient Descent Optimiser ######
test_multiarray.py — file source
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda
Author: SignalMedia
Project source code
File source code
Views: 24
Favorites: 0
Likes: 0
Comments: 0
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
test_multiarray.py — file source
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda
Author: SignalMedia
Project source code
File source code
Views: 22
Favorites: 0
Likes: 0
Comments: 0
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
test_multiarray.py — file source
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda
Author: SignalMedia
Project source code
File source code
Views: 23
Favorites: 0
Likes: 0
Comments: 0
def test_numpy_ufunc_override(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
def precalc(C, R, x_bar_list, P_bar_list):
    """Batched Kalman measurement-update precomputation.

    For each prior state `x_bar` and covariance `P_bar`, computes the
    predicted measurement, innovation covariance, Kalman gain and posterior
    covariance, vectorised over the leading (measurement) axis.
    """
    assert C.ndim == 2
    assert R.ndim == 2
    n_meas, n_states = x_bar_list.shape
    n_obs = C.shape[0]
    # Predicted measurement for every prior state: z_hat = C x_bar.
    z_hat_list = C.dot(x_bar_list.T).T
    # Innovation covariance S = C P C^T + R (broadcast over the batch).
    S_list = np.matmul(np.matmul(C, P_bar_list), C.T) + R
    S_inv_list = np.linalg.inv(S_list)
    # Kalman gain K = P C^T S^{-1}.
    K_list = np.matmul(np.matmul(P_bar_list, C.T), S_inv_list)
    # Posterior covariance P_hat = (I - K C) P.
    P_hat_list = P_bar_list - np.matmul(K_list.dot(C), P_bar_list)
    assert z_hat_list.shape == (n_meas, n_obs), "z_hat ERROR"
    assert S_list.shape == (n_meas, n_obs, n_obs), "S ERROR"
    assert S_inv_list.shape == S_list.shape, "S_inv ERROR"
    assert K_list.shape == (n_meas, n_states, n_obs)
    assert P_hat_list.shape == P_bar_list.shape, "P_hat ERROR"
    return z_hat_list, S_list, S_inv_list, K_list, P_hat_list
def correlation(task,load=True):
    """Estimate the total correlation of the network's log-layer activations
    over the validation set.

    NOTE: Python 2 code (print statement below); relies on the module-level
    `mytask` object and `tf` / `np` imports defined elsewhere in this file.
    """
    self = mytask
    if load:
        # Restore a trained model without re-enabling logging.
        self.initialize(_load=True, _logging=False, _log_dir='other/')
    data = []
    for batch in self.iterate_minibatches('valid'):
        xtrain, ytrain = batch
        # One-hot encode the 10-class integer labels.
        ytrain = np.eye(10)[ytrain]
        feed_dict = {self.x: xtrain, self.y: ytrain, self.sigma0: 1., self.initial_keep_prob: task['initial_keep_prob'], self.is_training: False}
        # Last tensor in the 'log_network' collection = activations of interest.
        z = tf.get_collection('log_network')[-1]
        batch_z = self.sess.run( z, feed_dict)
        data.append(batch_z)
    data = np.vstack(data)
    # Flatten each sample's activations into one row.
    data = data.reshape(data.shape[0],-1)
    def normal_tc(c0):
        # Total correlation of a zero-mean Gaussian with covariance c0:
        # -1/2 * log det(D^{-1} c0), normalised by the dimension, where D
        # is the diagonal of c0.
        c1i = np.diag(1./np.diag(c0))
        p = np.matmul(c1i,c0)
        return - .5 * np.linalg.slogdet(p)[1] / c0.shape[0]
    c0 = np.cov( data, rowvar=False )
    tc = normal_tc(c0)
    print "Total correlation: %f" % tc
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)