def gen_pruned_features(name):
    print(name)
    feature_dir = 'data/feature_' + args.domain + \
        '_' + str(args.n_boxes) + 'boxes/' + name + '/'
    n_clips = len(glob.glob(feature_dir + BOX_FEATURE + '*.npy'))
    for clip in range(1, n_clips + 1):
        pruned_boxes = np.load(feature_dir + BOX_FEATURE + '{:04d}.npy'.format(clip))  # (50, args.n_boxes, 4)
        roisavg = np.load(feature_dir + 'roisavg{:04d}.npy'.format(clip))  # (50, args.n_boxes, 512)

        pruned_roisavg = np.zeros((50, args.n_boxes, 512))
        # Keep ROI features only for boxes that survived pruning
        # (a pruned box is stored as an all-zero row of 4 coordinates).
        for frame in range(50):
            for box_id in range(args.n_boxes):
                if not np.array_equal(pruned_boxes[frame][box_id], np.zeros(4)):
                    pruned_roisavg[frame][box_id] = roisavg[frame][box_id]

        np.save('{}pruned_roisavg{:04d}'.format(feature_dir, clip), pruned_roisavg)
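The nested frame/box loops above can be collapsed into a single boolean mask. A minimal vectorized sketch of the same pruning step (the helper name is hypothetical):

import numpy as np

def prune_rois_vectorized(pruned_boxes, roisavg):
    # A box was pruned when all four of its coordinates are zero.
    kept = ~np.all(pruned_boxes == 0, axis=2)   # (n_frames, n_boxes)
    return roisavg * kept[:, :, None]           # zero out features of pruned boxes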
Python examples of numpy.array_equal()
Source: pruned_box_features.py (project: Deep360Pilot-optical-flow, author: yenchenlin)
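For reference, np.array_equal returns a plain Python bool and requires both matching shapes and elementwise equality; a quick illustration:

import numpy as np

print(np.array_equal([1, 2], [1, 2]))      # True
print(np.array_equal([1, 2], [1, 2, 3]))   # False: shapes differ
print(np.array_equal([1, 2], [1, 3]))      # False: values differ
print(np.array_equal([np.nan], [np.nan]))  # False: NaN != NaN by default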
def test_intersection_com_mock_2(self):
    ls = LineString([(1, 1, 9.48024060e+08), (2, 2, 9.49363260e+08),
                     (3, 1, 9.51868860e+08)])
    poly = Polygon([(1, 1), (1, 3), (4, 3), (4, 1), (1, 1)])
    self.traj2.intersection_shapely = MagicMock(return_value=ls)
    response = self.traj2.intersection_shapely(poly)
    ls = np.array(ls)
    trajMock = self.traj2.to_Trajectory(response)
    traj = Trajectory(ls[:, 0], ls[:, 1], ls[:, 2])
    assert np.array_equal(trajMock.getX(), traj.getX())
    assert np.array_equal(trajMock.getY(), traj.getY())
    assert np.array_equal(trajMock.getTime(), traj.getTime())
def _test_FileSink(self, format):
    filename = self._tempfileName('sink_%s' % format)

    complexData = format.startswith('C')
    typecode = format[1]
    dataFormat, dataType = self.TYPEMAP[typecode]
    indata = [dataType(x) for x in range(16)]

    source = sb.DataSource(dataFormat=dataFormat)
    sink = sb.FileSink(filename, midasFile=True)
    source.connect(sink)
    sb.start()
    source.push(indata, complexData=complexData, EOS=True)
    sink.waitForEOS()

    hdr, outdata = bluefile.read(filename)
    self.assertEqual(hdr['format'], format)
    if complexData:
        if dataFormat in ('double', 'float'):
            outdata = list(self._flatten(outdata))
        else:
            outdata = outdata.flatten()
    self.assertTrue(numpy.array_equal(indata, outdata),
                    msg="Format '%s' %s != %s" % (format, indata, outdata))
def _test_FileSource(self, format):
    filename = self._tempfileName('source_%s' % format)

    complexData = format.startswith('C')
    typecode = format[1]
    dataFormat, dataType = self.TYPEMAP[typecode]

    indata = self._generateSourceData(format, 16)
    hdr = bluefile.header(1000, format)
    bluefile.write(filename, hdr, indata)

    source = sb.FileSource(filename, midasFile=True, dataFormat=dataFormat)
    sink = sb.DataSink()
    source.connect(sink)
    sb.start()
    outdata = sink.getData(eos_block=True)

    if complexData:
        self.assertEqual(sink.sri().mode, 1)
        if dataFormat in ('float', 'double'):
            outdata = bulkio_helpers.bulkioComplexToPythonComplexList(outdata)
        else:
            # Pair up interleaved real/imaginary samples.
            outdata = numpy.reshape(outdata, (len(outdata) // 2, 2))
    else:
        self.assertEqual(sink.sri().mode, 0)
    self.assertTrue(numpy.array_equal(indata, outdata),
                    msg='%s != %s' % (indata, outdata))
def Verify(**kwargs):
    '''
    Verification for the signature.
    Inputs:
        msg: the message string sent by the sender
        (z, c): vectors in Zq, the signature
        A: numpy array, the verification key, of dimension n x m
        T: the matrix AS mod q, used to verify the signature
    '''
    msg, z, c, A, T, sd, eta, m, k, q = kwargs['msg'], kwargs['z'], kwargs['c'], \
        kwargs['A'], kwargs['T'], kwargs['sd'], kwargs['eta'], kwargs['m'], \
        kwargs['k'], kwargs['q']
    norm_bound = eta * sd * np.sqrt(m)
    # Check that the norm of z is small and that H(Az - Tc mod q, msg) hashes to c.
    vec = util.vector_to_Zq(np.array(np.matmul(A, z) - np.matmul(T, c)), q)
    hashedList = util.hash_to_baseb(vec, msg, 3, k)
    print(hashedList, c)
    if np.sqrt(z.dot(z)) <= norm_bound and np.array_equal(c, hashedList):
        return True
    else:
        return False
def test_non_aligned_read():
    delete_layer()
    cv, data = create_layer(size=(128, 64, 64, 1), offset=(0, 0, 0))
    # the last dimension is the number of channels
    assert cv[31:65, 0:64, 0:64].shape == (34, 64, 64, 1)
    assert np.all(cv[31:65, 0:64, 0:64] == data[31:65, :64, :64, :])

    # read a single pixel
    delete_layer()
    cv, data = create_layer(size=(64, 64, 64, 1), offset=(0, 0, 0))
    # the last dimension is the number of channels
    assert cv[22:23, 22:23, 22:23].shape == (1, 1, 1, 1)
    assert np.all(cv[22:23, 22:23, 22:23] == data[22:23, 22:23, 22:23, :])

    # Test steps (negative steps are not supported)
    img1 = cv[::2, ::2, ::2, :]
    img2 = cv[:, :, :, :][::2, ::2, ::2, :]
    assert np.array_equal(img1, img2)
def __setitem__(self, slices, img):
    imgshape = list(img.shape)
    if len(imgshape) == 3:
        imgshape = imgshape + [self.num_channels]

    maxsize = list(self.bounds.maxpt) + [self.num_channels]
    minsize = list(self.bounds.minpt) + [0]
    slices = generate_slices(slices, minsize, maxsize)
    bbox = Bbox.from_slices(slices)

    slice_shape = list(bbox.size3()) + [slices[3].stop - slices[3].start]

    if not np.array_equal(imgshape, slice_shape):
        raise ValueError("Illegal slicing. Image shape {} does not match slice shape {}".format(imgshape, slice_shape))

    if self.path.protocol == 'boss':
        self.upload_boss_image(img, bbox.minpt)
    else:
        self.upload_image(img, bbox.minpt)
def test_dense_to_sparse(self):
    """Test if `dense_to_sparse` works properly."""
    with tf.Session().as_default():
        dense = tf.constant([[1., 2., 0.], [0., 0., 3.]], dtype=tf.float32)
        sparse = dense_to_sparse(dense)
        self.assertTrue(np.array_equal(sparse.indices.eval(), np.array([[0, 0], [0, 1], [1, 2]])))
        self.assertTrue(np.array_equal(sparse.values.eval(), np.array([1., 2., 3.])))

        mask = tf.constant([[0, 1, 0], [1, 0, 0]], dtype=tf.int32)
        masked = dense_to_sparse(dense, mask)
        self.assertTrue(np.array_equal(masked.indices.eval(), np.array([[0, 1], [1, 0]])))
        self.assertTrue(np.array_equal(masked.values.eval(), np.array([2., 0.])))
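The dense_to_sparse helper under test is not shown on this page. A minimal sketch that satisfies both assertions above, written against the TF1-style session API the test uses (my reconstruction, not necessarily the project's code):

import tensorflow as tf

def dense_to_sparse(dense, mask=None):
    # Keep the non-zero entries of the mask if one is given, otherwise the
    # non-zero entries of the dense tensor itself.
    selector = dense if mask is None else mask
    indices = tf.where(tf.not_equal(selector, 0))
    values = tf.gather_nd(dense, indices)  # may include explicit zeros when masked
    return tf.SparseTensor(indices, values, tf.shape(dense, out_type=tf.int64))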
Source: word2vec_helpers.py (project: question-classification-cnn-rnn-attention, author: sefira)
def __init__(self, test_model=False, verify_model=True):
    model = Word2Vec.load(modelfile)
    if test_model:
        acc = model.accuracy(questionfile)
        logger.info("Test model " + modelfile + " in " + questionfile)
    self.vector_size = model.vector_size
    self.vocab_size = len(model.wv.vocab) + 1  # index 0 is reserved; the +1 offsets below assume it
    self.word2index = self.GetWord2Index(model)
    self.index2word = self.GetIndex2Word(model)
    self.wordvector = self.GetWordVector(model)

    if verify_model:
        logger.info("Verifying imported word2vec model")
        random_state = check_random_state(12)
        check_index = random_state.randint(low=0, high=self.vocab_size - 2, size=1000)
        for index in check_index:
            word_wv = model.wv.index2word[index]
            word_our = self.index2word[index + 1]
            # print(index, word_wv, word_our)
            assert word_wv == word_our
            assert model.wv.vocab[word_our].index == self.word2index[word_our] - 1
            assert np.array_equal(model.wv[word_our], self.wordvector[self.word2index[word_our]])
        logger.info("Imported word2vec model is verified")
def test_vectorized_jaccard_sim():
    # The vectorized version of jaccard similarity is 20x faster, but it is
    # harder to understand. Compute it the simple way and compare to the
    # vectorized version.
    def jaccard_sim(X, Y):
        assert len(X) == len(Y)
        a = np.sum((X == 1) & (Y == 1))
        d = np.sum((X == 0) & (Y == 0))
        return a / float(len(X) - d)

    def binary_sim(mat):
        n_rows = mat.shape[0]
        out = np.empty((n_rows, n_rows), dtype=np.float64)
        for i in range(n_rows):
            out[i][i] = 1.
            for j in range(0, i):
                out[i][j] = jaccard_sim(mat[i], mat[j])
                out[j][i] = out[i][j]
        return out

    # Simulate 200 queries with 100 shared page ids
    matrix = np.random.rand(200, 100) > 0.7
    simple = binary_sim(matrix)
    vectorized = mjolnir.norm_query._binary_sim(matrix)
    assert np.array_equal(simple, vectorized)
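mjolnir.norm_query._binary_sim itself is not reproduced here, but the algebra of jaccard_sim above vectorizes directly into matrix products. A sketch of such a vectorized version (my reconstruction; all-zero rows yield nan here, whereas the loop version pins the diagonal to 1):

import numpy as np

def binary_sim_vectorized(mat):
    """Pairwise Jaccard similarity over the rows of a boolean matrix."""
    m = mat.astype(np.float64)
    n_cols = m.shape[1]
    both_ones = m @ m.T                       # a: columns where both rows are 1
    both_zeros = (1.0 - m) @ (1.0 - m).T      # d: columns where both rows are 0
    return both_ones / (n_cols - both_zeros)  # a / (len - d), as in jaccard_sim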
def test_classify_image_in_memory():
    # create mineral classifier instance with image loading enabled
    mc = mineral.MineralClassification(libraryFilenames[0], in_memory=True)

    # for each of the test images
    for image_file_name in test_classifyImage_testFilenames:
        # classify the test image
        classified_file_name = image_file_name[:-4] + "_class_test.hdr"
        mc.classify_image(image_file_name, classified_file_name)
        actual = spectral.open_image(classified_file_name)

        # classified image for comparison
        expected = spectral.open_image(image_file_name[:-4] + "_class.hdr")

        # verify that every pixel has the same classification
        assert numpy.array_equal(expected.asarray(), actual.asarray())

# test files for classify image threshold and subset tests
def _core_dist(point, neighbors, dist_function):
    """
    Computes the core distance of a point.
    Core distance is the inverse density of an object.

    Args:
        point (np.array): array of dimensions (n_features,)
            point to compute core distance of
        neighbors (np.ndarray): array of dimensions (n_neighbors, n_features)
            array of all other points in object class
        dist_function (func): function to determine distance between objects
            func args must be [np.array, np.array] where each array is a point

    Returns: core_dist (float)
        inverse density of point
    """
    n_features = np.shape(point)[0]
    n_neighbors = np.shape(neighbors)[0]  # rows are neighbors per the docstring

    numerator = 0
    for row in neighbors:
        if not np.array_equal(point, row):
            numerator += (1 / dist_function(point, row)) ** n_features
    core_dist = (numerator / n_neighbors) ** (-1 / n_features)
    return core_dist
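A minimal usage sketch for _core_dist as defined above, with a plain Euclidean metric and made-up points:

import numpy as np

def euclidean(a, b):
    return np.linalg.norm(a - b)

point = np.array([0.0, 0.0])
neighbors = np.array([[1.0, 0.0],
                      [0.0, 2.0],
                      [3.0, 3.0]])

# Denser neighborhoods (more close-by rows) produce a smaller core distance.
print(_core_dist(point, neighbors, euclidean))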
def test_intersection_com_mock(self):
    ls = LineString([(1.5, 1, 9.48024060e+08), (2, 2, 9.49363260e+08),
                     (3, 2, 9.51868860e+08), (4, 3, 9.53208060e+08)])
    poly = Polygon([(1, 1), (1, 3), (4, 3), (4, 1), (1, 1)])
    self.traj.intersection_shapely = MagicMock(return_value=ls)
    response = self.traj.intersection_shapely(poly)
    ls = np.array(ls)
    trajMock = self.traj.to_Trajectory(response)
    traj = Trajectory(ls[:, 0], ls[:, 1], ls[:, 2])
    assert np.array_equal(trajMock.getX(), traj.getX())
    assert np.array_equal(trajMock.getY(), traj.getY())
    assert np.array_equal(trajMock.getTime(), traj.getTime())
def test_array_equal(self):
    res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
    assert_(res)
    assert_(type(res) is bool)
    res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3]))
    assert_(not res)
    assert_(type(res) is bool)
    res = np.array_equal(np.array([1, 2]), np.array([3, 4]))
    assert_(not res)
    assert_(type(res) is bool)
    res = np.array_equal(np.array([1, 2]), np.array([1, 3]))
    assert_(not res)
    assert_(type(res) is bool)
    res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1'))
    assert_(res)
    assert_(type(res) is bool)
    res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'),
                         np.array([('a', 1)], dtype='S1,u4'))
    assert_(res)
    assert_(type(res) is bool)
def test_slice_metadata_using_already_sliced_data_df(self):
    data = pd.DataFrame([[2, 3], [5, 6], [11, 12]],
                        index=["a", "b", "d"],
                        columns=["f", "g"])
    row_meta = pd.DataFrame([["rm1", "rm2"], ["rm3", "rm4"],
                             ["rm5", "rm6"], ["rm7", "rm8"]],
                            index=["a", "b", "c", "d"],
                            columns=["row_field1", "row_field2"])
    col_meta = pd.DataFrame([["cm1", "cm2"], ["cm3", "cm4"], ["cm5", "cm6"]],
                            index=["e", "f", "g"],
                            columns=["col_field1", "col_field2"])
    e_row_meta = pd.DataFrame([["rm1", "rm2"], ["rm3", "rm4"], ["rm7", "rm8"]],
                              index=["a", "b", "d"],
                              columns=["row_field1", "row_field2"])
    e_col_meta = pd.DataFrame([["cm3", "cm4"], ["cm5", "cm6"]],
                              index=["f", "g"],
                              columns=["col_field1", "col_field2"])

    out_gct = dry.slice_metadata_using_already_sliced_data_df(data, row_meta, col_meta)

    self.assertTrue(np.array_equal(out_gct.row_metadata_df, e_row_meta),
                    "row_metadata_df is wrong: \n{}".format(out_gct.row_metadata_df))
    self.assertTrue(np.array_equal(out_gct.col_metadata_df, e_col_meta),
                    "col_metadata_df is wrong: \n{}".format(out_gct.col_metadata_df))
def test_make_norm_ndarray(self):
    ROW_SUBSET_FIELD = "pr_probe_normalization_group"
    COL_SUBSET_FIELD = "det_normalization_group_vector"
    row_df = pd.DataFrame(np.array([["8350", "1"], ["8350", "1"],
                                    ["8350", "2"], ["8350", "2"]]),
                          index=["r1", "r2", "r3", "r4"],
                          columns=["pr_gene_id", "pr_probe_normalization_group"])
    col_df = pd.DataFrame(np.array([["G-0022", "1,1"], ["G-0022", "1,1"], ["G-0022", "1,2"],
                                    ["G-0022", "2,2"], ["G-0022", "2,2"]]),
                          index=["c1", "c2", "c3", "c4", "c5"],
                          columns=["det_plate", "det_normalization_group_vector"])
    e_norm_ndarray = np.array([[1, 1, 1, 2, 2],
                               [1, 1, 1, 2, 2],
                               [1, 1, 2, 2, 2],
                               [1, 1, 2, 2, 2]])

    norm_ndarray = tear.make_norm_ndarray(row_df, col_df, ROW_SUBSET_FIELD, COL_SUBSET_FIELD)

    self.assertTrue(np.array_equal(norm_ndarray, e_norm_ndarray),
                    ("\nExpected out:\n{} " +
                     "\nActual out:\n{}").format(e_norm_ndarray, norm_ndarray))
def test_save_txt_and_load_txt():
    # Get sample data set
    dataset = get_dataset()

    # Get in-memory string handle
    with StringIO() as handle:
        # Save text to handle
        dataset.save_txt(handle)
        handle.seek(0)

        # Load text from handle
        dataset2 = LtrDataset.load_txt(handle)

        # Assert that everything loaded correctly
        assert_true(np.array_equal(dataset.feature_vectors,
                                   dataset2.feature_vectors))
        assert_true(np.array_equal(dataset.relevance_scores,
                                   dataset2.relevance_scores))
        assert_true(np.array_equal(dataset.query_pointer,
                                   dataset2.query_pointer))
        assert_true(np.array_equal(dataset.query_ids, dataset2.query_ids))
def test_save_and_load():
    # Get sample data set
    dataset = get_dataset()

    # Get in-memory binary handle
    with BytesIO() as handle:
        # Save binary to handle
        dataset.save(handle)
        handle.seek(0)

        # Load binary from handle
        dataset2 = LtrDataset.load(handle)

        # Assert that everything loaded correctly
        assert_true(np.array_equal(dataset.feature_vectors,
                                   dataset2.feature_vectors))
        assert_true(np.array_equal(dataset.relevance_scores,
                                   dataset2.relevance_scores))
        assert_true(np.array_equal(dataset.query_pointer,
                                   dataset2.query_pointer))
        assert_true(np.array_equal(dataset.query_ids, dataset2.query_ids))
def test_backward():
    # Construct test data
    x = Variable(np.array([5., 3., 3., 1., 0.]))
    g = Variable(np.ones(5))
    expected_result = np.array([0.7717692057972512, 0.562087881852882,
                                1.4058826163342215, 0.9213241007090265,
                                1.3389361953066183])

    # Generate object
    lcse = LogCumsumExp()

    # Run forward and backward pass
    lcse.forward((x.data,))
    result = lcse.backward((x.data,), (g.data,))

    # Assert that the result equals the expected result
    assert_true(np.array_equal(result[0], expected_result))
def d2_at_Z(self, z=15.0):
    def is_z(ax):
        for axis in np.array([[z, 0, 0], [0, z, 0], [0, 0, z]]):
            if np.array_equal(ax, axis):
                return True
        return False

    if is_z(self._lattice[0]):
        self.swap_axis((2, 1, 0))
        return None
    elif is_z(self._lattice[1]):
        self.swap_axis((0, 2, 1))
        return None
    else:
        # print("DO NOTHING...")
        return None
def test_prepare_fcma_data():
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    mask = io.load_boolean_mask(mask_file)
    conditions = io.load_labels(epoch_file)
    raw_data, _, labels = prepare_fcma_data(images, conditions, mask)
    expected_raw_data = np.load(expected_dir / 'expected_raw_data.npy')
    assert len(raw_data) == len(expected_raw_data), \
        'numbers of epochs do not match in test_prepare_fcma_data'
    for idx in range(len(raw_data)):
        assert np.allclose(raw_data[idx], expected_raw_data[idx]), \
            'raw data do not match in test_prepare_fcma_data'
    # expected_labels is assumed to be defined at module scope in the
    # original test file
    assert np.array_equal(labels, expected_labels), \
        'the labels do not match in test_prepare_fcma_data'

    from brainiak.fcma.preprocessing import RandomType
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    random_raw_data, _, _ = prepare_fcma_data(images, conditions, mask,
                                              random=RandomType.REPRODUCIBLE)
    assert len(random_raw_data) == len(expected_raw_data), \
        'numbers of epochs do not match in test_prepare_fcma_data'
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    random_raw_data, _, _ = prepare_fcma_data(images, conditions, mask,
                                              random=RandomType.UNREPRODUCIBLE)
    assert len(random_raw_data) == len(expected_raw_data), \
        'numbers of epochs do not match in test_prepare_fcma_data'
def test_weighted_var():
    es = brainiak.eventseg.event.EventSegment(2)

    D = np.zeros((8, 4))
    for t in range(4):
        D[t, :] = (1 / np.sqrt(4 / 3)) * np.array([-1, -1, 1, 1])
    for t in range(4, 8):
        D[t, :] = (1 / np.sqrt(4 / 3)) * np.array([1, 1, -1, -1])
    mean_pat = D[[0, 4], :].T

    weights = np.zeros((8, 2))
    weights[:, 0] = [1, 1, 1, 1, 0, 0, 0, 0]
    weights[:, 1] = [0, 0, 0, 0, 1, 1, 1, 1]
    assert np.array_equal(
        es.calc_weighted_event_var(D, weights, mean_pat), [0, 0]), \
        "Failed to compute variance with 0/1 weights"

    weights[:, 0] = [1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5]
    weights[:, 1] = [0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1]
    true_var = (4 * 0.5 * 12) / (6 - 5 / 6) * np.ones(2) / 4
    assert np.allclose(
        es.calc_weighted_event_var(D, weights, mean_pat), true_var), \
        "Failed to compute variance with fractional weights"
def test_sentence_to_seq(sentence_to_seq):
    sentence = 'this is a test sentence'
    vocab_to_int = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, 'this': 3, 'is': 6, 'a': 5, 'sentence': 4}

    output = sentence_to_seq(sentence, vocab_to_int)

    assert len(output) == 5, \
        'Wrong length. Found a length of {}'.format(len(output))
    assert output[3] == 2, \
        'Missing <UNK> id.'
    assert np.array_equal(output, [3, 6, 5, 2, 4]), \
        'Incorrect output. Found {}'.format(output)

    _print_success_message()
def test_CentreTransform_caching(make_random_data):
    # Generate an initial set of data
    x, mu, std = make_random_data

    # Apply the CentreTransform to the first dataset so it caches the mean
    x_copy = x.copy()
    center_transformer = CentreTransform()
    center_transformer(x_copy)

    # Now apply the centre transform to a matrix that has been translated
    x_translated = x + 3.0 * mu
    x_expected = x_translated - mu
    x_produced = center_transformer(x_translated)

    # Check that the transformer used the cached mean mu instead of the
    # mean of the translated data (about 4 * mu in this case)
    assert np.array_equal(x_expected, x_produced)
def test_StandardiseTransform_caching(make_random_data):
    # Generate an initial set of data
    x, mu, std = make_random_data

    # Apply the CentreTransform to the first dataset so it caches the mean
    x_copy = x.copy()
    center_transformer = CentreTransform()
    center_transformer(x_copy)

    # Now apply the centre transform to a matrix translated by 3 * mu
    x_translated = x + 3.0 * mu
    x_expected = x_translated - mu
    x_produced = center_transformer(x_translated)

    # Check that the transformer used the cached mean mu instead of the
    # mean of the translated data (about 4 * mu in this case)
    assert np.array_equal(x_expected, x_produced)
def test_atoms_to_system(self):
    """Tests that an ASE Atoms object is successfully converted to a System object.
    """
    class NaClFactory(SimpleCubicFactory):
        "A factory for creating NaCl (B1, Rocksalt) lattices."

        bravais_basis = [[0, 0, 0], [0, 0, 0.5], [0, 0.5, 0], [0, 0.5, 0.5],
                         [0.5, 0, 0], [0.5, 0, 0.5], [0.5, 0.5, 0],
                         [0.5, 0.5, 0.5]]
        element_basis = (0, 1, 1, 0, 1, 0, 0, 1)

    nacl = NaClFactory()(symbol=["Na", "Cl"], latticeconstant=5.6402)
    system = System.from_atoms(nacl)

    self.assertTrue(np.array_equal(nacl.get_positions(), system.get_positions()))
    self.assertTrue(np.array_equal(nacl.get_initial_charges(), system.get_initial_charges()))
    self.assertTrue(np.array_equal(nacl.get_atomic_numbers(), system.get_atomic_numbers()))
    self.assertTrue(np.array_equal(nacl.get_chemical_symbols(), system.get_chemical_symbols()))
    self.assertTrue(np.array_equal(nacl.get_cell(), system.get_cell()))
    self.assertTrue(np.array_equal(nacl.get_pbc(), system.get_pbc()))
    self.assertTrue(np.array_equal(nacl.get_scaled_positions(), system.get_scaled_positions()))
def test_matrix(self):
    desc = CoulombMatrix(n_atoms_max=5, flatten=False)
    cm = desc.create(H2O)

    # Test against assumed values
    q = H2O.get_initial_charges()
    p = H2O.get_positions()
    norm = np.linalg.norm
    assumed = np.array(
        [
            [0.5*q[0]**2.4, q[0]*q[1]/(norm(p[0]-p[1])), q[0]*q[2]/(norm(p[0]-p[2]))],
            [q[1]*q[0]/(norm(p[1]-p[0])), 0.5*q[1]**2.4, q[1]*q[2]/(norm(p[1]-p[2]))],
            [q[2]*q[0]/(norm(p[2]-p[0])), q[2]*q[1]/(norm(p[2]-p[1])), 0.5*q[2]**2.4],
        ]
    )
    zeros = np.zeros((5, 5))
    zeros[:3, :3] = assumed
    assumed = zeros

    self.assertTrue(np.array_equal(cm, assumed))
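The assumed matrix spelled out above is the standard Coulomb matrix: 0.5 * q[i]**2.4 on the diagonal and q[i]*q[j] / |p[i] - p[j]| off it. A vectorized reconstruction of that formula (a sketch, not the descriptor library's implementation):

import numpy as np

def coulomb_matrix(charges, positions):
    q = np.asarray(charges, dtype=float)
    r = np.asarray(positions, dtype=float)
    dist = np.linalg.norm(r[:, None, :] - r[None, :, :], axis=-1)
    with np.errstate(divide="ignore"):
        m = np.outer(q, q) / dist        # off-diagonal pair terms q_i*q_j/|r_i-r_j|
    np.fill_diagonal(m, 0.5 * q ** 2.4)  # diagonal self-interaction terms
    return m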
def test_cputensor_add():
    """TODO."""
    Y = ng.make_axis(length=2)
    M = ng.make_axis(length=2)
    N = ng.make_axis(length=2)

    a = ng.constant(np.array([3, 5], dtype=np.float32), [Y])
    b = ng.constant(np.array([3, 5], dtype=np.float32), [Y])
    c = a + b

    with executor(c) as ex:
        result = ex()
    assert np.array_equal(result, [6, 10])

    np_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
    np_b = np.array([[1, 2], [3, 4]], dtype=np.float32)
    np_c = np_a + np_b

    a = ng.constant(np_a, [M, N])
    b = ng.constant(np_b, [M, N])
    c = a + b

    with executor(c) as ex:
        result = ex()
    assert np.array_equal(result, np_c)