def __init__(self, source, **params):
    # _Graph.__init__(self)
    self.is_static = False
    if isinstance(source, str):  # it is a file
        self._load(source, **params)
    else:  # source must be an EventQueue then
        # to do: read from event queue
        # should also get self.starts, ...
        pass
    self.t_start = params.get('t_start', np.min(self.starts))
    self.t_stop = params.get('t_stop', np.max(self.stops))
    # ToDo: Ideally only use self.all_nodes
    self.all_nodes = list(np.union1d(self.node1s, self.node2s))
    all_nodes = list(np.union1d(self.node1s, self.node2s))
    n = len(self.all_nodes)

    def get_id(an_id):
        return all_nodes.index(an_id)

    v_get_id = np.vectorize(get_id)
    # now we need to remap the node ids
    self.node1s = v_get_id(self.node1s)
    self.node2s = v_get_id(self.node2s)
    _Graph.__init__(self, n=n)
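The `np.vectorize(get_id)` approach above calls `list.index` once per edge endpoint, which is linear in the number of nodes for every lookup. A minimal sketch of the same 0..n-1 relabelling done with pure NumPy; the `node1s`/`node2s` arrays here are made-up plain arrays, not the class attributes above:

import numpy as np

# hypothetical raw endpoint ids
node1s = np.array([10, 40, 40, 7])
node2s = np.array([7, 10, 99, 40])

all_nodes = np.union1d(node1s, node2s)                 # sorted unique ids: [7, 10, 40, 99]
# np.searchsorted maps each original id to its position in the sorted union,
# which is the same relabelling the constructor builds with get_id
node1s_remapped = np.searchsorted(all_nodes, node1s)   # -> [1, 2, 2, 0]
node2s_remapped = np.searchsorted(all_nodes, node2s)   # -> [0, 1, 3, 2]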
def uunion1d(arr1, arr2):
    """Find the union of two arrays.

    A wrapper around numpy.union1d that preserves units. All input arrays
    must have the same units. See the documentation of numpy.union1d for
    full details.

    Examples
    --------
    >>> A = yt.YTArray([1, 2, 3], 'cm')
    >>> B = yt.YTArray([2, 3, 4], 'cm')
    >>> uunion1d(A, B)
    YTArray([ 1., 2., 3., 4.]) cm

    """
    v = np.union1d(arr1, arr2)
    v = validate_numpy_wrapper_units(v, [arr1, arr2])
    return v
def __apply_func(self, other, func_name):
    if isinstance(other, Signal):
        time = np.union1d(self.timestamps, other.timestamps)
        s = self.interp(time).samples
        o = other.interp(time).samples
        func = getattr(s, func_name)
        s = func(o)
    elif other is None:
        s = self.samples
        time = self.timestamps
    else:
        func = getattr(self.samples, func_name)
        s = func(other)
        time = self.timestamps
    return Signal(s,
                  time,
                  self.unit,
                  self.name,
                  self.info)
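A NumPy-only sketch of the alignment idea used above: resample two sampled signals onto the union of their timestamps so that an element-wise operation is well defined. `np.interp` stands in for the library's `Signal.interp`, which is an assumption about its behaviour:

import numpy as np

t_a = np.array([0.0, 1.0, 2.0])
a = np.array([0.0, 10.0, 20.0])
t_b = np.array([0.5, 1.5, 2.5])
b = np.array([1.0, 2.0, 3.0])

time = np.union1d(t_a, t_b)            # common time base for both signals
a_on_time = np.interp(time, t_a, a)    # resample both onto the common base
b_on_time = np.interp(time, t_b, b)
summed = a_on_time + b_on_time         # roughly what __apply_func(other, '__add__') produces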
def test_confusion_matrix():
    # Defining numpy implementation of confusion matrix
    def numpy_conf_mat(actual, pred):
        order = numpy.union1d(actual, pred)
        colA = numpy.matrix(actual).T
        colP = numpy.matrix(pred).T
        oneHotA = colA.__eq__(order).astype('int64')
        oneHotP = colP.__eq__(order).astype('int64')
        conf_mat = numpy.dot(oneHotA.T, oneHotP)
        conf_mat = numpy.asarray(conf_mat)
        return [conf_mat, order]

    x = tensor.vector()
    y = tensor.vector()
    f = theano.function([x, y], confusion_matrix(x, y))
    list_inputs = [[[0, 1, 2, 1, 0], [0, 0, 2, 1, 2]],
                   [[2, 0, 2, 2, 0, 1], [0, 0, 2, 2, 0, 2]]]
    for case in list_inputs:
        a = numpy.asarray(case[0])
        b = numpy.asarray(case[1])
        out_exp = numpy_conf_mat(a, b)
        outs = f(case[0], case[1])
        for exp, out in zip(out_exp, outs):
            utt.assert_allclose(exp, out)
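The one-hot trick in `numpy_conf_mat` can be shown without Theano and without `numpy.matrix`; this sketch reproduces the same encoding against the union of observed labels using plain broadcasting:

import numpy as np

actual = np.array([0, 1, 2, 1, 0])
pred = np.array([0, 0, 2, 1, 2])

order = np.union1d(actual, pred)                          # sorted label set, here [0, 1, 2]
one_hot_a = (actual[:, None] == order).astype(np.int64)   # shape (n_samples, n_labels)
one_hot_p = (pred[:, None] == order).astype(np.int64)
conf_mat = one_hot_a.T @ one_hot_p
# conf_mat[i, j] counts samples with actual == order[i] and pred == order[j]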
def subtract(curve1, curve2, def_val=0):
    """
    Calculate the difference between curve1 and curve2
    and return a new object whose domain is the union
    of the curve1 and curve2 domains.

    The returned object is of type type(curve1)
    and has the same metadata as the curve1 object.

    :param curve1: first curve to calculate the difference
    :param curve2: second curve to calculate the difference
    :param def_val: default value for points that cannot be interpolated
    :return: new object of type type(curve1) with element-wise difference
             (using interpolation if necessary)
    """
    coord1 = np.union1d(curve1.x, curve2.x)
    y1 = curve1.evaluate_at_x(coord1, def_val)
    y2 = curve2.evaluate_at_x(coord1, def_val)
    coord2 = y1 - y2
    # the below is explained at the end of curve.Curve.change_domain()
    obj = curve1.__class__(np.dstack((coord1, coord2))[0], **curve1.__dict__['metadata'])
    return obj
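A self-contained sketch of the same idea, with `np.interp` standing in for `evaluate_at_x` (the Curve class and its metadata handling are omitted; only the union-of-domains subtraction is shown, with `def_val` playing the role of the out-of-range fill value):

import numpy as np

x1 = np.array([0.0, 1.0, 2.0]);  y1 = np.array([0.0, 1.0, 4.0])
x2 = np.array([0.5, 1.5, 2.5]);  y2 = np.array([1.0, 1.0, 1.0])

x_union = np.union1d(x1, x2)               # domain of the result
diff = np.interp(x_union, x1, y1, left=0.0, right=0.0) \
     - np.interp(x_union, x2, y2, left=0.0, right=0.0)
result = np.column_stack((x_union, diff))  # analogous to np.dstack((coord1, coord2))[0]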
def relabelAllSequences(zBySeq, specialStateIDs):
    ''' Relabel all sequences in the provided list.

    Returns
    -------
    zBySeq, relabelled so that each label in specialStateIDs
        now corresponds to ids 0, 1, 2, ... L-1
        and all other labels not in that set get ids L, L+1, ...
    '''
    import copy
    zBySeq = copy.deepcopy(zBySeq)
    L = len(specialStateIDs)
    uniqueVals = []
    for z in zBySeq:
        z += 1000
        for kID, kVal in enumerate(specialStateIDs):
            z[z == 1000 + kVal] = -1000 + kID
        uniqueVals = np.union1d(uniqueVals, np.unique(z))
    for z in zBySeq:
        for kID, kVal in enumerate(sorted(uniqueVals)):
            z[z == kVal] = kID
    return zBySeq
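For the simpler case of remapping a single label array to contiguous ids, `np.unique` with `return_inverse` gives the same result as the union1d-then-substitute loop above; a small sketch (it does not handle the specialStateIDs reordering):

import numpy as np

z = np.array([7, 3, 3, 11, 7])
unique_vals, z_relabelled = np.unique(z, return_inverse=True)
# unique_vals -> [3, 7, 11]; z_relabelled -> [1, 0, 0, 2, 1]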
def multi_x_reader(self, spc_file):
    # use x-values as domain
    all_x = []
    for sub in spc_file.sub:
        x = sub.x
        # assume values in x do not repeat
        all_x = np.union1d(all_x, x)
    domain = Orange.data.Domain([Orange.data.ContinuousVariable.make("%f" % f) for f in all_x], None)
    instances = []
    for sub in spc_file.sub:
        x, y = sub.x, sub.y
        newinstance = np.ones(len(all_x)) * np.nan
        ss = np.searchsorted(all_x, x)  # find positions to set
        newinstance[ss] = y
        instances.append(newinstance)
    y_data = np.array(instances).astype(float, order='C')
    return Orange.data.Table.from_numpy(domain, y_data)
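The placement step relies on `all_x` being sorted, which `np.union1d` guarantees, so `np.searchsorted` returns exact positions for values already present. A minimal sketch of aligning two spectra with different x grids into one NaN-padded matrix, without the Orange-specific parts:

import numpy as np

x_a = np.array([400.0, 500.0, 600.0]);  y_a = np.array([1.0, 2.0, 3.0])
x_b = np.array([500.0, 700.0]);         y_b = np.array([9.0, 8.0])

all_x = np.union1d(x_a, x_b)                 # [400., 500., 600., 700.]
table = np.full((2, len(all_x)), np.nan)
table[0, np.searchsorted(all_x, x_a)] = y_a  # exact matches only, as assumed above
table[1, np.searchsorted(all_x, x_b)] = y_b
# row 0 -> [1., 2., 3., nan]; row 1 -> [nan, 9., nan, 8.]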
def pairwiseScore(inFile_1, inFile_2, logDebug, outFile):
    (snpCHR1, snpPOS1, snpGT1, snpWEI1, DPmean1) = parseInput(inFile=inFile_1, logDebug=logDebug)
    (snpCHR2, snpPOS2, snpGT2, snpWEI2, DPmean2) = parseInput(inFile=inFile_2, logDebug=logDebug)
    snpmatch_stats = {}
    unique_1, unique_2, common, scores = 0, 0, 0, 0
    chrs = np.union1d(snpCHR1, snpCHR2)
    for i in chrs:
        perchrTarPosInd1 = np.where(snpCHR1 == i)[0]
        perchrTarPosInd2 = np.where(snpCHR2 == i)[0]
        log.info("Analysing chromosome %s positions", i)
        perchrtarSNPpos1 = snpPOS1[perchrTarPosInd1]
        perchrtarSNPpos2 = snpPOS2[perchrTarPosInd2]
        matchedAccInd1 = np.where(np.in1d(perchrtarSNPpos1, perchrtarSNPpos2))[0]
        matchedAccInd2 = np.where(np.in1d(perchrtarSNPpos2, perchrtarSNPpos1))[0]
        unique_1 = unique_1 + len(perchrTarPosInd1) - len(matchedAccInd1)
        unique_2 = unique_2 + len(perchrTarPosInd2) - len(matchedAccInd2)
        common = common + len(matchedAccInd1)
        scores = scores + np.sum(np.array(snpGT1[matchedAccInd1] == snpGT2[matchedAccInd2], dtype=int))
    snpmatch_stats['unique'] = {"%s" % os.path.basename(inFile_1): [float(unique_1) / len(snpCHR1), len(snpCHR1)], "%s" % os.path.basename(inFile_2): [float(unique_2) / len(snpCHR2), len(snpCHR2)]}
    snpmatch_stats['matches'] = [float(scores) / common, common]
    if not outFile:
        outFile = "genotyper"
    log.info("writing output in a file: %s" % outFile + ".matches.json")
    with open(outFile + ".matches.json", "w") as out_stats:
        out_stats.write(json.dumps(snpmatch_stats))
    log.info("finished!")
def union1d(ar1, ar2):
    """
    Find the union of two arrays.

    Return the unique, sorted array of values that are in either of the two
    input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays. They are flattened if they are not already 1D.

    Returns
    -------
    union1d : ndarray
        Unique, sorted union of the input arrays.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> np.union1d([-1, 0, 1], [-2, 0, 2])
    array([-2, -1, 0, 1, 2])

    To find the union of more than two arrays, use functools.reduce:

    >>> from functools import reduce
    >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
    array([1, 2, 3, 4, 6])

    """
    return unique(np.concatenate((ar1, ar2)))
def intersect_sim(array_1, array_2):
    """Calculate the similarity of two arrays
    by using intersection / union.
    """
    sim = float(np.intersect1d(array_1, array_2).size) / \
        float(np.union1d(array_1, array_2).size)
    return sim
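Usage sketch: the returned value is the Jaccard index of the two arrays treated as sets.

import numpy as np

a = np.array([1, 2, 3, 4])
b = np.array([3, 4, 5])
# |{3, 4}| / |{1, 2, 3, 4, 5}| = 2 / 5
jaccard = float(np.intersect1d(a, b).size) / float(np.union1d(a, b).size)   # 0.4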
def union_classes(eval_segm, gt_segm):
    eval_cl, _ = extract_classes(eval_segm)
    gt_cl, _ = extract_classes(gt_segm)
    cl = np.union1d(eval_cl, gt_cl)
    n_cl = len(cl)
    return cl, n_cl
def __init__(self, edges):
    self.edges = edges
    self.nodes = Nodes(len(numpy.union1d(self.edges.begin, self.edges.end)))
    for i in xrange(len(self.edges)):
        j = i if True else 0
        self.nodes.outgoing[self.edges.begin[i]].append(j)
        self.nodes.incoming[self.edges.end[i]].append(j)
def test_numpy_wrappers():
    a1 = YTArray([1, 2, 3], 'cm')
    a2 = YTArray([2, 3, 4, 5, 6], 'cm')
    catenate_answer = [1, 2, 3, 2, 3, 4, 5, 6]
    intersect_answer = [2, 3]
    union_answer = [1, 2, 3, 4, 5, 6]
    assert_array_equal(YTArray(catenate_answer, 'cm'), uconcatenate((a1, a2)))
    assert_array_equal(catenate_answer, np.concatenate((a1, a2)))
    assert_array_equal(YTArray(intersect_answer, 'cm'), uintersect1d(a1, a2))
    assert_array_equal(intersect_answer, np.intersect1d(a1, a2))
    assert_array_equal(YTArray(union_answer, 'cm'), uunion1d(a1, a2))
    assert_array_equal(union_answer, np.union1d(a1, a2))
def test_boolean_spheres_overlap():
    r"""Test to make sure that boolean objects (spheres, overlap)
    behave the way we expect.

    Test overlapping spheres.
    """
    ds = fake_amr_ds()
    sp1 = ds.sphere([0.45, 0.45, 0.45], 0.15)
    sp2 = ds.sphere([0.55, 0.55, 0.55], 0.15)
    # Get indices of both.
    i1 = sp1["index", "morton_index"]
    i2 = sp2["index", "morton_index"]
    # Make some booleans
    bo1 = sp1 & sp2
    bo2 = sp1 - sp2
    bo3 = sp1 | sp2
    bo4 = ds.union([sp1, sp2])
    bo5 = ds.intersection([sp1, sp2])
    # Now make sure the indices also behave as we expect.
    lens = np.intersect1d(i1, i2)
    apple = np.setdiff1d(i1, i2)
    both = np.union1d(i1, i2)
    b1 = bo1["index", "morton_index"]
    b1.sort()
    b2 = bo2["index", "morton_index"]
    b2.sort()
    b3 = bo3["index", "morton_index"]
    b3.sort()
    assert_array_equal(b1, lens)
    assert_array_equal(b2, apple)
    assert_array_equal(b3, both)
    b4 = bo4["index", "morton_index"]
    b4.sort()
    b5 = bo5["index", "morton_index"]
    b5.sort()
    assert_array_equal(b3, b4)
    assert_array_equal(b1, b5)
    bo6 = sp1 ^ sp2
    b6 = bo6["index", "morton_index"]
    b6.sort()
    assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_regions_overlap():
    r"""Test to make sure that boolean objects (regions, overlap)
    behave the way we expect.

    Test overlapping regions.
    """
    ds = fake_amr_ds()
    re1 = ds.region([0.55]*3, [0.5]*3, [0.6]*3)
    re2 = ds.region([0.6]*3, [0.55]*3, [0.65]*3)
    # Get indices of both.
    i1 = re1["index", "morton_index"]
    i2 = re2["index", "morton_index"]
    # Make some booleans
    bo1 = re1 & re2
    bo2 = re1 - re2
    bo3 = re1 | re2
    bo4 = ds.union([re1, re2])
    bo5 = ds.intersection([re1, re2])
    # Now make sure the indices also behave as we expect.
    cube = np.intersect1d(i1, i2)
    bite_cube = np.setdiff1d(i1, i2)
    both = np.union1d(i1, i2)
    b1 = bo1["index", "morton_index"]
    b1.sort()
    b2 = bo2["index", "morton_index"]
    b2.sort()
    b3 = bo3["index", "morton_index"]
    b3.sort()
    assert_array_equal(b1, cube)
    assert_array_equal(b2, bite_cube)
    assert_array_equal(b3, both)
    b4 = bo4["index", "morton_index"]
    b4.sort()
    b5 = bo5["index", "morton_index"]
    b5.sort()
    assert_array_equal(b3, b4)
    assert_array_equal(b1, b5)
    bo6 = re1 ^ re2
    b6 = bo6["index", "morton_index"]
    b6.sort()
    assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_slices_overlap():
    r"""Test to make sure that boolean objects (slices, overlap)
    behave the way we expect.

    Test overlapping slices.
    """
    ds = fake_amr_ds()
    sl1 = ds.r[:, :, 0.25]
    sl2 = ds.r[:, 0.75, :]
    # Get indices of both.
    i1 = sl1["index", "morton_index"]
    i2 = sl2["index", "morton_index"]
    # Make some booleans
    bo1 = sl1 & sl2
    bo2 = sl1 - sl2
    bo3 = sl1 | sl2
    bo4 = ds.union([sl1, sl2])
    bo5 = ds.intersection([sl1, sl2])
    # Now make sure the indices also behave as we expect.
    line = np.intersect1d(i1, i2)
    orig = np.setdiff1d(i1, i2)
    both = np.union1d(i1, i2)
    b1 = bo1["index", "morton_index"]
    b1.sort()
    b2 = bo2["index", "morton_index"]
    b2.sort()
    b3 = bo3["index", "morton_index"]
    b3.sort()
    assert_array_equal(b1, line)
    assert_array_equal(b2, orig)
    assert_array_equal(b3, both)
    b4 = bo4["index", "morton_index"]
    b4.sort()
    b5 = bo5["index", "morton_index"]
    b5.sort()
    assert_array_equal(b3, b4)
    assert_array_equal(b1, b5)
    bo6 = sl1 ^ sl2
    b6 = bo6["index", "morton_index"]
    b6.sort()
    assert_array_equal(b6, np.setxor1d(i1, i2))
def _wmd(self, i, row, X_train):
    """Compute the WMD between training sample i and given test row.

    Assumes that `row` and train samples are sparse BOW vectors summing to 1.
    """
    union_idx = np.union1d(X_train[i].indices, row.indices) - 1
    W_minimal = self.W_embed[union_idx]
    W_dist = euclidean_distances(W_minimal)
    bow_i = X_train[i, union_idx].A.ravel()
    bow_j = row[:, union_idx].A.ravel()
    return emd(bow_i, bow_j, W_dist)
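A sketch of the index-union restriction used above, under the assumption that the documents are sparse bag-of-words rows and the word-distance matrix only needs to cover words appearing in either document. The `-1` offset in the original is specific to that project's vocabulary layout and is dropped here; the final EMD call (e.g. pyemd.emd) is left out.

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.metrics.pairwise import euclidean_distances

# toy word embeddings (vocabulary of 5 words, 3-d vectors) and two BOW rows
W_embed = np.random.RandomState(0).rand(5, 3)
bow_a = csr_matrix(np.array([[0.5, 0.5, 0.0, 0.0, 0.0]]))
bow_b = csr_matrix(np.array([[0.0, 0.5, 0.25, 0.25, 0.0]]))

# restrict the distance matrix to words that appear in either document,
# which is what the union1d over .indices achieves above
union_idx = np.union1d(bow_a.indices, bow_b.indices)
W_dist = euclidean_distances(W_embed[union_idx]).astype(np.float64)
hist_a = bow_a[:, union_idx].toarray().ravel().astype(np.float64)
hist_b = bow_b[:, union_idx].toarray().ravel().astype(np.float64)
# hist_a, hist_b and W_dist can now be fed to an EMD solver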
def _match_score(predicted_biclustering, reference_biclustering, bicluster_attr):
    k = len(predicted_biclustering.biclusters)
    return sum(max(len(np.intersect1d(getattr(bp, bicluster_attr), getattr(bt, bicluster_attr))) /
                   len(np.union1d(getattr(bp, bicluster_attr), getattr(bt, bicluster_attr)))
                   for bt in reference_biclustering.biclusters)
               for bp in predicted_biclustering.biclusters) / k
def liu_wang_match_score(predicted_biclustering, reference_biclustering):
    """Liu & Wang match score.

    Reference
    ---------
    Liu, X., & Wang, L. (2006). Computing the maximum similarity bi-clusters of gene expression data.
    Bioinformatics, 23(1), 50-56.

    Horta, D., & Campello, R. J. G. B. (2014). Similarity measures for comparing biclusterings.
    IEEE/ACM Transactions on Computational Biology and Bioinformatics, 11(5), 942-954.

    Parameters
    ----------
    predicted_biclustering : biclustlib.model.Biclustering
        Predicted biclustering solution.

    reference_biclustering : biclustlib.model.Biclustering
        Reference biclustering solution.

    Returns
    -------
    lw_match_score : float
        Liu and Wang match score between 0.0 and 1.0.
    """
    check = check_biclusterings(predicted_biclustering, reference_biclustering)
    if isinstance(check, float):
        return check
    k = len(predicted_biclustering.biclusters)
    return sum(max((len(np.intersect1d(bp.rows, br.rows)) + len(np.intersect1d(bp.cols, br.cols))) /
                   (len(np.union1d(bp.rows, br.rows)) + len(np.union1d(bp.cols, br.cols)))
                   for br in reference_biclustering.biclusters)
               for bp in predicted_biclustering.biclusters) / k
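The inner expression is a Jaccard-style score over the concatenated row and column index sets; a NumPy-only sketch for two hypothetical biclusters, without the biclustlib objects:

import numpy as np

# hypothetical biclusters given as (rows, cols) index arrays
bp_rows, bp_cols = np.array([0, 1, 2]), np.array([0, 1])
br_rows, br_cols = np.array([1, 2, 3]), np.array([1, 2])

score = (len(np.intersect1d(bp_rows, br_rows)) + len(np.intersect1d(bp_cols, br_cols))) / \
        (len(np.union1d(bp_rows, br_rows)) + len(np.union1d(bp_cols, br_cols)))
# (2 + 1) / (4 + 3) = 3/7, about 0.43; the full score averages the best such
# match over all predicted biclusters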
def crossGenotyper(args):
    ## Get the VCF file (filtered may be) generated by GATK.
    ## inputs:
    # 1) VCF file
    # 2) Parent1 and Parent2
    # 3) SNP matrix (hdf5 file)
    # 4) Bin length, default as 200Kbp
    # 5) Chromosome length
    log.info("loading genotype data for parents")
    if args['father'] is not None:
        log.info("input files: %s and %s" % (args['parents'], args['father']))
        if not (os.path.isfile(args['parents']) and os.path.isfile(args['father'])):
            snpmatch.die("one of the input files does not exist, please provide VCF/BED files for parent genotype information")
        (p1snpCHR, p1snpPOS, p1snpGT, p1snpWEI, p1DPmean) = snpmatch.parseInput(inFile=args['parents'], logDebug=args['logDebug'])
        (p2snpCHR, p2snpPOS, p2snpGT, p2snpWEI, p2DPmean) = snpmatch.parseInput(inFile=args['father'], logDebug=args['logDebug'])
        commonCHRs_ids = np.union1d(p1snpCHR, p2snpCHR)
        commonSNPsCHR = np.zeros(0, dtype=commonCHRs_ids.dtype)
        commonSNPsPOS = np.zeros(0, dtype=int)
        snpsP1 = np.zeros(0, dtype='int8')
        snpsP2 = np.zeros(0, dtype='int8')
        for i in commonCHRs_ids:
            perchrP1inds = np.where(p1snpCHR == i)[0]
            perchrP2inds = np.where(p2snpCHR == i)[0]
            perchrPositions = np.union1d(p1snpPOS[perchrP1inds], p2snpPOS[perchrP2inds])
            commonSNPsCHR = np.append(commonSNPsCHR, np.repeat(i, len(perchrPositions)))
            commonSNPsPOS = np.append(commonSNPsPOS, perchrPositions)
            perchrsnpsP1 = np.repeat(-1, len(perchrPositions)).astype('int8')
            perchrsnpsP2 = np.repeat(-1, len(perchrPositions)).astype('int8')
            perchrsnpsP1_inds = np.where(np.in1d(p1snpPOS[perchrP1inds], perchrPositions))[0]
            perchrsnpsP2_inds = np.where(np.in1d(p2snpPOS[perchrP2inds], perchrPositions))[0]
            snpsP1 = np.append(snpsP1, snpmatch.parseGT(p1snpGT[perchrsnpsP1_inds]))
            snpsP2 = np.append(snpsP2, snpmatch.parseGT(p2snpGT[perchrsnpsP2_inds]))
        log.info("done!")
    else:
        parents = args['parents']
        ## need to filter the SNPs present in C and M
        if not args['hdf5accFile']:
            snpmatch.die("needed a HDF5 genotype file and not specified")
        log.info("loading HDF5 file")
        g_acc = genotype.load_hdf5_genotype_data(args['hdf5accFile'])
        ## die if either parent is not in the dataset
        try:
            indP1 = np.where(g_acc.accessions == parents.split("x")[0])[0][0]
            indP2 = np.where(g_acc.accessions == parents.split("x")[1])[0][0]
        except IndexError:
            snpmatch.die("parents are not in the dataset")
        snpsP1 = g_acc.snps[:, indP1]
        snpsP2 = g_acc.snps[:, indP2]
        commonSNPsCHR = np.array(g_acc.chromosomes)
        commonSNPsPOS = np.array(g_acc.positions)
        log.info("done!")
    log.info("running cross genotyper")
    crossGenotypeWindows(commonSNPsCHR, commonSNPsPOS, snpsP1, snpsP2, args['inFile'], args['binLen'], args['outFile'], args['logDebug'])
def test_boolean_cylinders_overlap():
    r"""Test to make sure that boolean objects (cylinders, overlap)
    behave the way we expect.

    Test overlapping cylinders.
    """
    ds = fake_amr_ds()
    cyl1 = ds.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
    cyl2 = ds.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
    # Get indices of both.
    i1 = cyl1["index", "morton_index"]
    i2 = cyl2["index", "morton_index"]
    # Make some booleans
    bo1 = cyl1 & cyl2
    bo2 = cyl1 - cyl2
    bo3 = cyl1 | cyl2
    bo4 = ds.union([cyl1, cyl2])
    bo5 = ds.intersection([cyl1, cyl2])
    # Now make sure the indices also behave as we expect.
    vlens = np.intersect1d(i1, i2)
    bite_disk = np.setdiff1d(i1, i2)
    both = np.union1d(i1, i2)
    b1 = bo1["index", "morton_index"]
    b1.sort()
    b2 = bo2["index", "morton_index"]
    b2.sort()
    b3 = bo3["index", "morton_index"]
    b3.sort()
    assert_array_equal(b1, vlens)
    assert_array_equal(b2, bite_disk)
    assert_array_equal(b3, both)
    b4 = bo4["index", "morton_index"]
    b4.sort()
    b5 = bo5["index", "morton_index"]
    b5.sort()
    assert_array_equal(b3, b4)
    assert_array_equal(b1, b5)
    bo6 = cyl1 ^ cyl2
    b6 = bo6["index", "morton_index"]
    b6.sort()
    assert_array_equal(b6, np.setxor1d(i1, i2))
    del ds
def test_boolean_mix_periodicity():
    r"""Test that a hybrid boolean region behaves as we expect.

    This also tests nested logic and that periodicity works.
    """
    ds = fake_amr_ds()
    re = ds.region([0.5]*3, [0.0]*3, [1]*3)  # whole thing
    sp = ds.sphere([0.95]*3, 0.3)  # wraps around
    cyl = ds.disk([0.05]*3, [1, 1, 1], 0.1, 0.4)  # wraps around
    # Get original indices
    rei = re["index", "morton_index"]
    spi = sp["index", "morton_index"]
    cyli = cyl["index", "morton_index"]
    # Make some booleans
    # whole box minus spherical bites at corners
    bo1 = re - sp
    # sphere plus cylinder
    bo2 = sp | cyl
    # a jumble, the region minus the sp+cyl
    bo3 = re - (sp | cyl)
    # Now make sure the indices also behave as we expect.
    bo4 = ds.union([re, sp, cyl])
    bo5 = ds.intersection([re, sp, cyl])
    expect = np.setdiff1d(rei, spi)
    ii = bo1["index", "morton_index"]
    ii.sort()
    assert_array_equal(expect, ii)
    #
    expect = np.union1d(spi, cyli)
    ii = bo2["index", "morton_index"]
    ii.sort()
    assert_array_equal(expect, ii)
    #
    expect = np.union1d(spi, cyli)
    expect = np.setdiff1d(rei, expect)
    ii = bo3["index", "morton_index"]
    ii.sort()
    assert_array_equal(expect, ii)
    b4 = bo4["index", "morton_index"]
    b4.sort()
    b5 = bo5["index", "morton_index"]
    b5.sort()
    ii = np.union1d(np.union1d(rei, cyli), spi)
    ii.sort()
    assert_array_equal(ii, b4)
    ii = np.intersect1d(np.intersect1d(rei, cyli), spi)
    ii.sort()
    assert_array_equal(ii, b5)
    bo6 = (re ^ sp) ^ cyl
    b6 = bo6["index", "morton_index"]
    b6.sort()
    assert_array_equal(b6, np.setxor1d(np.setxor1d(rei, spi), cyli))
def test_boolean_ray_region_overlap():
    r"""Test to make sure that boolean objects (ray, region, overlap)
    behave the way we expect.

    Test overlapping ray and region. This also checks that the original
    objects don't change as part of constructing the booleans.
    """
    ds = fake_amr_ds()
    re = ds.box([0.25]*3, [0.75]*3)
    ra = ds.ray([0]*3, [1]*3)
    # Get indices of both.
    i1 = re["index", "morton_index"]
    i2 = ra["index", "morton_index"]
    # Make some booleans
    bo1 = re & ra
    bo2 = re - ra
    bo3 = re | ra
    bo4 = ds.union([re, ra])
    bo5 = ds.intersection([re, ra])
    # Now make sure the indices also behave as we expect.
    short_line = np.intersect1d(i1, i2)
    cube_minus_line = np.setdiff1d(i1, i2)
    both = np.union1d(i1, i2)
    b1 = bo1["index", "morton_index"]
    b1.sort()
    b2 = bo2["index", "morton_index"]
    b2.sort()
    b3 = bo3["index", "morton_index"]
    b3.sort()
    assert_array_equal(b1, short_line)
    assert_array_equal(b2, cube_minus_line)
    assert_array_equal(b3, both)
    b4 = bo4["index", "morton_index"]
    b4.sort()
    b5 = bo5["index", "morton_index"]
    b5.sort()
    assert_array_equal(b3, b4)
    assert_array_equal(b1, b5)
    bo6 = re ^ ra
    b6 = bo6["index", "morton_index"]
    b6.sort()
    assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_rays_overlap():
    r"""Test to make sure that boolean objects (rays, overlap)
    behave the way we expect.

    Test overlapping rays.
    """
    ds = fake_amr_ds()
    ra1 = ds.ray([0]*3, [1]*3)
    ra2 = ds.ray([0]*3, [0.5]*3)
    # Get indices of both.
    i1 = ra1["index", "morton_index"]
    i1.sort()
    i2 = ra2["index", "morton_index"]
    i2.sort()
    ii = np.concatenate((i1, i2))
    ii.sort()
    # Make some booleans
    bo1 = ra1 & ra2
    bo2 = ra1 - ra2
    bo3 = ra1 | ra2
    bo4 = ds.union([ra1, ra2])
    bo5 = ds.intersection([ra1, ra2])
    # Now make sure the indices also behave as we expect.
    short_line = np.intersect1d(i1, i2)
    short_line_b = np.setdiff1d(i1, i2)
    full_line = np.union1d(i1, i2)
    b1 = bo1["index", "morton_index"]
    b1.sort()
    b2 = bo2["index", "morton_index"]
    b2.sort()
    b3 = bo3["index", "morton_index"]
    b3.sort()
    assert_array_equal(b1, short_line)
    assert_array_equal(b2, short_line_b)
    assert_array_equal(b3, full_line)
    b4 = bo4["index", "morton_index"]
    b4.sort()
    b5 = bo5["index", "morton_index"]
    b5.sort()
    assert_array_equal(b3, i1)
    assert_array_equal(b3, b4)
    assert_array_equal(b1, b5)
    bo6 = ra1 ^ ra2
    b6 = bo6["index", "morton_index"]
    b6.sort()
    assert_array_equal(b6, np.setxor1d(i1, i2))
def test_boolean_ray_slice_overlap():
    r"""Test to make sure that boolean objects (rays and slices, overlap)
    behave the way we expect.

    Test overlapping rays and slices.
    """
    ds = fake_amr_ds()
    sl = ds.r[:, :, 0.25]
    ra = ds.ray([0, 0, 0.25], [0, 1, 0.25])
    # Get indices of both.
    i1 = sl["index", "morton_index"]
    i1.sort()
    i2 = ra["index", "morton_index"]
    i2.sort()
    ii = np.concatenate((i1, i2))
    ii.sort()
    # Make some booleans
    bo1 = sl & ra
    bo2 = sl - ra
    bo3 = sl | ra
    bo4 = ds.union([sl, ra])
    bo5 = ds.intersection([sl, ra])
    # Now make sure the indices also behave as we expect.
    line = np.intersect1d(i1, i2)
    sheet_minus_line = np.setdiff1d(i1, i2)
    sheet = np.union1d(i1, i2)
    b1 = bo1["index", "morton_index"]
    b1.sort()
    b2 = bo2["index", "morton_index"]
    b2.sort()
    b3 = bo3["index", "morton_index"]
    b3.sort()
    assert_array_equal(b1, line)
    assert_array_equal(b2, sheet_minus_line)
    assert_array_equal(b3, sheet)
    b4 = bo4["index", "morton_index"]
    b4.sort()
    b5 = bo5["index", "morton_index"]
    b5.sort()
    assert_array_equal(b3, i1)
    assert_array_equal(b3, b4)
    assert_array_equal(b1, b5)
    bo6 = sl ^ ra
    b6 = bo6["index", "morton_index"]
    b6.sort()
    assert_array_equal(b6, np.setxor1d(i1, i2))