def test_large_fancy_indexing(self, level=rlevel):
    # Large enough to fail on 64-bit.
    nbits = np.dtype(np.intp).itemsize * 8
    thesize = int((2**nbits)**(1.0/5.0)+1)

    def dp():
        n = 3
        a = np.ones((n,)*5)
        i = np.random.randint(0, n, size=thesize)
        a[np.ix_(i, i, i, i, i)] = 0

    def dp2():
        n = 3
        a = np.ones((n,)*5)
        i = np.random.randint(0, n, size=thesize)
        a[np.ix_(i, i, i, i, i)]

    self.assertRaises(ValueError, dp)
    self.assertRaises(ValueError, dp2)
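For reference, a minimal sketch (not taken from any of the projects quoted here) of what np.ix_ does: it turns 1-D index arrays into an open mesh, so indexing with it selects a full sub-block rather than paired elements.

import numpy as np

a = np.arange(16).reshape(4, 4)
rows = np.array([0, 2])
cols = np.array([1, 3])
sub = a[np.ix_(rows, cols)]   # 2x2 block: rows 0 and 2 crossed with columns 1 and 3
assert sub.shape == (2, 2)
assert np.array_equal(sub, a[[0, 0, 2, 2], [1, 3, 1, 3]].reshape(2, 2))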
def graphlet_kernel(graphs, num_samples):
    N = len(graphs)
    Phi = np.zeros((N, 2**15))
    P = generate_permutation_matrix()

    for i in range(len(graphs)):
        n = graphs[i].number_of_nodes()
        if n >= 6:
            A = nx.to_numpy_matrix(graphs[i])
            A = np.asarray(A, dtype=np.uint8)
            for j in range(num_samples):
                r = np.random.permutation(n)
                window = A[np.ix_(r[:6], r[:6])]
                Phi[i, graphlet_type(window)] += 1
            Phi[i, :] /= num_samples

    K = np.dot(Phi, np.dot(P, np.transpose(Phi)))
    return K
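The np.ix_ call above extracts the adjacency matrix of the subgraph induced by the six sampled nodes. A hedged stand-alone sketch of that step (the graph and node sample below are illustrative, not from the original project):

import numpy as np
import networkx as nx

G = nx.cycle_graph(8)
A = np.asarray(nx.to_numpy_array(G), dtype=np.uint8)     # full adjacency matrix
nodes = np.random.permutation(G.number_of_nodes())[:6]   # sample 6 distinct nodes
window = A[np.ix_(nodes, nodes)]                          # induced-subgraph adjacency
assert window.shape == (6, 6)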
def MakeEquationSystem_volumeControl_extendedFP(w_lst_tmstp, wTip, EltChannel, EltTip, C, dt, Q, ElemArea):
    Ccc = C[np.ix_(EltChannel, EltChannel)]
    Cct = C[np.ix_(EltChannel, EltTip)]

    A = np.hstack((Ccc, -np.ones((EltChannel.size, 1), dtype=np.float64)))
    A = np.vstack((A, np.ones((1, EltChannel.size + 1), dtype=np.float64)))
    A[-1, -1] = 0

    S = -np.dot(Ccc, w_lst_tmstp[EltChannel]) - np.dot(Cct, wTip)
    S = np.append(S, Q * dt / ElemArea - (sum(wTip) - sum(w_lst_tmstp[EltTip])))

    return A, S
#-----------------------------------------------------------------------------------------------------------------------
def jw_number_restrict_operator(operator, n_electrons, n_qubits=None):
    """Restrict a Jordan-Wigner encoded operator to a given particle number.

    Args:
        operator(ndarray or sparse): Numpy operator acting on
            the space of n_qubits.
        n_electrons(int): Number of particles to restrict the operator to.
        n_qubits(int): Number of qubits defining the total state.

    Returns:
        new_operator(ndarray or sparse): Numpy operator restricted to
            acting on states with the same particle number.
    """
    if n_qubits is None:
        n_qubits = int(numpy.log2(operator.shape[0]))

    select_indices = jw_number_indices(n_electrons, n_qubits)
    return operator[numpy.ix_(select_indices, select_indices)]
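Since jw_number_indices is not shown here, a small hedged sketch of the restriction step, with hand-picked indices standing in for its output:

import numpy as np

op = np.diag([0.0, 1.0, 1.0, 2.0])   # toy 2-qubit number operator in the computational basis
select_indices = [1, 2]               # stand-in for the indices of the 1-particle states |01>, |10>
restricted = op[np.ix_(select_indices, select_indices)]
assert restricted.shape == (2, 2)     # operator now acts only on the selected subspace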
def _M2_sparse_sym(Xvar, mask_X, Yvar, mask_Y, weights=None):
    """ 2nd self-symmetric moment matrix exploiting zero input columns

    Computes X'X + Y'Y and X'Y + Y'X
    """
    assert len(mask_X) == len(mask_Y), 'X and Y need to have equal sizes for symmetrization'

    Cxxyy = np.zeros((len(mask_X), len(mask_Y)))
    Cxxyy[np.ix_(mask_X, mask_X)] = _M2_dense(Xvar, Xvar, weights=weights)
    Cxxyy[np.ix_(mask_Y, mask_Y)] += _M2_dense(Yvar, Yvar, weights=weights)

    Cxyyx = np.zeros((len(mask_X), len(mask_Y)))
    Cxy = _M2_dense(Xvar, Yvar, weights=weights)
    Cyx = _M2_dense(Yvar, Xvar, weights=weights)
    Cxyyx[np.ix_(mask_X, mask_Y)] = Cxy
    Cxyyx[np.ix_(mask_Y, mask_X)] += Cyx

    return Cxxyy, Cxyyx
def _cartesian_product(*arrays):
    """
    Get the cartesian product of a number of arrays.

    Parameters
    ----------
    arrays : Iterable[np.ndarray]
        The arrays to get a cartesian product of. Always sorted with respect
        to the original array.

    Returns
    -------
    out : np.ndarray
        The overall cartesian product of all the input arrays.
    """
    broadcastable = np.ix_(*arrays)
    broadcasted = np.broadcast_arrays(*broadcastable)
    rows, cols = np.prod(broadcasted[0].shape), len(broadcasted)
    dtype = np.result_type(*arrays)

    out = np.empty(rows * cols, dtype=dtype)
    start, end = 0, rows
    for a in broadcasted:
        out[start:end] = a.reshape(-1)
        start, end = end, end + rows

    return out.reshape(cols, rows)
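A hedged usage sketch for the helper above (the concrete arrays are illustrative): each column of the result is one combination drawn from the inputs, one row per input array.

import numpy as np

out = _cartesian_product(np.array([0, 1]), np.array([10, 20, 30]))
# out has shape (2, 6); e.g. out[:, 0] == [0, 10] and out[:, -1] == [1, 30]
assert out.shape == (2, 6)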
def _find_motif(self, data, row_indices):
    """Finds the largest xMOTIF (this is the direct implementation of the
    pseudocode of the FindMotif() procedure described in the original paper).
    """
    num_rows, num_cols = data.shape
    best_motif = Bicluster([], [])
    seeds = np.random.choice(num_cols, self.num_seeds, replace=False)

    for s in seeds:
        seed_col = data[row_indices, s][:, np.newaxis]

        for i in range(self.num_sets):
            cols_set = np.random.choice(num_cols, self.set_size, replace=False)

            rows_comp_data = seed_col == data[np.ix_(row_indices, cols_set)]
            selected_rows = np.array([y for x, y in enumerate(row_indices) if np.all(rows_comp_data[x])], np.int)

            seed_values = data[selected_rows, s][:, np.newaxis]
            cols_comp_data = seed_values == data[selected_rows]
            selected_cols = np.array([k for k in range(num_cols) if np.all(cols_comp_data[:, k])])

            if len(selected_cols) >= self.alpha * num_cols and len(selected_rows) > len(best_motif.rows):
                best_motif = Bicluster(selected_rows, selected_cols)

    return best_motif
def _find_constrained_bicluster(self, data):
    """Find a k x l bicluster."""
    num_rows, num_cols = data.shape
    k = random.randint(1, math.ceil(num_rows / 2))
    l = random.randint(1, math.ceil(num_cols / 2))

    cols = np.random.choice(num_cols, size=l, replace=False)
    old_avg, avg = float('-inf'), 0.0

    while abs(avg - old_avg) > self.tol:
        old_avg = avg

        row_sums = np.sum(data[:, cols], axis=1)
        rows = bn.argpartition(row_sums, num_rows - k)[-k:]  # this is usually faster than rows = np.argsort(row_sums)[-k:]

        col_sums = np.sum(data[rows, :], axis=0)
        cols = bn.argpartition(col_sums, num_cols - l)[-l:]  # this is usually faster than cols = np.argsort(col_sums)[-l:]

        avg = np.mean(data[np.ix_(rows, cols)])

    return Bicluster(rows, cols)
def compute_activity_matrix(self, xywrap, thwrap, wdim, pcw):
    """Compute the activation of pose cells. Taken from Renato de Pontes Pereira."""

    # The goal is to return an update matrix that can be added/subtracted
    # from the posecell matrix
    pca_new = np.zeros([PC_DIM_XY, PC_DIM_XY, PC_DIM_TH])

    # for nonzero posecell values
    indices = np.nonzero(self.posecells)

    for i, j, k in itertools.izip(*indices):
        pca_new[np.ix_(xywrap[i:i+wdim],
                       xywrap[j:j+wdim],
                       thwrap[k:k+wdim])] += self.posecells[i, j, k] * pcw

    return pca_new
def section_by_index(array, index, axis=0):
    """
    Take the slice of `array` indexed by entries of `index`
    along the specified `axis`.
    """
    # alternative `axisindex` implementation
    # that avoids the index arithmetic
    # uses `numpy` fancy indexing instead

    # possible index values for each dimension represented
    # as `numpy` arrays all having the shape of `index`
    indices = np.ix_(*[np.arange(dim) for dim in index.shape])
    # the slice is taken along `axis`
    # except for the array `index` itself, the other indices
    # do nothing except trigger `numpy` fancy indexing
    fancy_index = indices[:axis] + (index,) + indices[axis:]
    # result has the same shape as `index`
    return array[fancy_index]
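A hedged usage sketch for section_by_index (the array below is illustrative): picking the argmax entry along axis 0 reproduces the axis-0 maximum.

import numpy as np

array = np.random.rand(4, 3, 5)
index = array.argmax(axis=0)                      # shape (3, 5)
picked = section_by_index(array, index, axis=0)   # same shape as index
assert np.allclose(picked, array.max(axis=0))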
def get_element_type_subset_indices(self):
    """
    It is currently required that two matching atoms have the same element.
    This constructs indices to e.g. the carbon-carbon submatrix.
    """
    # TODO: this is redundant if the elements do not have to match
    unique_elements = np.unique(self.reactants_elements)
    subset_indices = np.empty(unique_elements.size, dtype=object)
    for i, element in enumerate(unique_elements):
        rows = np.where(self.reactants_elements == element)[0]
        cols = np.where(self.products_elements == element)[0]
        subset_indices[i] = np.ix_(rows, cols)
    return subset_indices
def test_regression_1(self):
    # Test that empty inputs create outputs of indexing type, gh-5804
    # Test both lists and arrays
    for func in (range, np.arange):
        a, = np.ix_(func(0))
        assert_equal(a.dtype, np.intp)
def test_shape_and_dtype(self):
    sizes = (4, 5, 3, 2)
    # Test both lists and arrays
    for func in (range, np.arange):
        arrays = np.ix_(*[func(sz) for sz in sizes])
        for k, (a, sz) in enumerate(zip(arrays, sizes)):
            assert_equal(a.shape[k], sz)
            assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k))
            assert_(np.issubdtype(a.dtype, int))
def test_bool(self):
    bool_a = [True, False, True, True]
    int_a, = np.nonzero(bool_a)
    assert_equal(np.ix_(bool_a)[0], int_a)
def test_1d_only(self):
    idx2d = [[1, 2, 3], [4, 5, 6]]
    assert_raises(ValueError, np.ix_, idx2d)
def _gaus_condition(self, xi):
    if np.ma.count_masked(xi) == 0:
        return xi
    a = xi.mask
    b = ~xi.mask
    xb = xi[b].data
    Laa = self.prec[np.ix_(a, a)]
    Lab = self.prec[np.ix_(a, b)]
    xfill = np.empty_like(xi)
    xfill[b] = xb
    xfill[a] = self.mean[a] - solve(Laa, Lab.dot(xb - self.mean[b]))
    return xfill
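The two np.ix_ calls above pull the Laa and Lab blocks of the precision matrix using boolean masks, and the filled values are the standard Gaussian conditional mean. A self-contained hedged sketch of that block extraction (the toy precision matrix, masks, and observations are assumptions):

import numpy as np
from numpy.linalg import solve

prec = np.array([[2.0, 0.5, 0.0],
                 [0.5, 2.0, 0.5],
                 [0.0, 0.5, 2.0]])
mean = np.zeros(3)
a = np.array([True, False, False])   # missing component(s)
b = ~a                               # observed components
xb = np.array([1.0, -1.0])           # observed values
Laa = prec[np.ix_(a, a)]             # precision block among missing dims
Lab = prec[np.ix_(a, b)]             # cross block between missing and observed dims
cond_mean = mean[a] - solve(Laa, Lab.dot(xb - mean[b]))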
def gacPathCondEntropy(IminuszW, cluster_i, cluster_j):
    # Compute conditional complexity from the subpart of the weighted adjacency matrix
    # Inputs:
    #   - IminuszW: the matrix (I - z*P)
    #   - cluster_i: index vector of cluster i
    #   - cluster_j: index vector of cluster j
    # Output:
    #   - L_ij: the sum of conditional complexities of cluster i and j after merging.
    # by Wei Zhang (wzhang009 at gmail.com), June 8, 2011

    num_i = np.size(cluster_i)
    num_j = np.size(cluster_j)

    # detecting cross elements (this check costs much and is unnecessary)
    ijGroupIndex = np.append(cluster_i, cluster_j)

    y_ij = np.zeros((num_i + num_j, 2))  # [y_i, y_j]
    y_ij[:num_i, 0] = 1
    y_ij[num_i:, 1] = 1

    idx = np.ix_(ijGroupIndex, ijGroupIndex)
    L_ij = scipy.linalg.inv(IminuszW[idx]).dot(y_ij)
    L_ij = sum(L_ij[:num_i, 0]) / (num_i * num_i) + sum(L_ij[num_i:, 1]) / (num_j * num_j)

    return L_ij
# From szemeredi_regularity_lemma.py, project dense_graph_reducer by MarcoFiorucci.
def reconstruct_original_mat(self, thresh, intracluster_weight=0):
    """
    Reconstruct a similarity matrix with the same size as the original one, from the reduced similarity matrix.
    :param thresh: a threshold parameter to prune the edges of the graph
    :param intracluster_weight: the weight to assign to each connection generated by the expansion of a cluster
    :return: the reconstructed graph
    """
    reconstructed_mat = np.zeros((self.N, self.N))

    r_nodes = self.classes > 0
    reconstructed_mat[np.ix_(r_nodes, r_nodes)] = intracluster_weight

    for r in range(2, self.k + 1):
        r_nodes = self.classes == r
        reconstructed_mat[np.ix_(r_nodes, r_nodes)] = intracluster_weight
        for s in range(1, r):
            if self.is_weighted:
                cl_pair = WeightedClassesPair(self.sim_mat, self.adj_mat, self.classes, r, s, self.epsilon)
            else:
                cl_pair = ClassesPair(self.adj_mat, self.classes, r, s, self.epsilon)
            s_nodes = self.classes == s
            if cl_pair.bip_density > thresh:
                reconstructed_mat[np.ix_(r_nodes, s_nodes)] = reconstructed_mat[np.ix_(s_nodes, r_nodes)] = cl_pair.bip_density

    np.fill_diagonal(reconstructed_mat, 0.0)
    return reconstructed_mat
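The chained assignment with np.ix_ above fills both off-diagonal blocks in one statement, which keeps the reconstructed matrix symmetric. A small hedged illustration with made-up class masks:

import numpy as np

mat = np.zeros((5, 5))
r_nodes = np.array([True, True, False, False, False])
s_nodes = np.array([False, False, True, True, True])
mat[np.ix_(r_nodes, s_nodes)] = mat[np.ix_(s_nodes, r_nodes)] = 0.7   # both blocks at once
assert np.allclose(mat, mat.T)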
def __init__(self, adj_mat, classes, r, s, epsilon):
    self.r = r
    self.s = s
    self.index_map = np.where(classes == r)[0]
    self.index_map = np.vstack((self.index_map, np.where(classes == s)[0]))
    self.bip_adj_mat = adj_mat[np.ix_(self.index_map[0], self.index_map[1])]
    self.n = self.bip_adj_mat.shape[0]
    self.bip_avg_deg = self.bip_avg_degree()
    self.bip_density = self.compute_bip_density()
    self.epsilon = epsilon
def __init__(self, sim_mat, adj_mat, classes, r, s, epsilon):
    self.r = r
    self.s = s
    self.index_map = np.where(classes == r)[0]
    self.index_map = np.vstack((self.index_map, np.where(classes == s)[0]))
    self.bip_sim_mat = sim_mat[np.ix_(self.index_map[0], self.index_map[1])]
    self.bip_adj_mat = adj_mat[np.ix_(self.index_map[0], self.index_map[1])]
    self.n = self.bip_sim_mat.shape[0]
    self.bip_avg_deg = self.bip_avg_degree()
    self.bip_density = self.compute_bip_density()
    self.epsilon = epsilon
def bin_sizes(self):
    sizes1 = np.cos(self.get_bin_left_edges(0)) - np.cos(self.get_bin_right_edges(0))
    sizes2 = self.get_bin_widths(1)
    return reduce(np.multiply, np.ix_(sizes1, sizes2))
def bin_sizes(self):
    sizes1 = (self.get_bin_right_edges(0) ** 3 - self.get_bin_left_edges(0) ** 3) / 3
    sizes2 = np.cos(self.get_bin_left_edges(1)) - np.cos(self.get_bin_right_edges(1))
    sizes3 = self.get_bin_widths(2)
    # Hopefully correct
    return reduce(np.multiply, np.ix_(sizes1, sizes2, sizes3))
    # return np.outer(sizes, sizes2, self.get_bin_widths(2))  # Correct
def bin_sizes(self):
    sizes1 = 0.5 * (self.get_bin_right_edges(0) ** 2 - self.get_bin_left_edges(0) ** 2)
    sizes2 = self.get_bin_widths(1)
    sizes3 = self.get_bin_widths(2)
    return reduce(np.multiply, np.ix_(sizes1, sizes2, sizes3))
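In the three bin_sizes variants above, reduce(np.multiply, np.ix_(...)) broadcasts the open mesh into the outer product of the per-axis bin sizes. A hedged check with made-up edge lengths:

import numpy as np
from functools import reduce

s1, s2, s3 = np.array([1.0, 2.0]), np.array([3.0, 4.0, 5.0]), np.array([6.0, 7.0])
sizes = reduce(np.multiply, np.ix_(s1, s2, s3))
assert sizes.shape == (2, 3, 2)
assert np.allclose(sizes, np.einsum('i,j,k->ijk', s1, s2, s3))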
def reduce_distmat(full_dist_mat,
                   gal_templateids,
                   probe_templateids,
                   reduce_type=ReduceType.MeanMin):
    # Get unique template ids and their positions, keeping the initial order
    #gal_tuids,gal_tuind=np.unique(gal_templateids,return_index=True)
    #probe_tuids,probe_tuind=np.unique(probe_templateids,return_index=True)
    gal_tuids, gal_tuind = np.unique(
        [str(x) for x in gal_templateids], return_index=True)
    probe_tuids, probe_tuind = np.unique(
        [str(x) for x in probe_templateids], return_index=True)
    red_dist_mat = np.zeros((len(gal_tuids), len(probe_tuids)))

    # Loop on gallery
    for g, gtupos in enumerate(gal_tuind):
        gutid = gal_templateids[gtupos]
        gt_pos = np.where(gal_templateids == gutid)[0]
        # Loop on probe
        for p, ptupos in enumerate(probe_tuind):
            putid = probe_templateids[ptupos]
            pt_pos = np.where(probe_templateids == putid)[0]
            # Get appropriate distance
            #print g,p
            dist_val = 0.0
            # TO BE FIXED
            if reduce_type == ReduceType.MeanMin:
                dist_val = np.mean(np.min(full_dist_mat[np.ix_(gt_pos, pt_pos)]))
            else:
                dist_val = np.amin(full_dist_mat[np.ix_(gt_pos, pt_pos)])
            red_dist_mat[g, p] = dist_val

    return red_dist_mat, gal_tuind, probe_tuind