def get_metrics_per_contact_type(predicted_cmap, target_cmap):
L = predicted_cmap.shape[0]
short_range = np.zeros((L, L), dtype=bool)  # np.bool was removed in NumPy 1.24
medium_range = np.zeros((L, L), dtype=bool)
long_range = np.zeros((L, L), dtype=bool)
more_than_6 = np.tril_indices(L, -6)
more_than_12 = np.tril_indices(L, -12)
more_than_24 = np.tril_indices(L, -24)
short_range[more_than_6] = True
short_range[more_than_12] = False
medium_range[more_than_12] = True
medium_range[more_than_24] = False
long_range[more_than_24] = True
short_range = np.logical_or(short_range, short_range.T)
medium_range = np.logical_or(medium_range, medium_range.T)
long_range = np.logical_or(long_range, long_range.T)
short_range_metrics = get_metrics(predicted_cmap[short_range], target_cmap[short_range])
medium_range_metrics = get_metrics(predicted_cmap[medium_range], target_cmap[medium_range])
long_range_metrics = get_metrics(predicted_cmap[long_range], target_cmap[long_range])
return short_range_metrics, medium_range_metrics, long_range_metrics
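# A minimal numpy-only sketch of how the offset tril_indices calls above
# partition residue pairs by sequence separation; `get_metrics` is assumed to
# be defined elsewhere in the module, so only the mask logic is exercised.
import numpy as np

L = 30
short_range = np.zeros((L, L), dtype=bool)
short_range[np.tril_indices(L, -6)] = True    # separation >= 6
short_range[np.tril_indices(L, -12)] = False  # ...but drop separation >= 12
short_range = np.logical_or(short_range, short_range.T)

i, j = np.nonzero(short_range)
sep = np.abs(i - j)
assert sep.min() == 6 and sep.max() == 11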
def _chol_idx(self, n_C, rank):
l_idx = np.tril_indices(n_C)
if rank is not None:
# The rank of covariance matrix is specified
idx_rank = np.where(l_idx[1] < rank)
l_idx = (l_idx[0][idx_rank], l_idx[1][idx_rank])
logger.info('Using the rank specified by the user: '
'{}'.format(rank))
else:
rank = n_C
# if not specified, we assume you want to
# estimate a full rank matrix
logger.warning('Please be aware that you did not specify the '
               'rank of the covariance matrix to estimate. '
               'I will assume that the covariance matrix '
               'shared among voxels is of full rank. '
               'Rank = {}'.format(rank))
logger.warning('Please be aware that estimating a matrix of '
               'high rank can be very slow. '
               'If you have a good reason to specify a rank '
               'lower than the number of experiment conditions, '
               'do so.')
return l_idx, rank
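# A minimal sketch of the rank restriction performed by _chol_idx: keep only
# lower-triangular indices whose column index is below `rank`, which
# parameterizes an n_C x rank Cholesky-style factor.
import numpy as np

rows, cols = np.tril_indices(4)
keep = np.where(cols < 2)
rows, cols = rows[keep], cols[keep]
assert cols.max() == 1 and len(rows) == 7  # 4 entries in column 0, 3 in column 1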
def forward(self, x):
"""
Transforms from the free state to the variable.
Args:
x: Free state vector. Must have length of `self.num_matrices` *
triangular_number.
Returns:
Reconstructed variable.
"""
L = self._validate_vector_length(len(x))
matsize = int((L * 8 + 1) ** 0.5 * 0.5 - 0.5)  # n from L = n(n+1)/2
xr = np.reshape(x, (self.num_matrices, -1))
var = np.zeros((matsize, matsize, self.num_matrices), settings.float_type)
for i in range(self.num_matrices):
indices = np.tril_indices(matsize, 0)
var[indices + (np.zeros(len(indices[0])).astype(int) + i,)] = xr[i, :]
return var.squeeze() if self.squeeze else var
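# Numpy-only sketch of the packing performed by `forward` above: a 6-vector
# fills the lower triangle of a 3x3 matrix in tril_indices (row-major) order.
import numpy as np

x = np.arange(1., 7.)
matsize = int((len(x) * 8 + 1) ** 0.5 * 0.5 - 0.5)  # 3, from len(x) = n(n+1)/2
tri = np.zeros((matsize, matsize))
tri[np.tril_indices(matsize)] = x
# tri == [[1, 0, 0],
#         [2, 3, 0],
#         [4, 5, 6]]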
def vec_to_tri(vectors, N):
    """
    Takes a D x M tensor `vectors` and maps it to a D x N x N tensor, where
    the lower triangle of each N x N matrix is constructed by unpacking the
    corresponding M-vector (M must be the triangle number N * (N + 1) / 2).
    Native TensorFlow version of a custom op by Mark van der Wilk.
    Original shape-inference code, kept for reference:
    def int_shape(x):
        return list(map(int, x.get_shape()))
    D, M = int_shape(vectors)
    N = int(np.floor(0.5 * np.sqrt(M * 8. + 1.) - 0.5))
    # Check M is a valid triangle number
    assert (N * (N + 1)) == (2 * M)
    """
indices = list(zip(*np.tril_indices(N)))
indices = tf.constant([list(i) for i in indices], dtype=tf.int64)
def vec_to_tri_vector(vector):
return tf.scatter_nd(indices=indices, shape=[N, N], updates=vector)
return tf.map_fn(vec_to_tri_vector, vectors)
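# Hedged usage sketch (assumes TensorFlow 2.x eager execution): pack two
# 6-vectors into the lower triangles of two 3x3 matrices, using the same
# row-major tril packing as the numpy sketch above.
import numpy as np
import tensorflow as tf

vectors = tf.constant(np.arange(1., 13.).reshape(2, 6))
tri = vec_to_tri(vectors, 3)
assert tri.shape == (2, 3, 3)
assert float(tri[1, 2, 0]) == 10.0  # fourth element of the second vector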
def makeImgPatchPrototype(D, compID):
''' Create image patch prototype for specific component
Returns
--------
Xprototype : sqrt(D) x sqrt(D) matrix
'''
# Create a "prototype" image patch of PxP pixels
P = int(np.sqrt(D))  # patch is P x P pixels; use integer math throughout
Xprototype = np.zeros((P, P))
if compID % 4 == 0:
    Xprototype[:P // 2] = 1.0
    Xprototype = np.rot90(Xprototype, compID // 4)
if compID % 4 == 1:
    Xprototype[np.tril_indices(P)] = 1
    Xprototype = np.rot90(Xprototype, (compID - 1) // 4)
if compID % 4 == 2:
    Xprototype[np.tril_indices(P, 2)] = 1
    Xprototype = np.rot90(Xprototype, (compID - 2) // 4)
if compID % 4 == 3:
    Xprototype[np.tril_indices(P, -2)] = 1
    Xprototype = np.rot90(Xprototype, (compID - 3) // 4)
return Xprototype
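# Usage sketch: with D = 25 (5x5 patches), compID = 1 fills the lower
# triangle of the patch with ones.
import numpy as np

patch = makeImgPatchPrototype(25, 1)
assert patch.shape == (5, 5)
assert patch[4, 0] == 1.0 and patch[0, 4] == 0.0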
def extract_patches(self, all_seqdata, with_targets=True, inplace=False):
print("\tConverting dataset to Numpy array...")
L0_X, L0_Y = list(), list()
for seqdata in all_seqdata:
if not inplace:
seqdata = copy.deepcopy(seqdata)
L = len(seqdata)
tril_indices = np.tril_indices(L, -1) # Lower triangle without diagonal
cpreds = seqdata.cpreds
for cpred in cpreds:
    if cpred is not None:
        assert len(cpred.shape) == 2
        cpred_shape = cpred.shape
# Create contact maps from file data
for i in range(len(cpreds)):
    if cpreds[i] is None:
        cpreds[i] = np.full(cpred_shape, Params.DISTANCE_NAN, dtype=Params.FLOAT_DTYPE)
    else:
        cpreds[i][np.isnan(cpreds[i])] = Params.DISTANCE_NAN
    cpreds[i] = np.asarray(cpreds[i][tril_indices], dtype=Params.FLOAT_DTYPE)
cpreds = np.asarray(cpreds, dtype=Params.FLOAT_DTYPE).T
L0_X.append(cpreds)
if with_targets:
contacts = distances_to_contacts(seqdata.distances)
contacts = contacts[tril_indices]
L0_Y.append(contacts)
L0_X = np.concatenate(L0_X, axis=0)
if with_targets:
L0_Y = np.concatenate(L0_Y, axis=0)
return (L0_X, L0_Y) if with_targets else L0_X
def separation_under_k_aa(cmap):
L = cmap.shape[0]
mask = np.ones((L, L), dtype=bool)
more_than_k = np.tril_indices(L, -Params.MIN_AA_SEPARATION)
mask[more_than_k] = False
mask = np.logical_and(mask, mask.T)
return mask
def _non_diagonal_idx_array(batch_size, n):
idx_offsets = np.arange(
start=0, stop=batch_size * n * n, step=n * n, dtype=np.int32).reshape(
(batch_size, 1))
idx = np.ravel_multi_index(
np.tril_indices(n, -1), (n, n)).reshape((1, -1)).astype(np.int32)
return cuda.to_gpu(idx + idx_offsets)
def _get_batch_non_diagonal_cpu(array):
batch_size, m, n = array.shape
assert m == n
rows, cols = np.tril_indices(n, -1)
return array[:, rows, cols]
def _set_batch_non_diagonal_cpu(array, non_diag_val):
batch_size, m, n = array.shape
assert m == n
rows, cols = np.tril_indices(n, -1)
array[:, rows, cols] = non_diag_val
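# Minimal round-trip sketch for the two helpers above: read the strict lower
# triangles of a batch of matrices, scale them, and write them back.
import numpy as np

batch = np.arange(18, dtype=float).reshape(2, 3, 3)
vals = _get_batch_non_diagonal_cpu(batch)        # shape (2, 3)
_set_batch_non_diagonal_cpu(batch, vals * 10.0)
assert np.allclose(_get_batch_non_diagonal_cpu(batch), vals * 10.0)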
def check_forward(self, diag_data, non_diag_data):
diag = chainer.Variable(diag_data)
non_diag = chainer.Variable(non_diag_data)
y = lower_triangular_matrix(diag, non_diag)
correct_y = numpy.zeros(
(self.batch_size, self.n, self.n), dtype=numpy.float32)
tril_rows, tril_cols = numpy.tril_indices(self.n, -1)
correct_y[:, tril_rows, tril_cols] = cuda.to_cpu(non_diag_data)
diag_rows, diag_cols = numpy.diag_indices(self.n)
correct_y[:, diag_rows, diag_cols] = cuda.to_cpu(diag_data)
gradient_check.assert_allclose(correct_y, cuda.to_cpu(y.data))
def getImageDescriptors_HOG_cdist(self, all_emb, ref_emb, ref_mask):
# unnormalized cosine distance for HOG
dist = numpy.dot(all_emb, ref_emb.T)
# normalize by length of query descriptor projected on reference
norm = numpy.sqrt(numpy.dot(numpy.square(all_emb), ref_mask.T))
dist /= norm
dist[numpy.isinf(dist)] = 0.
dist[numpy.isnan(dist)] = 0.
# dist[numpy.triu_indices(dist.shape[0], 1)] = numpy.maximum(dist[numpy.triu_indices(dist.shape[0], 1)],
# dist.T[numpy.triu_indices(dist.shape[0], 1)])
# dist[numpy.tril_indices(dist.shape[0], -1)] = 0.
# dist += dist.T
return dist
def mk_test(x, alpha = 0.05):
n = len(x)
# calculate S
x = np.asarray(x)
subMa = np.sign(x[:, None] - x[None, :])  # sign (-1, 0, 1) of every pairwise difference x_i - x_j
s = np.sum(subMa[np.tril_indices(n, -1)])  # sum over the strict lower triangle
# calculate the unique data
# return_counts=True returns a second array that is equivalent to tp in old version
unique_x = np.unique(x, return_counts=True)
g = len(unique_x[0])
# calculate the var(s)
if n == g: # there is no tie
var_s = (n*(n-1)*(2*n+5))/18
else: # there are some ties in data
tp = unique_x[1]
var_s = (n*(n-1)*(2*n+5) + np.sum(tp*(tp-1)*(2*tp+5)))/18
if s>0:
z = (s - 1)/np.sqrt(var_s)
elif s == 0:
z = 0
elif s<0:
z = (s + 1)/np.sqrt(var_s)
# calculate the p_value
p = 2*(1-scipy.stats.norm.cdf(abs(z))) # two tail test
h = abs(z) > scipy.stats.norm.ppf(1-alpha/2)
return h, p
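# Usage sketch: a strictly increasing series is flagged as a significant
# monotonic trend (h is True, p is tiny).
import numpy as np

h, p = mk_test(np.arange(20, dtype=float), alpha=0.05)
assert h and p < 0.05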
def reference_inverse(self, matrices):
# This is the inverse operation of the vec_to_tri op being tested.
D, N, _ = matrices.shape
M = (N * (N + 1)) // 2
tril_indices = np.tril_indices(N)
output = np.zeros((D, M))
for vector_index in range(D):
matrix = matrices[vector_index, :]
output[vector_index, :] = matrix[tril_indices]
return output
def backward(self, y):
"""
Transforms from the variable to the free state.
Args:
y: Variable representation.
Returns:
Free state.
"""
N = int(np.sqrt(y.size / self.num_matrices))
reshaped = np.reshape(y, (N, N, self.num_matrices))
size = len(reshaped)
triangular = reshaped[np.tril_indices(size, 0)].T
return triangular
def unvech_kh(v, cols_stacked=True, norm_conserved=False):
# Restore matrix dimension and add triangular
v = v.flatten()
d = int(0.5 * (np.sqrt(8 * len(v) + 1) - 1))
X = np.empty((d, d))
triu_idx_r = []
triu_idx_c = []
# Find appropriate indexes
if cols_stacked:
for c in range(0, d):
for r in range(0, c+1):
triu_idx_r.append(r)
triu_idx_c.append(c)
else:
for r in range(0, d):
for c in range(r, d):
triu_idx_r.append(r)
triu_idx_c.append(c)
# Restore original matrix
triu_idx = (triu_idx_r, triu_idx_c)
X[triu_idx] = v
X[np.tril_indices(d)] = X.T[np.tril_indices(d)]
# Undo rescaling on off diagonal elements
if norm_conserved:
X[np.triu_indices(d, 1)] /= np.sqrt(2)
X[np.tril_indices(d, -1)] /= np.sqrt(2)
return X
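# Usage sketch for the default column-stacked layout: the 3-vector stores the
# (0,0), (0,1), (1,1) entries of a symmetric 2x2 matrix.
import numpy as np

X = unvech_kh(np.array([1.0, 2.0, 3.0]))
assert np.allclose(X, np.array([[1.0, 2.0], [2.0, 3.0]]))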
def test_naf_layer_full():
batch_size = 2
for nb_actions in (1, 3):
# Construct single model with NAF as the only layer, hence it is fully deterministic
# since no weights are used, which would be randomly initialized.
L_flat_input = Input(shape=((nb_actions * nb_actions + nb_actions) // 2,))
mu_input = Input(shape=(nb_actions,))
action_input = Input(shape=(nb_actions,))
x = NAFLayer(nb_actions, mode='full')([L_flat_input, mu_input, action_input])
model = Model(inputs=[L_flat_input, mu_input, action_input], outputs=x)
model.compile(loss='mse', optimizer='sgd')
# Create random test data.
L_flat = np.random.random((batch_size, (nb_actions * nb_actions + nb_actions) // 2)).astype('float32')
mu = np.random.random((batch_size, nb_actions)).astype('float32')
action = np.random.random((batch_size, nb_actions)).astype('float32')
# Perform reference computations in numpy since these are much easier to verify.
L = np.zeros((batch_size, nb_actions, nb_actions)).astype('float32')
LT = np.copy(L)
for l, l_T, l_flat in zip(L, LT, L_flat):
l[np.tril_indices(nb_actions)] = l_flat
l[np.diag_indices(nb_actions)] = np.exp(l[np.diag_indices(nb_actions)])
l_T[:, :] = l.T
P = np.array([np.dot(l, l_T) for l, l_T in zip(L, LT)]).astype('float32')
A_ref = np.array([np.dot(np.dot(a - m, p), a - m) for a, m, p in zip(action, mu, P)]).astype('float32')
A_ref *= -.5
# Finally, compute the output of the net, which should be identical to the previously
# computed reference.
A_net = model.predict([L_flat, mu, action]).flatten()
assert_allclose(A_net, A_ref, rtol=1e-5)
def __init__(self, tv_dim, ndim, nmix):
self.tv_dim = tv_dim
self.ndim = ndim
self.nmix = nmix
self.itril = np.tril_indices(tv_dim)
self.Sigma = np.empty((self.ndim * self.nmix, 1))
self.T_iS = None # np.empty((self.tv_dim, self.ndim * self.nmix))
self.T_iS_Tt = None # np.empty((self.nmix, self.tv_dim * (self.tv_dim+1)/2))
self.Tm = np.empty((self.tv_dim, self.ndim * self.nmix))
self.Im = np.eye(self.tv_dim)
def corrcoef_matrix(matrix):
# Code originating from http://stackoverflow.com/a/24547964 by http://stackoverflow.com/users/2455058/jingchao
r = np.corrcoef(matrix)
rf = r[np.triu_indices(r.shape[0], 1)]
df = matrix.shape[1] - 2
ts = rf * rf * (df / (1 - rf * rf))
pf = betai(0.5 * df, 0.5, df / (df + ts))
p = np.zeros(shape=r.shape)
p[np.triu_indices(p.shape[0], 1)] = pf
p[np.tril_indices(p.shape[0], -1)] = p.T[np.tril_indices(p.shape[0], -1)]  # mirror the upper triangle; assigning pf directly would scramble the order
p[np.diag_indices(p.shape[0])] = np.ones(p.shape[0])
return r, p
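# Hedged usage sketch: `betai` is assumed to be scipy.special.betainc (the
# regularized incomplete beta function), which older scipy code exposed as
# `betai`; both r and p come back symmetric, with ones on the diagonal of p.
import numpy as np
from scipy.special import betainc as betai

data = np.random.RandomState(0).randn(5, 100)  # 5 variables, 100 samples
r, p = corrcoef_matrix(data)
assert np.allclose(r, r.T) and np.allclose(p, p.T)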
def vector_from_array(array):
"""Get triangle of output in vector from a correlation-type array
Parameters
----------
array : np.array
Returns
-------
vector (np.array)
Notes
-----
Old Matlab code indexes by column (a.k.a. Fortran-style), so to get the
indices of the upper triangle in that order, we have to do some reshaping.
If a row-major (C-style) ordering of the upper triangle is fine, it is simply:
triangle = np.triu_indices(array.shape[0], k=1); out = array[triangle]
"""
triangle_lower = np.tril_indices(array.shape[0], k=-1)
flatten_idx = np.arange(array.size).reshape(array.shape)[triangle_lower]
triangle = np.unravel_index(flatten_idx, array.shape, order='F')
# triangle = np.triu_indices(array.size, k=1)
# out = array[triangle]
return array[triangle]
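# Usage sketch: for a 4x4 array the result is the upper triangle read in
# Fortran (column) order, matching the old Matlab convention.
import numpy as np

a = np.arange(16).reshape(4, 4)
# column order of the upper triangle: (0,1), (0,2), (1,2), (0,3), (1,3), (2,3)
assert vector_from_array(a).tolist() == [1, 2, 6, 3, 7, 11]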
def feature_extractor_2(state, action, normalize=True):
# features are
# - bias
# - the elements of the state vector
# - all second order terms (mixed and pure)
# - sines of every first and second order term
# - cosines of every first and second order term
# all of that times two (i.e. for each of the actions)
s_prime = np.r_[1, state] # dim(state) + 1
# dim(state) (dim(state) + 1) / 2
quadratic = np.outer(s_prime, s_prime)[np.tril_indices(s_prime.shape[0])].reshape(-1)
# (dim(state) (dim(state) + 1) / 2) - 1
sines = np.sin(quadratic[1:])
cosines = np.cos(quadratic[1:])
# dim(state)(dim(state) + 1) - 2 + dim(state)(dim(state) + 1) / 2
state_feats = np.r_[quadratic, sines, cosines]
# dim(state_feats) * 2
features = np.outer(state_feats, np.array([0, 1]) == action).T.reshape(-1)
if normalize:
# normalize everything but the bias.
norm = features / (np.linalg.norm(features))
norm[0] = int(action == 0) * 1.0
norm[state_feats.shape[0]] = int(action == 1) * 1.0
return norm
else:
return features
def _get_lowTriIDs(K):
if K in lowTriIDsDict:
return lowTriIDsDict[K]
else:
ltIDs = np.tril_indices(K)
lowTriIDsDict[K] = ltIDs
return ltIDs
def _scorematrix2rankedlist_greedy(M, nPairs, doKeepZeros=False):
''' Return the nPairs highest-ranked pairs in score matrix M
Args
-------
M : score matrix, K x K
should have only entries kA,kB where kA <= kB
Returns
--------
aList : list of integer ids for rows of M
bList : list of integer ids for cols of M
    Example
    ---------
    >>> M = np.array([[0, 2, 3],
    ...               [0, 0, 1],
    ...               [0, 0, 0]])
    >>> _scorematrix2rankedlist_greedy(M, 3)
    ([0, 0, 1], [2, 1, 2])   # pairs (0,2), (0,1), (1,2)
'''
M = M.copy()
M[np.tril_indices(M.shape[0])] = - np.inf
Mflat = M.flatten()
sortIDs = np.argsort(-1 * Mflat)
# Remove any entries that are -Inf
sortIDs = sortIDs[Mflat[sortIDs] != -np.inf]
if not doKeepZeros:
# Remove any entries that are zero
sortIDs = sortIDs[Mflat[sortIDs] != 0]
bestrs, bestcs = np.unravel_index(sortIDs, M.shape)
return bestrs[:nPairs].tolist(), bestcs[:nPairs].tolist()
def _tril_indices(n, k=0):
"""Replacement for tril_indices that is provided for numpy >= 1.4"""
mask = np.greater_equal(np.subtract.outer(np.arange(n), np.arange(n)), -k)
indices = np.where(mask)
return indices
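# Sanity sketch: the mask-based fallback matches numpy's own tril_indices,
# including the row-major ordering of the returned index arrays.
import numpy as np

a, b = _tril_indices(5, k=1)
c, d = np.tril_indices(5, 1)
assert (a == c).all() and (b == d).all()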
def _get_strength_matrix(self):
assert self.pmi.shape == self.ts_correlation.shape
assert is_square(self.pmi)
a = np.multiply(self.pmi, self.ts_correlation)
lower_indexes = np.tril_indices(self.pmi.shape[0])
a[lower_indexes] = np.nan
return a
def tril_elements(M):
'''
Somewhat like matlab's "diag" function, but for lower triangular matrices
tril_elements(randn(D*(D+1)//2))
'''
if len(M.shape)==2:
# M is a matrix
if not M.shape[0] == M.shape[1]:
    raise ValueError("Extracting lower-triangle elements is supported only on square arrays")
# Extract lower triangular elements
i = np.tril_indices(M.shape[0])
return M[i]
if len(M.shape)==1:
# M is a vector
# Solve N(N+1)/2 = K for N using the quadratic formula:
#   N^2 + N - 2K = 0  =>  N = (sqrt(1 + 8K) - 1) / 2
K = M.shape[0]
N = (np.sqrt(1+8*K)-1)/2
if N!=round(N):
raise ValueError('Cannot pack %d elements into a square triangular matrix'%K)
N = int(N)
result = np.zeros((N,N))
result[np.tril_indices(N)] = M
return result
raise ValueError("Must be 2D matrix or 1D vector")
def givens_rotation(A):
"""Perform QR decomposition of matrix A using Givens rotation."""
(num_rows, num_cols) = np.shape(A)
# Initialize orthogonal matrix Q and upper triangular matrix R.
Q = np.identity(num_rows)
R = np.copy(A)
# Iterate over lower triangular matrix.
(rows, cols) = np.tril_indices(num_rows, -1, num_cols)
for (row, col) in zip(rows, cols):
# Compute Givens rotation matrix and
# zero-out lower triangular matrix entries.
if R[row, col] != 0:
(c, s) = _givens_rotation_matrix_entries(R[col, col], R[row, col])
G = np.identity(num_rows)
G[[col, row], [col, row]] = c
G[row, col] = s
G[col, row] = -s
R = np.dot(G, R)
Q = np.dot(Q, G.T)
return (Q, R)
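# Hedged demo: `_givens_rotation_matrix_entries` is not shown above, so a
# conventional (c, s) choice that zeroes R[row, col] is assumed here.
import numpy as np

def _givens_rotation_matrix_entries(a, b):
    r = np.hypot(a, b)
    return a / r, -b / r

A = np.random.RandomState(0).randn(4, 4)
Q, R = givens_rotation(A)
assert np.allclose(Q @ R, A)             # QR reproduces A
assert np.allclose(np.tril(R, -1), 0.0)  # R is upper triangular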
def _fill_lower_triangular(self, x):
"""Numpy implementation of `fill_lower_triangular`."""
x = np.asarray(x)
d = x.shape[-1]
# d = n(n+1)/2 implies n is:
n = int(0.5 * (np.sqrt(1. + 8. * d) - 1.))  # np.sqrt avoids needing a separate math import
ids = np.tril_indices(n)
y = np.zeros(list(x.shape[:-1]) + [n, n], dtype=x.dtype)
y[..., ids[0], ids[1]] = x
return y
def _binary_sim(matrix):
"""Compute a jaccard similarity matrix.
Vecorization based on: https://stackoverflow.com/a/40579567
Parameters
----------
matrix : np.array
Returns
-------
np.array
matrix of shape (n_rows, n_rows) giving the similarity
between rows of the input matrix.
"""
# Generate the indices of the lower triangle of our result matrix.
# The diagonal is offset by -1 because the identity in a similarity
# matrix is always 1.
r, c = np.tril_indices(matrix.shape[0], -1)
# Particularly large groups can blow out memory usage. Chunk the calculation
# into steps that require no more than ~100MB of memory at a time.
# We have 4 2d intermediate arrays in memory at a given time, plus the
# input and output:
# p1 = max_rows * matrix.shape[1]
# p2 = max_rows * matrix.shape[1]
# intersection = max_rows * matrix.shape[1] * 4
# union = max_rows * matrix.shape[1] * 8
# This adds up to:
# memory usage = max_rows * matrix.shape[1] * 14
mem_limit = 100 * pow(2, 20)
max_rows = mem_limit // (14 * matrix.shape[1])  # integer division: row count must be an int
out = np.eye(matrix.shape[0])
for c_batch, r_batch in _batch(c, r, max_rows):
# Use those indices to build two matrices that contains all
# the rows we need to do a similarity comparison on
p1 = matrix[c_batch]
p2 = matrix[r_batch]
# Run the main jaccard calculation
intersection = np.logical_and(p1, p2).sum(1)
union = np.logical_or(p1, p2).sum(1).astype(np.float64)
# Build the result matrix with 1's on the diagonal
# Insert the result of our similarity calculation at their original indices
out[c_batch, r_batch] = intersection / union
# Above only populated half of the matrix, the mirrored diagonal contains
# only zeros. Fix that up by transposing. Adding the transposed matrix double
# counts the diagonal so subtract that back out. We could skip this step and
# leave half the matrix empty, but it takes a fraction of a ms to be correct
# even on mid-sized inputs (~200 queries).
return out + out.T - np.diag(np.diag(out))
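# Hedged usage sketch: `_batch` is not shown above, so a minimal chunking
# generator is assumed here just to exercise the function on a small input.
import numpy as np

def _batch(c, r, max_rows):
    step = max(1, int(max_rows))
    for start in range(0, len(c), step):
        yield c[start:start + step], r[start:start + step]

m = np.array([[1, 1, 0],
              [1, 0, 0],
              [0, 1, 1]], dtype=bool)
sim = _binary_sim(m)
assert np.allclose(sim, sim.T)
assert sim[0, 1] == 0.5  # rows 0 and 1 share 1 of their 2 set columns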