def layout_tree(correlation):
    """Layout tree for visualization with e.g. matplotlib.

    Args:
        correlation: A [V, V]-shaped numpy array of latent correlations.

    Returns:
        A [V, 3]-shaped numpy array of spectral positions of vertices.
    """
    assert len(correlation.shape) == 2
    assert correlation.shape[0] == correlation.shape[1]
    assert correlation.dtype == np.float32
    laplacian = -correlation
    np.fill_diagonal(laplacian, 0)
    np.fill_diagonal(laplacian, -laplacian.sum(axis=0))
    # eigvals is an inclusive (lo, hi) index pair; (1, 3) selects the three smallest
    # non-trivial eigenpairs (index 0 is the zero eigenvalue of the graph Laplacian).
    # Newer SciPy releases name this argument subset_by_index.
    evals, evects = scipy.linalg.eigh(laplacian, eigvals=(1, 3))
    assert np.all(evals > 0)
    assert evects.shape[1] == 3
    return evects
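# Minimal usage sketch (not from the original project; the helper name below is
# hypothetical): build a small symmetric float32 correlation matrix and embed it
# spectrally. Assumes numpy is imported as np and scipy.linalg is available.
def _example_layout_tree():
    rng = np.random.RandomState(0)
    A = rng.rand(6, 6)
    corr = ((A + A.T) / 2).astype(np.float32)   # symmetric, float32 as required
    np.fill_diagonal(corr, 1.0)                  # unit self-correlation
    xyz = layout_tree(corr)
    print(xyz.shape)                             # -> (6, 3)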
Python fill_diagonal() example source code
def test_diagonal_mpa(nr_sites, local_dim, _, rgen, dtype):
    randfunc = factory._randfuncs[dtype]
    entries = randfunc((local_dim,), randstate=rgen)
    mpa_mp = factory.diagonal_mpa(entries, nr_sites)
    if nr_sites > 1:
        mpa_np = np.zeros((local_dim,) * nr_sites, dtype=dtype)
        np.fill_diagonal(mpa_np, entries)
    else:
        mpa_np = entries

    assert len(mpa_mp) == nr_sites
    assert mpa_mp.dtype is dtype
    assert_array_almost_equal(mpa_mp.to_array(), mpa_np)
    assert_correct_normalization(mpa_mp, nr_sites - 1, nr_sites)
    if nr_sites > 1:
        assert max(mpa_mp.ranks) == local_dim
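# Side note on the trick used above (illustrative sketch, not part of the test suite):
# np.fill_diagonal also works on N-dimensional hypercubes with equal sides, writing
# the values along the entries [i, i, ..., i].
def _example_fill_diagonal_3d():
    cube = np.zeros((3, 3, 3))
    np.fill_diagonal(cube, [10, 20, 30])
    print(cube[0, 0, 0], cube[1, 1, 1], cube[2, 2, 2])  # -> 10.0 20.0 30.0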
def __parse_pairs__(self, filepath, delimiter=',', target_col=2, column_names=list(), sequence_length=None):
    assert("target" in column_names)
    with open(filepath, "r") as f:
        lines = f.readlines()
    try:
        if sequence_length is None:
            dataframe = pd.read_csv(filepath, sep=delimiter, skip_blank_lines=True,
                                    header=None, names=column_names, index_col=False)
            sequence_length = np.asarray(dataframe[["i", "j"]]).max()
    except ValueError:
        return None
    data = np.full((sequence_length, sequence_length), np.nan, dtype=np.double)
    np.fill_diagonal(data, Params.DISTANCE_WITH_ITSELF)
    for line in lines:
        elements = line.rstrip("\r\n").split(delimiter)
        i, j, k = int(elements[0]) - 1, int(elements[1]) - 1, float(elements[target_col])
        data[i, j] = data[j, i] = k
    if np.isnan(data).any():
        # sequence_length is wrong or the input file has missing pairs
        warnings.warn("Warning: Pairs of residues are missing from the contacts text file")
        warnings.warn("Number of missing pairs: %i " % np.isnan(data).sum())
    return data
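# The same pattern in isolation (a sketch; DISTANCE_WITH_ITSELF and the pair list are
# made-up stand-ins for the class attributes and input file used above): seed the
# diagonal with a fixed self-distance, fill symmetrically, then check for gaps via NaN.
def _example_pairwise_matrix():
    DISTANCE_WITH_ITSELF = 0.0
    pairs = [(1, 2, 5.4), (1, 3, 7.1), (2, 3, 2.2)]    # 1-based (i, j, distance) triples
    n = 3
    data = np.full((n, n), np.nan)
    np.fill_diagonal(data, DISTANCE_WITH_ITSELF)        # self-distances on the diagonal
    for i, j, d in pairs:
        data[i - 1, j - 1] = data[j - 1, i - 1] = d     # symmetric fill
    print(np.isnan(data).any())                         # -> False: no missing pairs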
def getW(D, K, Mu=0.5):
    """
    Return affinity matrix
    [1] Wang, Bo, et al. "Similarity network fusion for aggregating data types on a genomic scale."
        Nature Methods 11.3 (2014): 333-337.
    :param D: Self-similarity matrix
    :param K: Number of nearest neighbors
    """
    # W(i, j) = exp(-Dij^2/(mu*epsij))
    DSym = 0.5 * (D + D.T)
    np.fill_diagonal(DSym, 0)

    Neighbs = np.partition(DSym, K + 1, 1)[:, 0:K + 1]
    MeanDist = np.mean(Neighbs, 1) * float(K + 1) / float(K)  # Need this scaling
    # to exclude diagonal element in mean
    # Equation 1 in SNF paper [1] for estimating local neighborhood radii
    # by looking at k nearest neighbors, not including point itself
    Eps = MeanDist[:, None] + MeanDist[None, :] + DSym
    Eps = Eps / 3
    W = np.exp(-DSym**2 / (2 * (Mu * Eps)**2))
    return W
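# Minimal usage sketch (toy inputs, not from the SNF code above): build a pairwise
# Euclidean distance matrix for a few points and turn it into affinities.
def _example_getW():
    rng = np.random.RandomState(0)
    X = rng.rand(10, 4)                                        # 10 points in 4 dimensions
    D = np.sqrt(((X[:, None, :] - X[None, :, :])**2).sum(-1))  # 10x10 distance matrix
    W = getW(D, K=3)
    print(W.shape, np.allclose(W, W.T))                        # -> (10, 10) True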
def jaccard_similarity_weighted(F, fill_diagonal=True):
    assert F.format == 'csr'
    if not F.has_sorted_indices:
        F.sort_indices()

    ind = F.indices
    ptr = F.indptr
    dat = F.data.astype(np.float64, copy=False)  # dtype needed for jaccard computation

    shift = 1 if fill_diagonal else 0
    data, rows, cols = _jaccard_similarity_weighted_tri(dat, ind, ptr, shift)
    S = sp.sparse.coo_matrix((data, (rows, cols)), shape=(F.shape[0],)*2).tocsc()
    S += S.T  # doubles diagonal values if fill_diagonal is False

    if fill_diagonal:
        set_diagonal_values(S, 1)
    else:
        set_diagonal_values(S, np.sign(S.diagonal()))  # set to 1, preserve zeros
    return S
def select_negtive(self, i_feat, s_feat, sess, topN=50):
    '''
    Select the triplets with the largest losses \n
    return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
    '''
    feed_dict = {self.image_feat: i_feat, self.sentence_feat: s_feat}
    i_embed, s_embed = sess.run([self.image_fc2, self.sentence_fc2], feed_dict=feed_dict)
    S = np.matmul(i_embed, s_embed.T)
    i_feat_pos = i_feat.repeat(topN, axis=0)
    s_feat_pos = s_feat.repeat(topN, axis=0)
    N = S.shape[0]
    np.fill_diagonal(S, -2 * np.ones(N))
    neg_s_idx = S.argsort(axis=1)[:, -topN:]
    neg_i_idx = S.argsort(axis=0)[-topN:, :]
    s_feat_neg = s_feat[neg_s_idx.flatten('C')]
    i_feat_neg = i_feat[neg_i_idx.flatten('F')]
    return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
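# The core hard-negative trick above, in isolation (a sketch with made-up toy data,
# no TensorFlow session required): mask the matching pairs on the diagonal with a value
# below the similarity range, then take the top-scoring indices per row as negatives.
def _example_hard_negatives():
    rng = np.random.RandomState(0)
    S = rng.uniform(-1, 1, size=(8, 8))     # cosine-like similarities, diagonal = positives
    np.fill_diagonal(S, -2.0)               # exclude each item's own positive match
    topN = 3
    neg_idx = S.argsort(axis=1)[:, -topN:]  # indices of the 3 hardest negatives per row
    print(neg_idx.shape)                    # -> (8, 3)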
def compute_sims(inputs: mx.nd.NDArray, normalize: bool) -> mx.nd.NDArray:
    """
    Returns a matrix with pair-wise similarity scores between inputs.
    Similarity score is the dot product (cosine similarity if normalized).
    'Similarity with self' is masked to a large negative value.

    :param inputs: NDArray of inputs.
    :param normalize: Whether to normalize to unit-length.
    :return: NDArray of shape (num_inputs, num_inputs) with pairwise similarities.
    """
    if normalize:
        logger.info("Normalizing embeddings to unit length")
        inputs = mx.nd.L2Normalization(inputs, mode='instance')
    sims = mx.nd.dot(inputs, inputs, transpose_b=True)
    sims_np = sims.asnumpy()
    np.fill_diagonal(sims_np, -9999999.)
    sims = mx.nd.array(sims_np)
    return sims
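# The same self-similarity masking in plain NumPy (an illustrative sketch, independent
# of the MXNet-based function above):
def _example_masked_similarities():
    rng = np.random.RandomState(0)
    E = rng.randn(5, 16)
    E = E / np.linalg.norm(E, axis=1, keepdims=True)   # unit-length rows
    sims = E @ E.T                                     # cosine similarities
    np.fill_diagonal(sims, -9999999.)                  # mask 'similarity with self'
    print(sims.argmax(axis=1))                         # nearest *other* row for each row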
def sharpenOld(s, kernelFunc, dist=None, scale=None,
               normalize=False, m1=False, *args, **kwargs):
    s = util.colmat(s)

    if dist is None:
        dist = np.arange(s.shape[1]) + 1.0
        dist = np.abs(dist[None, :] - dist[:, None])
        #dist = np.insert(spsig.triang(s.shape[1]-1, sym=False), 0, 0.0)
        #dist = np.vstack([np.roll(dist, i) for i in xrange(dist.size)])

    if scale is None:
        # minimum off-diagonal distance
        scale = np.min(dist[np.asarray(1.0 - np.eye(dist.shape[0]), dtype=bool)])

    kernel = kernelFunc(dist.T / scale, *args, **kwargs)

    if m1:
        np.fill_diagonal(kernel, 0.0)

    if normalize:
        kernel = kernel / np.abs(kernel.sum(axis=0))

    return s - s.dot(kernel)
def load_data(filename):
    df = pd.read_csv(filename, compression='zip')
    selected = ['Category', 'Descript']
    non_selected = list(set(df.columns) - set(selected))

    df = df.drop(non_selected, axis=1)
    df = df.dropna(axis=0, how='any', subset=selected)
    df = df.reindex(np.random.permutation(df.index))

    labels = sorted(list(set(df[selected[0]].tolist())))
    num_labels = len(labels)
    one_hot = np.zeros((num_labels, num_labels), int)
    np.fill_diagonal(one_hot, 1)
    label_dict = dict(zip(labels, one_hot))

    x_raw = df[selected[1]].apply(lambda x: clean_str(x).split(' ')).tolist()
    y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()

    x_raw = pad_sentences(x_raw)
    vocabulary, vocabulary_inv = build_vocab(x_raw)
    x = np.array([[vocabulary[word] for word in sentence] for sentence in x_raw])
    y = np.array(y_raw)
    return x, y, vocabulary, vocabulary_inv, df, labels
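# The one-hot construction above, in isolation (illustrative sketch): an identity-like
# matrix built with fill_diagonal gives one row per label. np.eye(num_labels, dtype=int)
# would produce the same matrix in a single call.
def _example_one_hot_labels():
    labels = ['business', 'sports', 'tech']
    one_hot = np.zeros((len(labels), len(labels)), int)
    np.fill_diagonal(one_hot, 1)
    label_dict = dict(zip(labels, one_hot))
    print(label_dict['sports'])    # -> [0 1 0]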
def load_test_data(test_file, labels):
    df = pd.read_csv(test_file, sep='|')
    select = ['Descript']

    df = df.dropna(axis=0, how='any', subset=select)
    test_examples = df[select[0]].apply(lambda x: data_helper.clean_str(x).split(' ')).tolist()

    num_labels = len(labels)
    one_hot = np.zeros((num_labels, num_labels), int)
    np.fill_diagonal(one_hot, 1)
    label_dict = dict(zip(labels, one_hot))

    y_ = None
    if 'Category' in df.columns:
        select.append('Category')
        y_ = df[select[1]].apply(lambda x: label_dict[x]).tolist()

    not_select = list(set(df.columns) - set(select))
    df = df.drop(not_select, axis=1)
    return test_examples, y_, df
BidirectionNet_tfidf.py (project: Sohu-LuckData-Image-Text-Matching-Competition, author: WeitaoVan)
def select_negtive(self, i_feat, s_feat, sess, topN=50):
    '''
    Select the triplets with the largest losses \n
    return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
    '''
    feed_dict = {self.image_feat: i_feat, self.sentence_feat: s_feat}
    i_embed, s_embed = sess.run([self.image_fc2, self.sentence_fc2], feed_dict=feed_dict)
    S = np.matmul(i_embed, s_embed.T)
    i_feat_pos = i_feat.repeat(topN, axis=0)
    s_feat_pos = s_feat.repeat(topN, axis=0)
    N = S.shape[0]
    np.fill_diagonal(S, -2 * np.ones(N))
    neg_s_idx = S.argsort(axis=1)[:, -topN:]
    neg_i_idx = S.argsort(axis=0)[-topN:, :]
    s_feat_neg = s_feat[neg_s_idx.flatten('C')]
    i_feat_neg = i_feat[neg_i_idx.flatten('F')]
    return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
BidirectionNet_4wtfidf.py (project: Sohu-LuckData-Image-Text-Matching-Competition, author: WeitaoVan)
def select_negtive(self, i_feat, s_feat, sess, topN=50):
    '''
    Select the triplets with the largest losses \n
    return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
    '''
    feed_dict = {self.image_feat: i_feat, self.sentence_feat: s_feat}
    i_embed, s_embed = sess.run([self.image_fc2, self.sentence_fc2], feed_dict=feed_dict)
    S = np.matmul(i_embed, s_embed.T)
    i_feat_pos = i_feat.repeat(topN, axis=0)
    s_feat_pos = s_feat.repeat(topN, axis=0)
    N = S.shape[0]
    np.fill_diagonal(S, -2 * np.ones(N))
    neg_s_idx = S.argsort(axis=1)[:, -topN:]
    neg_i_idx = S.argsort(axis=0)[-topN:, :]
    s_feat_neg = s_feat[neg_s_idx.flatten('C')]
    i_feat_neg = i_feat[neg_i_idx.flatten('F')]
    return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
def get_gcovmat(h2, rg):
    """
    Args: h2: vector with SNP heritabilities
          rg: vector with genetic correlations
    Returns: numpy trait-by-trait array with h2 on the diagonal and genetic covariances off-diagonal
    """
    mat = numpy.zeros((len(h2), len(h2)))
    mat[numpy.triu_indices(len(h2), 1)] = rg
    mat = mat + mat.T
    mat = mat * numpy.sqrt(numpy.outer(h2, h2))
    numpy.fill_diagonal(mat, h2)
    return numpy.array(mat)
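# Minimal usage sketch (toy numbers, not from any study): three traits with given
# heritabilities and pairwise genetic correlations in the order (1,2), (1,3), (2,3).
def _example_get_gcovmat():
    h2 = numpy.array([0.5, 0.3, 0.2])
    rg = numpy.array([0.4, 0.1, 0.6])
    gcov = get_gcovmat(h2, rg)
    print(gcov.round(3))
    # the diagonal holds h2; off-diagonals are rg_ij * sqrt(h2_i * h2_j)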
# When input files are score files, not beta files, mtot may be unknown.
# Here mtot=1e6 is assumed. The absolute value of the expected variances for each trait is irrelevant for the multi-trait weighting, so it doesn't matter too much what this value is, especially if M > N.
def ols_weights(n, h2, rg, mtot=1e6):
    """
    Args: n: vector with sample size for each trait
          h2: vector with SNP heritabilities
          rg: vector with rg for each pair of traits (3 traits: 1,2; 1,3; 2,3)
          mtot: total number of markers (doesn't change result much)
    Returns: ntraits * ntraits array with OLS weights; weights in each row are for a multi-trait predictor of the trait in that row
    """
    ntraits = len(n)
    gcovmat = get_gcovmat(h2, rg)
    print(gcovmat)
    V = gcovmat / mtot
    numpy.fill_diagonal(V, ols_variances(n, h2, mtot))
    C = gcovmat / mtot
    weights = numpy.zeros([ntraits, ntraits])
    for i in range(ntraits):
        nonzero = V[i, :] != 0
        Vi = V[numpy.array(numpy.where(nonzero)[0])[:, None], nonzero]
        Vinv = numpy.linalg.inv(Vi)
        weights[i, nonzero] = numpy.dot(Vinv, C[i, nonzero])
    print(weights)
    return weights
def __get_prolongation_matrix(ndofs_coarse, ndofs_fine):
    """Helper routine for the prolongation operator

    Args:
        ndofs_fine (int): number of DOFs on the fine grid
        ndofs_coarse (int): number of DOFs on the coarse grid

    Returns:
        scipy.sparse.csc_matrix: sparse prolongation matrix of size
        `ndofs_fine` x `ndofs_coarse`
    """
    # This is a workaround, since I am not aware of a suitable way to do
    # this directly with sparse matrices.
    P = np.zeros((ndofs_fine, ndofs_coarse))
    np.fill_diagonal(P[1::2, :], 1)
    np.fill_diagonal(P[0::2, :], 1.0 / 2.0)
    np.fill_diagonal(P[2::2, :], 1.0 / 2.0)
    return sp.csc_matrix(P)
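# Why the strided fill_diagonal calls above work (illustrative sketch with toy sizes):
# P[1::2, :] and friends are views into P, so filling their diagonals writes an
# interleaved injection / averaging pattern directly into the dense matrix.
def _example_prolongation_pattern():
    P = np.zeros((7, 3))              # e.g. 7 fine DOFs, 3 coarse DOFs
    np.fill_diagonal(P[1::2, :], 1)
    np.fill_diagonal(P[0::2, :], 0.5)
    np.fill_diagonal(P[2::2, :], 0.5)
    print(P)                          # coarse values injected at odd rows, averaged at even rows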
def _select_target_neighbors(self):
    """Find the target neighbors of each sample, that stay fixed during training.

    Returns
    -------
    array_like
        An array of neighbors indices for each sample with shape (n_samples, n_neighbors).
    """
    self.logger.info('Finding target neighbors...')
    target_neighbors = np.empty((self.X_.shape[0], self.n_neighbors_), dtype=int)
    for class_ in self.classes_:
        class_ind, = np.where(np.equal(self.y_, class_))
        dist = euclidean_distances(self.X_[class_ind], squared=True)
        np.fill_diagonal(dist, np.inf)
        neigh_ind = np.argpartition(dist, self.n_neighbors_ - 1, axis=1)
        neigh_ind = neigh_ind[:, :self.n_neighbors_]
        # argpartition doesn't guarantee sorted order, so we sort again but only the k neighbors
        row_ind = np.arange(len(class_ind))[:, None]
        neigh_ind = neigh_ind[row_ind, np.argsort(dist[row_ind, neigh_ind])]
        target_neighbors[class_ind] = class_ind[neigh_ind]
    return target_neighbors
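# The self-exclusion idiom above, in isolation (a sketch; uses scikit-learn's
# euclidean_distances as in the method and assumes it is imported):
def _example_knn_without_self():
    rng = np.random.RandomState(0)
    X = rng.rand(6, 2)
    dist = euclidean_distances(X, squared=True)
    np.fill_diagonal(dist, np.inf)                   # a point is never its own neighbor
    k = 2
    neigh_ind = np.argpartition(dist, k - 1, axis=1)[:, :k]
    print(neigh_ind.shape)                           # -> (6, 2)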
def set_diagonal(G, val=0):
    """
    Generally diagonal is set to 0. This function helps set the diagonal across time.

    **PARAMETERS**

    :G: temporal network (graphlet)
    :val: value to set diagonal to (default 0).

    **OUTPUT**

    :G: Graphlet representation of G with new diagonal

    **HISTORY**

    :Modified: Dec 2016, WHT (documentation)
    :Created: Nov 2016, WHT
    """
    for t in range(0, G.shape[2]):
        np.fill_diagonal(G[:, :, t], val)
    return G
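# Minimal usage sketch (toy temporal network, not from the original package): zero the
# diagonal of every time slice of a (nodes x nodes x time) array.
def _example_set_diagonal():
    rng = np.random.RandomState(0)
    G = rng.rand(4, 4, 3)                    # 4 nodes, 3 time points
    G = set_diagonal(G, 0)
    print(G[:, :, 0].diagonal())             # -> [0. 0. 0. 0.]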
def newScore(movie):
    critic_num = len(token_dict[movie["movieTitle"]]["critics"])
    N = len(token_dict[movie["movieTitle"]]["reviews"])
    C = cosine[movie["movieTitle"]][critic_num:, critic_num:]
    R = [review['score'] for review in movie['reviews']]
    print(C.shape)
    # exclude self similarity
    # np.fill_diagonal(C, 0)
    # normalize
    row_sums = C.sum(axis=1)
    C = C / row_sums[:, np.newaxis]
    # calculate new score
    new_score = np.dot(C, R)
    # update new score
    new_review = movie['reviews']
    for review, score in zip(new_review, new_score):
        review.update({'newScore': score})
    testing = [abs(review['score'] - review['newScore']) < 5 for review in new_review]
    print(np.sum(testing))
    return new_review
def get_masked(self, percent_hole, diag_off=1):
    """ Construct a random mask.
        Random training set on 20% on Data / debug5 - debug11 -- Unbalanced
    """
    data = self.data
    if type(data) is np.ndarray:
        #self.data_mat = sp.sparse.csr_matrix(data)
        pass
    else:
        raise NotImplementedError('type %s unknown as corpus' % type(data))

    n = int(data.size * percent_hole)
    mask_index = np.unravel_index(np.random.permutation(data.size)[:n], data.shape)
    mask = np.zeros(data.shape, dtype=data.dtype)
    mask[mask_index] = 1

    if self.is_symmetric():
        mask = np.tril(mask) + np.tril(mask, -1).T

    data_ma = ma.array(data, mask=mask)
    if diag_off == 1:
        np.fill_diagonal(data_ma, ma.masked)

    return data_ma
def get_masked_zeros(self, diag_off=1):
    ''' Take out all zeros '''
    data = self.data
    if type(data) is np.ndarray:
        #self.data_mat = sp.sparse.csr_matrix(data)
        pass
    else:
        raise NotImplementedError('type %s unknown as corpus' % type(data))

    mask = np.zeros(data.shape, dtype=data.dtype)
    mask[data == 0] = 1

    if self.is_symmetric():
        mask = np.tril(mask) + np.tril(mask, -1).T

    data_ma = ma.array(data, mask=mask)
    if diag_off == 1:
        np.fill_diagonal(data_ma, ma.masked)

    return data_ma
def _solve_hessian(G, Y, thY, precon, lambda_min):
    N, T = Y.shape
    # Compute the derivative of the score
    psidY = ne.evaluate('(- thY ** 2 + 1.) / 2.')  # noqa
    # Build the diagonal of the Hessian, a.
    Y_squared = Y ** 2
    if precon == 2:
        a = np.inner(psidY, Y_squared) / float(T)
    elif precon == 1:
        sigma2 = np.mean(Y_squared, axis=1)
        psidY_mean = np.mean(psidY, axis=1)
        a = psidY_mean[:, None] * sigma2[None, :]
        diagonal_term = np.mean(Y_squared * psidY) + 1.
        a[np.diag_indices_from(a)] = diagonal_term
    else:
        raise ValueError('precon should be 1 or 2')
    # Compute the eigenvalues of the Hessian
    eigenvalues = 0.5 * (a + a.T - np.sqrt((a - a.T) ** 2 + 4.))
    # Regularize
    problematic_locs = eigenvalues < lambda_min
    np.fill_diagonal(problematic_locs, False)
    i_pb, j_pb = np.where(problematic_locs)
    a[i_pb, j_pb] += lambda_min - eigenvalues[i_pb, j_pb]
    # Invert the transform
    return (G * a.T - G.T) / (a * a.T - 1.)
def grad(self, inp, cost_grad):
    """
    Notes
    -----
    The gradient is currently implemented for matrices only.
    """
    a, val = inp
    grad = cost_grad[0]
    if a.dtype.startswith('complex'):
        return [None, None]
    elif a.ndim > 2:
        raise NotImplementedError('%s: gradient is currently implemented'
                                  ' for matrices only' %
                                  self.__class__.__name__)
    wr_a = fill_diagonal(grad, 0)  # valid for any number of dimensions
    # diag is only valid for matrices
    wr_val = theano.tensor.nlinalg.diag(grad).sum()
    return [wr_a, wr_val]
def test_perform(self):
    x = tensor.matrix()
    y = tensor.scalar()
    f = function([x, y], fill_diagonal(x, y))
    for shp in [(8, 8), (5, 8), (8, 5)]:
        a = numpy.random.rand(*shp).astype(config.floatX)
        val = numpy.cast[config.floatX](numpy.random.rand())
        out = f(a, val)
        # We can't use numpy.fill_diagonal as it is bugged.
        assert numpy.allclose(numpy.diag(out), val)
        assert (out == val).sum() == min(a.shape)

    # test for 3d tensor
    a = numpy.random.rand(3, 3, 3).astype(config.floatX)
    x = tensor.tensor3()
    y = tensor.scalar()
    f = function([x, y], fill_diagonal(x, y))
    val = numpy.cast[config.floatX](numpy.random.rand() + 10)
    out = f(a, val)
    # We can't use numpy.fill_diagonal as it is bugged.
    assert out[0, 0, 0] == val
    assert out[1, 1, 1] == val
    assert out[2, 2, 2] == val
    assert (out == val).sum() == min(a.shape)
def test_perform(self):
    x = tensor.matrix()
    y = tensor.scalar()
    z = tensor.iscalar()

    f = function([x, y, z], fill_diagonal_offset(x, y, z))
    for test_offset in (-5, -4, -1, 0, 1, 4, 5):
        for shp in [(8, 8), (5, 8), (8, 5), (5, 5)]:
            a = numpy.random.rand(*shp).astype(config.floatX)
            val = numpy.cast[config.floatX](numpy.random.rand())
            out = f(a, val, test_offset)
            # We can't use numpy.fill_diagonal as it is bugged.
            assert numpy.allclose(numpy.diag(out, test_offset), val)
            if test_offset >= 0:
                assert (out == val).sum() == min(min(a.shape),
                                                 a.shape[1] - test_offset)
            else:
                assert (out == val).sum() == min(min(a.shape),
                                                 a.shape[0] + test_offset)
def constant(self):
    delta = np.min(self.rho) - 0.01
    cormat = np.full((self.nkdim, self.nkdim), delta)
    epsilon = 0.99 - np.max(self.rho)

    for i in np.arange(self.k):
        cor = np.full((self.nk[i], self.nk[i]), self.rho[i])
        if i == 0:
            cormat[0:self.nk[0], 0:self.nk[0]] = cor
        if i != 0:
            cormat[np.sum(self.nk[0:i]):np.sum(self.nk[0:i + 1]),
                   np.sum(self.nk[0:i]):np.sum(self.nk[0:i + 1])] = cor

    np.fill_diagonal(cormat, 1 - epsilon)
    cormat = self._generate_noise(cormat, self.nkdim, self.M, epsilon)
    return cormat
def toepz(self):
    cormat = np.zeros((self.nkdim, self.nkdim))
    epsilon = (1 - np.max(self.rho)) / (1 + np.max(self.rho)) - .01

    for i in np.arange(self.k):
        t = np.insert(np.power(self.rho[i], np.arange(1, self.nk[i])), 0, 1)
        cor = toeplitz(t)
        if i == 0:
            cormat[0:self.nk[0], 0:self.nk[0]] = cor
        if i != 0:
            cormat[np.sum(self.nk[0:i]):np.sum(self.nk[0:i + 1]),
                   np.sum(self.nk[0:i]):np.sum(self.nk[0:i + 1])] = cor

    np.fill_diagonal(cormat, 1 - epsilon)
    cormat = self._generate_noise(cormat, self.nkdim, self.M, epsilon)
    return cormat
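# The block pattern above, in isolation (a sketch with toy parameters; assumes
# scipy.linalg.toeplitz is imported as in the method): each group gets an AR(1)-style
# correlation block rho**|i-j|, then the diagonal is reset just below 1.
def _example_toeplitz_block():
    rho, nk = 0.7, 4
    t = np.insert(np.power(rho, np.arange(1, nk)), 0, 1)   # [1, rho, rho^2, rho^3]
    block = toeplitz(t)
    epsilon = (1 - rho) / (1 + rho) - .01
    np.fill_diagonal(block, 1 - epsilon)
    print(block.round(3))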
def hub(self):
    cormat = np.zeros((self.nkdim, self.nkdim))

    for i in np.arange(self.k):
        cor = toeplitz(self._fill_hub_matrix(self.rho[i, 0], self.rho[i, 1], self.power, self.nk[i]))
        if i == 0:
            cormat[0:self.nk[0], 0:self.nk[0]] = cor
        if i != 0:
            cormat[np.sum(self.nk[0:i]):np.sum(self.nk[0:i + 1]),
                   np.sum(self.nk[0:i]):np.sum(self.nk[0:i + 1])] = cor
        tau = (np.max(self.rho[i]) - np.min(self.rho[i])) / (self.nk[i] - 2)

    epsilon = 0.08  # (1 - np.min(rho) - 0.75 * np.min(tau)) - 0.01
    np.fill_diagonal(cormat, 1 - epsilon)
    cormat = self._generate_noise(cormat, self.nkdim, self.M, epsilon)
    return cormat
def load_data_and_labels():
    articles = np.load('data/bin/all_articles.npy')
    labels = np.load('data/bin/all_labels.npy')

    articles = [clean_str(article) for article in articles]

    # Map the actual labels to one hot labels
    label_list = sorted(list(set(labels)))
    one_hot = np.zeros((len(label_list), len(label_list)), int)
    np.fill_diagonal(one_hot, 1)
    label_dict = dict(zip(label_list, one_hot))
    labels = one_hot_encode(labels, label_dict)

    x_raw = articles
    y_raw = labels
    return x_raw, y_raw, label_list
def fill_diagonal_(mat: T.Tensor, val: T.Scalar) -> None:
    """
    Fill the diagonal of the matrix with a specified value.

    Note:
        Modifies mat in place.

    Args:
        mat: A tensor.
        val: The value to put along the diagonal.

    Returns:
        None
    """
    numpy.fill_diagonal(mat, val)
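# Minimal usage sketch (illustrative; the T.Tensor alias above is just the backend's
# array type): the wrapper mutates its argument and returns nothing.
def _example_fill_diagonal_():
    m = numpy.ones((3, 3))
    fill_diagonal_(m, 5.0)
    print(m.diagonal())    # -> [5. 5. 5.]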