def test_spacing_nextafter(self):
"""Test np.spacing and np.nextafter"""
# All non-negative finite #'s
a = np.arange(0x7c00, dtype=uint16)
hinf = np.array((np.inf,), dtype=float16)
a_f16 = a.view(dtype=float16)
assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
# switch to negatives
a |= 0x8000
assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])
assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
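The same identity in isolation, as a quick standalone sketch (only NumPy is assumed): for a finite positive float16 value, np.spacing(x) is the gap to the next representable number toward +inf.
import numpy as np

# Illustrative check of the relation exercised by the test above.
x = np.float16(1.0)
step = np.spacing(x)                         # gap to the next float16
up = np.nextafter(x, np.float16(np.inf))     # next representable float16
assert up - x == step
print(step)   # 0.000977 == 2**-10, the float16 machine epsilon at 1.0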
def validate_cov_matrix(M):
M = (M + M.T) * 0.5
k = 0
I = np.eye(M.shape[0])
while True:
try:
_ = np.linalg.cholesky(M)
break
except np.linalg.LinAlgError:
# Find the nearest positive definite matrix for M. Modified from
# http://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
# Might take several minutes
k += 1
w, v = np.linalg.eig(M)
min_eig = w.min()  # w holds the eigenvalues; v holds the eigenvectors
M += (-min_eig * k * k + np.spacing(min_eig)) * I
return M
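A hedged usage sketch of the repair loop above; the 3x3 matrix is made up for illustration and is deliberately not positive definite.
import numpy as np

# Symmetric but indefinite input (one negative eigenvalue), illustrative only.
M = np.array([[1.0, 0.9, 0.9],
              [0.9, 1.0, 0.9],
              [0.9, 0.9, -0.5]])
M_pd = validate_cov_matrix(M)
np.linalg.cholesky(M_pd)   # succeeds: the loop only returns after a successful factorization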
def __init__(self, data_manager, t_layer_sizes, p_layer_sizes, dropout=0):
print('{:25}'.format("Initializing Model"), end='', flush=True)
self.t_layer_sizes = t_layer_sizes
self.p_layer_sizes = p_layer_sizes
self.dropout = dropout
self.data_manager = data_manager
self.t_input_size = self.data_manager.f.feature_count
self.output_size = self.data_manager.s.information_count
self.time_model = StackedCells(self.t_input_size, celltype=LSTM, layers = t_layer_sizes)
self.time_model.layers.append(PassthroughLayer())
p_input_size = t_layer_sizes[-1] + self.output_size
self.pitch_model = StackedCells( p_input_size, celltype=LSTM, layers = p_layer_sizes)
self.pitch_model.layers.append(Layer(p_layer_sizes[-1], self.output_size, activation = T.nnet.sigmoid))
self.conservativity = T.fscalar()
self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
self.epsilon = np.spacing(np.float32(1.0))
print("Done")
def _logL(distr, y, y_hat):
"""The log likelihood."""
if distr in ['softplus', 'poisson']:
eps = np.spacing(1)
logL = np.sum(y * np.log(y_hat + eps) - y_hat)
elif distr == 'gaussian':
logL = -0.5 * np.sum((y - y_hat)**2)
elif distr == 'binomial':
# analytical formula
logL = np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))
# but this prevents underflow
# z = beta0 + np.dot(X, beta)
# logL = np.sum(y * z - np.log(1 + np.exp(z)))
elif distr == 'probit':
logL = np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))
elif distr == 'gamma':
# see
# https://www.statistics.ma.tum.de/fileadmin/w00bdb/www/czado/lec8.pdf
nu = 1. # shape parameter, exponential for now
logL = np.sum(nu * (-y / y_hat - np.log(y_hat)))
return logL
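A small usage sketch of the Poisson branch with made-up arrays; the np.spacing(1) offset keeps np.log finite when a predicted rate is exactly zero.
import numpy as np

y     = np.array([0.0, 1.0, 3.0, 2.0])    # observed counts (toy data)
y_hat = np.array([0.0, 1.2, 2.5, 2.0])    # predicted rates, one exactly zero
print(_logL('poisson', y, y_hat))         # finite, thanks to the eps guard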
def unitize(v):
"""
UNIT Unitize a vector
:param v: the vector to be unitized
:return: a unit vector parallel to v
Raises an error when v is non-symbolic and norm(v) is zero.
"""
n = np.linalg.norm(v, "fro")
# Todo ISA
if n > np.spacing([1])[0]:
return v / n
else:
raise AttributeError("Vector has zero norm")
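A usage sketch with a made-up vector; note that np.linalg.norm(v, "fro") requires a 2-D array, so the vector is passed as a 1xN row here.
import numpy as np

v = np.array([[3.0, 4.0, 0.0]])   # row vector, illustrative only
print(unitize(v))                 # [[0.6 0.8 0. ]], a unit vector parallel to v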
# ---------------------------------------------------------------------------------------#
def SID(s1, s2):
"""
Computes the spectral information divergence between two vectors.
Parameters:
s1: `numpy array`
The first vector.
s2: `numpy array`
The second vector.
Returns: `float`
Spectral information divergence between s1 and s2.
Reference
C.-I. Chang, "An Information-Theoretic Approach to Spectral Variability,
Similarity, and Discrimination for Hyperspectral Image Analysis",
IEEE Transactions on Information Theory, vol. 46, no. 5, August 2000.
"""
p = (s1 / np.sum(s1)) + np.spacing(1)
q = (s2 / np.sum(s2)) + np.spacing(1)
return np.sum(p * np.log(p / q) + q * np.log(q / p))
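A usage sketch with two made-up spectra; identical inputs give zero divergence, and the np.spacing(1) offset keeps the logs finite for zero-valued bands.
import numpy as np

s1 = np.array([0.2, 0.4, 0.3, 0.1])
s2 = np.array([0.1, 0.5, 0.3, 0.1])
print(SID(s1, s1))   # 0.0
print(SID(s1, s2))   # a small positive divergence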
def laplacian(W, normalized=True):
"""Return the Laplacian of the weigth matrix."""
# Degree matrix.
d = W.sum(axis=0)
# Laplacian matrix.
if not normalized:
D = scipy.sparse.diags(d.A.squeeze(), 0)
L = D - W
else:
d += np.spacing(np.array(0, W.dtype))
d = 1 / np.sqrt(d)
D = scipy.sparse.diags(d.A.squeeze(), 0)
I = scipy.sparse.identity(d.size, dtype=W.dtype)
L = I - D * W * D
# assert np.abs(L - L.T).mean() < 1e-9
assert type(L) is scipy.sparse.csr.csr_matrix
return L
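A usage sketch on a tiny made-up weight matrix (a 3-node path graph); only NumPy and SciPy are assumed.
import numpy as np
import scipy.sparse

W = scipy.sparse.csr_matrix(np.array([[0., 1., 0.],
                                      [1., 0., 1.],
                                      [0., 1., 0.]]))
L = laplacian(W, normalized=True)
print(L.toarray())   # symmetric, with ones on the diagonal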
def sensitivity(Ntp, Nfn, eps=numpy.spacing(1)):
"""Sensitivity
Wikipedia entry https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Parameters
----------
Ntp : int >=0
Number of true positives
Nfn : int >=0
Number of false negatives
eps : float
eps
(Default value=numpy.spacing(1))
Returns
-------
sensitivity: float
Sensitivity
"""
return float(Ntp / (Ntp + Nfn + eps))
def specificity(Ntn, Nfp, eps=numpy.spacing(1)):
"""Specificity
Wikipedia entry https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Parameters
----------
Ntn : int >= 0
Number of true negatives
Nfp : int >= 0
Number of false positives
eps : float
eps
(Default value=numpy.spacing(1))
Returns
-------
specificity: float
Specificity
"""
return float(Ntn / (Ntn + Nfp + eps))
def substitution_rate(Nref, Nsubstitutions, eps=numpy.spacing(1)):
"""Substitution rate
Parameters
----------
Nref : int >=0
Number of entries in the reference
Nsubstitutions : int >=0
Number of substitutions
eps : float
eps
(Default value=numpy.spacing(1))
Returns
-------
substitution_rate: float
Substitution rate
"""
return float(Nsubstitutions / (Nref + eps))
def deletion_rate(Nref, Ndeletions, eps=numpy.spacing(1)):
"""Deletion rate
Parameters
----------
Nref : int >=0
Number of entries in the reference
Ndeletions : int >=0
Number of deletions
eps : float
eps
(Default value=numpy.spacing(1))
Returns
-------
deletion_rate: float
Deletion rate
"""
return float(Ndeletions / (Nref + eps))
def insertion_rate(Nref, Ninsertions, eps=numpy.spacing(1)):
"""Insertion rate
Parameters
----------
Nref : int >=0
Number of entries in the reference
Ninsertions : int >=0
Number of insertions
eps : float
eps
(Default value=numpy.spacing(1))
Returns
-------
insertion_rate: float
Insertion rate
"""
return float(Ninsertions / (Nref + eps))
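An illustrative run of the metrics above on made-up counts; the eps default only matters when a denominator would otherwise be zero.
print(sensitivity(Ntp=45, Nfn=5))                      # ~0.9
print(specificity(Ntn=80, Nfp=20))                     # ~0.8
print(substitution_rate(Nref=100, Nsubstitutions=7))   # ~0.07
print(deletion_rate(Nref=100, Ndeletions=3))           # ~0.03
print(insertion_rate(Nref=100, Ninsertions=10))        # ~0.1
print(sensitivity(Ntp=0, Nfn=0))                       # 0.0 instead of a division-by-zero error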
def feat_eog(signals):
"""
calculate the EOG features
:param signals: 1D or 2D signals
"""
if signals.ndim == 1: signals = np.expand_dims(signals,0)
sfreq = use_sfreq
nsamp = float(signals.shape[1])
w = (fft(signals,axis=1)).real
feats = np.zeros((signals.shape[0],15),dtype='float32')
delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
beta = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1) # only until 50, because hz=100
sum_abs_pow = delta + theta + alpha + beta + gamma
feats[:,0] = delta /sum_abs_pow
feats[:,1] = theta /sum_abs_pow
feats[:,2] = alpha /sum_abs_pow
feats[:,3] = beta /sum_abs_pow
feats[:,4] = gamma /sum_abs_pow
feats[:,5] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5) #smean
feats[:,6] = np.sqrt(np.max(signals, axis=1)) #PAV
feats[:,7] = np.sqrt(np.abs(np.min(signals, axis=1))) #VAV
feats[:,8] = np.argmax(signals, axis=1)/nsamp #PAP
feats[:,9] = np.argmin(signals, axis=1)/nsamp #VAP
feats[:,10] = np.sqrt(np.sum(np.abs(signals), axis=1)/ np.mean(np.sum(np.abs(signals), axis=1))) # AUC
feats[:,11] = np.sum(((np.roll(np.sign(signals), 1,axis=1) - np.sign(signals)) != 0).astype(int),axis=1)/nsamp #TVC
feats[:,12] = np.log10(np.std(signals, axis=1)) #STD/VAR
feats[:,13] = np.log10(stats.kurtosis(signals,fisher=False,axis=1)) # kurtosis
feats[:,14] = np.log10(-np.sum([(x/nsamp)*((np.log((x+np.spacing(1))/nsamp))) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1)) # entropy.. yay, one line...
if np.any(np.isnan(feats)): print('NaN detected')  # feats == np.nan is always False; use np.isnan
return np.nan_to_num(feats)
def feat_emg(signals):
"""
calculate the EMG features (including the median frequency) as defined by Leangkvist (2012)
"""
if signals.ndim == 1: signals = np.expand_dims(signals,0)
sfreq = use_sfreq
nsamp = float(signals.shape[1])
w = (fft(signals,axis=1)).real
feats = np.zeros((signals.shape[0],13),dtype='float32')
delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
beta = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1) # only until 50, because hz=100
sum_abs_pow = delta + theta + alpha + beta + gamma
feats[:,0] = delta /sum_abs_pow
feats[:,1] = theta /sum_abs_pow
feats[:,2] = alpha /sum_abs_pow
feats[:,3] = beta /sum_abs_pow
feats[:,4] = gamma /sum_abs_pow
feats[:,5] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5) #smean
emg = np.sum(np.abs(w[:,np.arange(12.5*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)
feats[:,6] = emg / np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1) # ratio of high freq to total motor
feats[:,7] = np.median(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1) # median freq
feats[:,8] = np.mean(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1) # mean freq
feats[:,9] = np.std(signals, axis=1) # std
feats[:,10] = np.mean(signals,axis=1)
feats[:,11] = np.log10(stats.kurtosis(signals,fisher=False,axis=1) )
feats[:,12] = np.log10(-np.sum([(x/nsamp)*((np.log((x+np.spacing(1))/nsamp))) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1)) # entropy.. yay, one line...
if np.any(np.isnan(feats)): print('NaN detected')  # feats == np.nan is always False; use np.isnan
return np.nan_to_num(feats)
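The one-line histogram-entropy feature shared by feat_eog (feats[:,14]) and feat_emg (feats[:,12]) is easier to read in isolation. The helper below is a hedged restatement of that single term, not part of the original module.
import numpy as np

def hist_entropy(signal, bins=8):
    """Shannon entropy (in nats) of an amplitude histogram, matching the
    np.spacing(1)-guarded expression used in the feature vectors above."""
    counts, _ = np.histogram(signal, bins=bins)
    n = float(signal.shape[-1])
    p = counts / n
    # np.spacing(1) keeps the log finite for empty bins, exactly as above
    return -np.sum(p * np.log((counts + np.spacing(1)) / n))

rng = np.random.RandomState(0)
print(hist_entropy(rng.randn(3000)))   # a positive entropy value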
def _test_spacing(t):
one = t(1)
eps = np.finfo(t).eps
nan = t(np.nan)
inf = t(np.inf)
with np.errstate(invalid='ignore'):
assert_(np.spacing(one) == eps)
assert_(np.isnan(np.spacing(nan)))
assert_(np.isnan(np.spacing(inf)))
assert_(np.isnan(np.spacing(-inf)))
assert_(np.spacing(t(1e30)) != 0)
def test_spacing_gfortran():
# Reference from this fortran file, built with gfortran 4.3.3 on linux
# 32bits:
# PROGRAM test_spacing
# INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37)
# INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200)
#
# WRITE(*,*) spacing(0.00001_DBL)
# WRITE(*,*) spacing(1.0_DBL)
# WRITE(*,*) spacing(1000._DBL)
# WRITE(*,*) spacing(10500._DBL)
#
# WRITE(*,*) spacing(0.00001_SGL)
# WRITE(*,*) spacing(1.0_SGL)
# WRITE(*,*) spacing(1000._SGL)
# WRITE(*,*) spacing(10500._SGL)
# END PROGRAM
ref = {np.float64: [1.69406589450860068E-021,
2.22044604925031308E-016,
1.13686837721616030E-013,
1.81898940354585648E-012],
np.float32: [9.09494702E-13,
1.19209290E-07,
6.10351563E-05,
9.76562500E-04]}
for dt, dec_ in zip([np.float32, np.float64], (10, 20)):
x = np.array([1e-5, 1, 1000, 10500], dtype=dt)
assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_)
def test_nextafter_vs_spacing():
# XXX: spacing does not handle long double yet
for t in [np.float32, np.float64]:
for _f in [1, 1e-5, 1000]:
f = t(_f)
f1 = t(_f + 1)
assert_(np.nextafter(f, f1) - f == np.spacing(f))
def test_nans_infs(self):
with np.errstate(all='ignore'):
# Check some of the ufuncs
assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
assert_equal(np.spacing(float16(65504)), np.inf)
# Check comparisons of all values with NaN
nan = float16(np.nan)
assert_(not (self.all_f16 == nan).any())
assert_(not (nan == self.all_f16).any())
assert_((self.all_f16 != nan).all())
assert_((nan != self.all_f16).all())
assert_(not (self.all_f16 < nan).any())
assert_(not (nan < self.all_f16).any())
assert_(not (self.all_f16 <= nan).any())
assert_(not (nan <= self.all_f16).any())
assert_(not (self.all_f16 > nan).any())
assert_(not (nan > self.all_f16).any())
assert_(not (self.all_f16 >= nan).any())
assert_(not (nan >= self.all_f16).any())
def KL(a, b):
"""Calculate the Kullback Leibler divergence between a and b """
D_KL = np.nansum(np.multiply(a, np.log(np.divide(a, b+np.spacing(1)))), axis=1)
return D_KL
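A usage sketch with made-up row-wise distributions; np.nansum discards the 0 * log(0) terms and np.spacing(1) keeps the denominator nonzero.
import numpy as np

a = np.array([[0.5, 0.5, 0.0],
              [0.9, 0.05, 0.05]])
b = np.array([[0.4, 0.4, 0.2],
              [0.9, 0.05, 0.05]])
print(KL(a, b))   # first entry ~0.22, second ~0 (identical rows)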
def calc_information(probTgivenXs, PYgivenTs, PXs, PYs):
"""Calculate the MI - I(X;T) and I(Y;T)"""
PTs = np.nansum(probTgivenXs*PXs, axis=1)
Ht = np.nansum(-np.dot(PTs, np.log2(PTs)))
Htx = - np.nansum((np.dot(np.multiply(probTgivenXs, np.log2(probTgivenXs)), PXs)))
Hyt = - np.nansum(np.dot(PYgivenTs*np.log2(PYgivenTs+np.spacing(1)), PTs))
Hy = np.nansum(-PYs * np.log2(PYs+np.spacing(1)))
IYT = Hy - Hyt
ITX = Ht - Htx
return ITX, IYT
def calc_information_1(probTgivenXs, PYgivenTs, PXs, PYs, PTs):
"""Calculate the MI - I(X;T) and I(Y;T)"""
#PTs = np.nansum(probTgivenXs*PXs, axis=1)
Ht = np.nansum(-np.dot(PTs, np.log2(PTs+np.spacing(1))))
Htx = - np.nansum((np.dot(np.multiply(probTgivenXs, np.log2(probTgivenXs+np.spacing(1))), PXs)))
Hyt = - np.nansum(np.dot(PYgivenTs*np.log2(PYgivenTs+np.spacing(1)), PTs))
Hy = np.nansum(-PYs * np.log2(PYs+np.spacing(1)))
IYT = Hy - Hyt
ITX = Ht - Htx
return ITX, IYT
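A toy sanity check with made-up distributions: X is a fair bit, T copies X, and Y copies T, so both mutual informations should come out near 1 bit.
import numpy as np

PXs = np.array([0.5, 0.5])
PTs = np.array([0.5, 0.5])
PYs = np.array([0.5, 0.5])
probTgivenXs = np.eye(2)   # P(T|X): T copies X
PYgivenTs = np.eye(2)      # P(Y|T): Y copies T
ITX, IYT = calc_information_1(probTgivenXs, PYgivenTs, PXs, PYs, PTs)
print(ITX, IYT)            # both ~1.0 bit for this deterministic chain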
def calc_information(probTgivenXs, PYgivenTs, PXs, PYs, PTs):
"""Calculate the MI - I(X;T) and I(Y;T)"""
#PTs = np.nansum(probTgivenXs*PXs, axis=1)
t_indeces = np.nonzero(PTs)
Ht = np.nansum(-np.dot(PTs, np.log2(PTs+np.spacing(1))))
Htx = - np.nansum((np.dot(np.multiply(probTgivenXs, np.log2(probTgivenXs)), PXs)))
Hyt = - np.nansum(np.dot(PYgivenTs*np.log2(PYgivenTs+np.spacing(1)), PTs))
Hy = np.nansum(-PYs * np.log2(PYs+np.spacing(1)))
IYT = Hy - Hyt
ITX = Ht - Htx
return ITX, IYT
def t_calc_information(p_x_given_t, PYgivenTs, PXs, PYs):
"""Calculate the MI - I(X;T) and I(Y;T)"""
Hx = np.nansum(-np.dot(PXs, np.log2(PXs)))
Hxt = - np.nansum((np.dot(np.multiply(p_x_given_t, np.log2(p_x_given_t)), PXs)))
# note: PTs is not a parameter of this function; it must come from the enclosing scope
Hyt = - np.nansum(np.dot(PYgivenTs*np.log2(PYgivenTs+np.spacing(1)), PTs))
Hy = np.nansum(-PYs * np.log2(PYs+np.spacing(1)))
IYT = Hy - Hyt
ITX = Hx - Hxt
return ITX, IYT
def barycentric_coordinates_of_projection(p, q, u, v):
""" Given a point, gives projected coords of that point to a triangle
in barycentric coordinates.
See Heidrich, Computing the Barycentric Coordinates of a Projected Point, JGT 05
at http://www.cs.ubc.ca/~heidrich/Papers/JGT.05.pdf
Args:
p: point to project
q: a vertex of the triangle to project into
u,v: edges of the triangle such that it has vertices q, q+u, q+v
Returns:
b: barycentric coordinates of p's projection in triangle defined by q,u,v
vectorized so p,q,u,v can all be 3xN
"""
p = p.T
q = q.T
u = u.T
v = v.T
n = np.cross(u, v, axis=0)
s = np.sum(n*n, axis=0)
# If the triangle edges are collinear, cross-product is zero,
# which makes "s" 0, which gives us divide by zero. So we
# make the arbitrary choice to set s to epsv (=numpy.spacing(1)),
# the closest thing to zero
if np.isscalar(s):
s = s if s else np.spacing(1)
else:
s[s == 0] = np.spacing(1)
oneOver4ASquared = 1.0 / s
w = p - q
b2 = np.sum(np.cross(u, w, axis=0) * n, axis=0) * oneOver4ASquared
b1 = np.sum(np.cross(w, v, axis=0) * n, axis=0) * oneOver4ASquared
b = np.vstack((1 - b1 - b2, b1, b2))
return b.T
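A usage sketch with a made-up right triangle; given the transposes at the top of the function, each input is passed here as a 1x3 row, and projecting the lifted centroid gives equal barycentric weights.
import numpy as np

q = np.array([[0.0, 0.0, 0.0]])            # triangle vertex
u = np.array([[1.0, 0.0, 0.0]])            # edge q -> q+u
v = np.array([[0.0, 1.0, 0.0]])            # edge q -> q+v
p = np.array([[1.0 / 3, 1.0 / 3, 0.5]])    # centroid, lifted off the plane
print(barycentric_coordinates_of_projection(p, q, u, v))   # ~[[0.333 0.333 0.333]]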