import numpy as np  # assumed by all snippets below

def fit_gamma(ti):
    # Method-of-moments initialization followed by a Newton solve for the
    # shape-like parameter K; tau is then recovered from the sample mean.
    mu = ti.mean()
    sigma = ti.std()
    C = mu**2/sigma**2

    def f(x):
        return np.exp(x)/(2.*np.expm1(x)/x - 1.)

    def f1(x):
        # derivative of f, used in the Newton update
        y = 2.*np.expm1(x)/x - 1.
        z = 2./x**2*(x*np.exp(x) - np.expm1(x))
        return np.exp(x)*(1 - z/y)/y

    # Newton iterations to solve f(K) == C
    K = 2.*C  # initial value
    for i in range(10):
        dK = -(f(K) - C)/f1(K)
        K = K + dK
    tau = mu*(1. - np.exp(-K))/K
    return K, tau
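# Usage sketch (hedged): `fit_gamma` expects a 1-D array of positive waiting
# times; the gamma-distributed data below are made up purely for illustration.
ti = np.random.gamma(3.0, 2.0, size=10000)
K, tau = fit_gamma(ti)
print("K =", K, "tau =", tau)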
def sample_scalar(self, shape, a):
    # Draw from a zero-truncated Poisson(a) by inverse-CDF search.
    AMAX = 30
    if a > AMAX:
        # for large a the zero-truncation is negligible
        return np.random.poisson(a, shape)
    k = 1
    K = np.full(shape, k)
    s = a/np.expm1(a)  # P(K=1) = a/(e^a - 1)
    S = s
    U = np.random.random(shape)
    new = S < U
    while np.any(new):
        k += 1
        K[new] = k
        s = s*a/float(k)
        S = S + s
        new = S < U
    return K
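# Quick check (hedged sketch): for a zero-truncated Poisson the mean is
# a / (1 - exp(-a)); `ztp` stands in for an instance of the class above.
a = 2.0
draws = ztp.sample_scalar((100000,), a)
print(draws.mean(), a / (1.0 - np.exp(-a)))  # should be close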
def c(vec):
    """Complement function for probabilities in log-space: robustly computes
    1 - P(A) in log-space.

    Args:
        vec: vector of negative numbers representing log-probabilities of an event.

    Returns:
        The log-probabilities log(1 - P(A)), where log(P(A)) is given in the
        `vec` numpy array.

    Examples:
        >>> c(-1e-200)
        -460.51701859880916

        # np.log(1 - np.exp(-1e-200)) emits a `RuntimeWarning: divide by zero`
    """
    # np.log1p(-np.exp(vec)) is not robust to arguments like -1e-200
    if np.max(np.array(vec)) > 0:
        print('vec', vec)
    return np.log(-np.expm1(vec))
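# Example (hedged): c() agrees with the naive formula where the latter is
# still accurate, but stays finite for log-probabilities very close to 0.
p = np.log(np.array([0.3, 0.9]))
print(c(p))                      # == np.log(1 - np.array([0.3, 0.9]))
print(c(np.array([-1e-200])))    # finite, where log(1 - exp(vec)) fails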
def vmf_logp(x, lon_lat, kappa):
    # Log-density of the von Mises-Fisher distribution on the sphere, with
    # mean direction `lon_lat` (degrees) and concentration `kappa`.
    # `d2r` (pi/180) and `eps` are module-level constants.
    if x[1] < -90. or x[1] > 90.:
        raise ZeroProbability
    if kappa < eps:
        # nearly uniform on the sphere
        return np.log(1. / 4. / np.pi)
    mu = np.array([np.cos(lon_lat[1] * d2r) * np.cos(lon_lat[0] * d2r),
                   np.cos(lon_lat[1] * d2r) * np.sin(lon_lat[0] * d2r),
                   np.sin(lon_lat[1] * d2r)])
    test_point = np.transpose(np.array([np.cos(x[1] * d2r) * np.cos(x[0] * d2r),
                                        np.cos(x[1] * d2r) * np.sin(x[0] * d2r),
                                        np.sin(x[1] * d2r)]))
    logp_elem = np.log(-kappa / (2. * np.pi * np.expm1(-2. * kappa))) + \
        kappa * (np.dot(test_point, mu) - 1.)
    logp = logp_elem.sum()
    return logp
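# Minimal usage sketch (hedged): assumes the module-level constants
# d2r = np.pi / 180 and a small eps, as used by vmf_logp above.
d2r = np.pi / 180.
eps = 1e-10
print(vmf_logp(np.array([30., 45.]), np.array([30., 45.]), 10.))
# at the mean direction this equals log(kappa / (2*pi*(1 - exp(-2*kappa))))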
def _logpdf(self, samples):
    # Log-density of the bivariate Frank copula with parameter theta.
    if self.theta == 0:
        vals = np.zeros(samples.shape[0])
    else:
        vals = np.log(-self.theta * np.expm1(-self.theta)
                      * np.exp(-self.theta
                               * (samples[:, 0] + samples[:, 1]))
                      / (np.expm1(-self.theta)
                         + np.expm1(-self.theta * samples[:, 0])
                         * np.expm1(-self.theta * samples[:, 1])) ** 2)
    return vals
def _logcdf(self, samples):
    # Log of the Frank copula CDF; theta == 0 reduces to independence.
    if self.theta == 0:
        vals = np.sum(np.log(samples), axis=1)
    else:
        old_settings = np.seterr(divide='ignore')
        vals = np.log(-np.log1p(np.expm1(-self.theta * samples[:, 0])
                                * np.expm1(-self.theta * samples[:, 1])
                                / (np.expm1(-self.theta)))) \
            - np.log(self.theta)
        np.seterr(**old_settings)
    return vals
def _ccdf(self, samples):
    # Conditional CDF (h-function) of the first variable given the second.
    if self.theta == 0:
        vals = samples[:, 0]
    else:
        vals = np.exp(-self.theta * samples[:, 1]) \
            * np.expm1(-self.theta * samples[:, 0]) \
            / (np.expm1(-self.theta)
               + np.expm1(-self.theta * samples[:, 0])
               * np.expm1(-self.theta * samples[:, 1]))
    return vals
def _ppcf(self, samples):
    # Inverse of _ccdf: percent-point function of the conditional CDF.
    if self.theta == 0:
        vals = samples[:, 0]
    else:
        vals = -np.log1p(samples[:, 0] * np.expm1(-self.theta)
                         / (np.exp(-self.theta * samples[:, 1])
                            - samples[:, 0] * np.expm1(-self.theta
                                                       * samples[:, 1]))) \
            / self.theta
    return vals
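# Sampling sketch (hedged): conditional inverse sampling from the Frank
# copula using _ppcf; `FrankCopula` is a hypothetical stand-in for the class
# that owns the methods above, with attribute `theta`.
cop = FrankCopula(theta=5.0)
n = 1000
v = np.random.random(n)                    # second margin, uniform
w = np.random.random(n)                    # conditional quantiles
u = cop._ppcf(np.column_stack((w, v)))     # invert the conditional CDF
uv = np.column_stack((u, v))               # pairs distributed as the copula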
def prior_and_posterior_for_category(self, category_to_primitives):
    category_to_prior = {}
    category_to_posterior = {}
    # We want to take the logs of the length prior, but some elements
    # will be zero; entries in the result being -inf is okay, but we want
    # to avoid a potentially alarming warning message.
    M = np.zeros(self.length_prior.shape)
    M[self.length_prior > 0.] = np.log(self.length_prior[self.length_prior > 0.])
    M[self.length_prior <= 0.] = -np.inf
    lengths = np.arange(len(M))
    for category, primitives in category_to_primitives.items():
        base_prior = 0.
        for primitive in primitives:
            base_prior += self.base_prior_by_primitive[primitive]
        # Effectively we calculate the probability of never
        # choosing this category in a rule with 0 primitives, 1
        # primitive, etc., then add those up, scaling by the
        # probability of having 0 primitives, 1 primitive, etc.;
        # for speed and numerical behavior we do this in a
        # vectorized way on a log scale:
        inclusion_prior = -1.0*np.expm1(
            logsumexp(M + (np.log1p(-1.0*base_prior) * lengths))
        )
        category_to_prior[category] = inclusion_prior
        states_in_category = 0
        for state in self.state_ensemble_primitives:
            if state.intersection(primitives):
                states_in_category += 1
        category_to_posterior[category] = (
            states_in_category / float(len(self.state_ensemble))
        )
    return category_to_prior, category_to_posterior
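# Numeric sanity check (hedged, made-up numbers): the inclusion prior is
# 1 - sum_L P(L) * (1 - base_prior)**L, evaluated on a log scale above.
from scipy.special import logsumexp
length_prior = np.array([0.1, 0.5, 0.3, 0.1])   # hypothetical P(length = L)
base_prior = 0.2                                # hypothetical per-slot rate
lengths = np.arange(len(length_prior))
direct = 1.0 - np.sum(length_prior * (1.0 - base_prior) ** lengths)
with np.errstate(divide='ignore'):
    M = np.log(length_prior)                    # -inf where the prior is zero
stable = -np.expm1(logsumexp(M + np.log1p(-base_prior) * lengths))
print(direct, stable)                           # should agree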
def __init__(self, daily_returns, benchmark_daily_returns, risk_free_rate, days, period=DAILY):
    assert len(daily_returns) == len(benchmark_daily_returns)
    self._portfolio = daily_returns
    self._benchmark = benchmark_daily_returns
    self._risk_free_rate = risk_free_rate
    self._annual_factor = _annual_factor(period)
    self._alpha = None
    self._beta = None
    self._sharpe = None
    # compounded total return, computed on a log scale for numerical stability
    self._return = np.expm1(np.log1p(self._portfolio).sum())
    self._annual_return = (1 + self._return) ** (365 / days) - 1
    # self._annual_return = (1 + self._return) ** (self._annual_factor / len(self._portfolio)) - 1
    self._benchmark_return = np.expm1(np.log1p(self._benchmark).sum())
    self._benchmark_annual_return = (1 + self._benchmark_return) ** (365 / days) - 1
    # self._benchmark_annual_return = (1 + self._benchmark_return) ** (self._annual_factor / len(self._portfolio)) - 1
    self._max_drawdown = None
    self._volatility = None
    self._annual_volatility = None
    self._benchmark_volatility = None
    self._benchmark_annual_volatility = None
    self._information_ratio = None
    self._sortino = None
    self._tracking_error = None
    self._annual_tracking_error = None
    self._downside_risk = None
    self._annual_downside_risk = None
    self._calmar = None
def __init__(self, daily_returns, benchmark_daily_returns, risk_free_rate, days, period=DAILY):
    assert len(daily_returns) == len(benchmark_daily_returns)
    self._portfolio = daily_returns
    self._benchmark = benchmark_daily_returns
    self._risk_free_rate = risk_free_rate
    self._annual_factor = _annual_factor(period)
    self._daily_risk_free_rate = self._risk_free_rate / self._annual_factor
    self._alpha = None
    self._beta = None
    self._sharpe = None
    self._return = np.expm1(np.log1p(self._portfolio).sum())
    self._annual_return = (1 + self._return) ** (365 / days) - 1
    self._benchmark_return = np.expm1(np.log1p(self._benchmark).sum())
    self._benchmark_annual_return = (1 + self._benchmark_return) ** (365 / days) - 1
    self._max_drawdown = None
    self._volatility = None
    self._annual_volatility = None
    self._benchmark_volatility = None
    self._benchmark_annual_volatility = None
    self._information_ratio = None
    self._sortino = None
    self._tracking_error = None
    self._annual_tracking_error = None
    self._downside_risk = None
    self._annual_downside_risk = None
    self._calmar = None
    self._avg_excess_return = None
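# Note (standalone check): expm1(log1p(r).sum()) is the compounded total
# return prod(1 + r) - 1, computed on a log scale for numerical stability.
r = np.array([0.01, -0.02, 0.015])
print(np.expm1(np.log1p(r).sum()), np.prod(1 + r) - 1)  # identical values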
def pdf_direct(self, tt, N=50):
    # Series evaluation of the waiting-time density: a sum of gamma terms
    # weighted by a zero-truncated Poisson over the number of stages.
    a = self.K
    t = tt/self.tau
    S = np.ones_like(t)
    s = np.ones_like(t)
    for k in range(1, N):
        s *= (a*t)/(k*(k+1.))
        S += s
    return np.exp(-t)*a/np.expm1(a) * S / self.tau
def pdf_bessel(self, tt):
    # Closed form of the same density via the modified Bessel function I_1
    # (`iv` is scipy.special.iv, imported elsewhere in the module).
    a = self.K
    t = tt/self.tau
    return np.exp(-t)*np.sqrt(a/t)*iv(1., 2.*np.sqrt(a*t))/self.tau/np.expm1(a)
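# Consistency sketch (hedged): the truncated series and the Bessel closed
# form should agree; `dist` stands in for an instance with K and tau set.
tt = np.linspace(0.1, 10., 5)
print(dist.pdf_direct(tt))
print(dist.pdf_bessel(tt))   # same values up to series truncation error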
def cdf(self, tt, N=50):
    # CDF as a Poisson-weighted sum of gamma CDFs (`sp` is scipy, with
    # scipy.stats imported).
    a = self.K
    tau = self.tau
    gamma = sp.stats.gamma.cdf
    S = np.zeros_like(tt)
    s = 1.
    for k in range(1, N):
        s *= a/k
        S += s*gamma(tt, k, scale=tau)
    return 1./np.expm1(a) * S
def cfd_vec(self, tt, p, N=50):
    # CDF that takes the parameters as vector input, for fitting:
    # tt is a (1, n) row of times; p stacks (K, tau) rows.
    a = p[:, 0:1]
    tau = p[:, 1:2]
    gamma = sp.stats.gamma.cdf
    S = np.zeros((p.shape[0], tt.shape[1]))  # accumulate from k = 1, as in cdf()
    s = np.ones((1, tt.shape[0]))
    for k in range(1, N):
        s = s*a/k
        S = S + s*gamma(tt, k, scale=tau)
    return 1./np.expm1(a) * S
def f(x):
    return np.exp(x)/(2.*np.expm1(x)/x - 1.)
def f1(b, N=50):
    # `summand` is defined elsewhere in the source module
    k = np.arange(1, N)
    return np.sum(summand(k, b))*1./np.expm1(b) - np.log(b*np.exp(b)/np.expm1(b))
def h(x):
    # `g` is defined elsewhere in the source module
    return np.sqrt(g(x)*(2. - x/np.expm1(x)))
def poisson_from_positiveK(mean):
    # solve x/(1 - exp(-x)) == mean; `solve` is a Newton helper from the module
    def f(x):
        return x/(1. - np.exp(-x))

    def f1(x):
        # derivative of f, rewritten via expm1 and cosh for stability
        return (np.expm1(x) - x)/(2.*np.cosh(x) - 2.)
    x = solve(mean, f, f1, mean, n=10)
    return x
def f(x):
    # assumes exp/expm1 imported into the namespace, e.g. from numpy
    return exp(x)/(2.*expm1(x)/x - 1.)
def f(b, N=50):
    k = np.arange(1, N)
    return np.sum(summand(k, b))*1./np.expm1(b) - np.log(b*np.exp(b)/np.expm1(b))
def poisson_from_positiveK(mean):
    # solve x/(1 - exp(-x)) == mean
    def f(x):
        return x/(1. - np.exp(-x))

    def f1(x):
        return (np.expm1(x) - x)/(2.*np.cosh(x) - 2.)
    x = solve_newton(mean, f, f1, mean, n=10)
    return x
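# Hedged sketch: `solve` / `solve_newton` are not shown in these snippets.
# A minimal Newton helper consistent with the calls above (an assumption,
# not the original helper) could look like this:
def solve_newton(C, f, f1, x0, n=10):
    # iterate x <- x - (f(x) - C)/f'(x) to solve f(x) == C
    x = x0
    for _ in range(n):
        x = x - (f(x) - C)/f1(x)
    return x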
def sample_(self, shape, a):
    # Vectorized zero-truncated Poisson sampler for array-valued rates.
    # `broadcast_mask` is a helper defined elsewhere in the module.
    if np.isscalar(a) or a.size == 1:
        return self.sample_scalar(shape, a)
    AMAX = 30
    k = 1
    K = np.full(shape, k)
    # for large a values, just use the non-truncated Poisson
    large = broadcast_mask(a > AMAX)
    small = broadcast_mask(a <= AMAX)
    K[large] = np.random.poisson(a[large], K[large].shape)
    Ksmall = K[small]
    a = a[small]
    # actual algorithm begins here
    s = a/np.expm1(a)
    S = s
    U = np.random.random(Ksmall.shape)
    new = S < U
    while np.any(new):
        k += 1
        Ksmall[new] = k
        s = s*a/float(k)
        S = S + s
        new = S < U
    K[small] = Ksmall
    return K
def pdf_(self, x, a):
    # pmf of the zero-truncated Poisson: Poisson pmf rescaled by 1/(1 - e^-a)
    return stats.poisson.pmf(x, a)*np.exp(a)/np.expm1(a)
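# Sanity check (standalone): the zero-truncated pmf sums to 1 over k >= 1,
# since exp(a)/expm1(a) == 1/(1 - exp(-a)).
from scipy import stats
a = 2.5
k = np.arange(1, 100)
print(np.sum(stats.poisson.pmf(k, a) * np.exp(a) / np.expm1(a)))  # ~1.0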
def backward(self, y):
    r"""
    Inverse of the softplus transform:

    .. math::
       x = \log(\exp(y) - 1)

    The bound for the input y is [self._lower, inf[; self._lower is
    subtracted prior to any calculations. The implementation avoids overflow
    explicitly by applying the log-sum-exp trick:

    .. math::
       \log(\exp(y) - \exp(0)) &= y_s + \log(\exp(y - y_s) - \exp(-y_s)) \\
                               &= y_s + \log(1 - \exp(-y_s)),
       \quad y_s = \max(0, y)

    As y cannot be negative, :math:`y_s` could be replaced with y itself.
    However, in case :math:`y = 0` this results in np.log(0). Hence the zero
    is replaced by a machine epsilon:

    .. math::
       y_s = \max(\epsilon, y)
    """
    ys = np.maximum(y - self._lower, np.finfo(settings.float_type).eps)
    return ys + np.log(-np.expm1(-ys))
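# Numeric check (standalone) of the identity used in backward():
# log(exp(y) - 1) == y + log(-expm1(-y)) for y > 0, without overflow.
y = np.array([1e-8, 1.0, 800.0])
with np.errstate(over='ignore'):
    naive = np.log(np.exp(y) - 1.0)    # overflows to inf at y = 800
stable = y + np.log(-np.expm1(-y))     # stays finite
print(naive, stable)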
def norm_y_inv(y_bc):
    # invert a Box-Cox transform applied to log1p(y): undo Box-Cox, then
    # expm1; `norm_y_lambda` is a module-level constant
    return np.expm1((y_bc * norm_y_lambda + 1)**(1/norm_y_lambda))
def expm1(self, out=None):
    # element-wise expm1, delegated to the container's elemwise machinery
    assert out is None
    return self.elemwise(np.expm1)
def step_math(self, dt, J, spiked, voltage, refractory_time):
    # reduce all refractory times by dt
    refractory_time -= dt

    # compute effective dt for each neuron, based on remaining time.
    # note that refractory times that have completed midway into this
    # timestep will be given a partial timestep, and moreover these will
    # be subtracted to zero at the next timestep (or reset by a spike)
    delta_t = (dt - refractory_time).clip(0, dt)

    # update voltage using discretized lowpass filter
    # since v(t) = v(0) + (J - v(0))*(1 - exp(-t/tau)) assuming
    # J is constant over the interval [t, t + dt)
    voltage -= (J - voltage) * np.expm1(-delta_t / self.tau_rc)

    # determine which neurons spiked (set them to 1/dt, else 0)
    spiked_mask = voltage > 1
    spiked[:] = spiked_mask / dt

    # set v(0) = 1 and solve for t to compute the spike time
    t_spike = dt + self.tau_rc * np.log1p(
        -(voltage[spiked_mask] - 1) / (J[spiked_mask] - 1))

    # set spiked voltages to zero, refractory times to tau_ref, and
    # rectify negative voltages to a floor of min_voltage
    voltage[voltage < self.min_voltage] = self.min_voltage
    voltage[spiked_mask] = 0
    refractory_time[spiked_mask] = self.tau_ref + t_spike
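# Quick numeric check (standalone): the expm1 update above is the exact
# exponential step v <- v + (J - v) * (1 - exp(-dt/tau_rc)), i.e.
# v(t + dt) = J + (v - J) * exp(-dt/tau_rc).
v, J, dt, tau_rc = 0.2, 1.5, 1e-3, 0.02
exact = J + (v - J) * np.exp(-dt / tau_rc)
stepped = v - (J - v) * np.expm1(-dt / tau_rc)
print(exact, stepped)  # identical up to floating point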
def test_numpy_method():
    # This type of code is used frequently by PyMC3 users.
    # Assumes: import theano; import theano.tensor as tt;
    # from theano.tests import unittest_tools as utt.
    x = tt.dmatrix('x')
    data = np.random.rand(5, 5)
    x.tag.test_value = data
    for fct in [np.arccos, np.arccosh, np.arcsin, np.arcsinh,
                np.arctan, np.arctanh, np.ceil, np.cos, np.cosh, np.deg2rad,
                np.exp, np.exp2, np.expm1, np.floor, np.log,
                np.log10, np.log1p, np.log2, np.rad2deg,
                np.sin, np.sinh, np.sqrt, np.tan, np.tanh, np.trunc]:
        y = fct(x)
        f = theano.function([x], y)
        utt.assert_allclose(np.nan_to_num(f(data)),
                            np.nan_to_num(fct(data)))