import random

import numpy as np


def sample_hparams():
    """Draw one value per hyperparameter from `ranges`, assumed to be a
    module-level dict mapping names to LogRange / LinearRange pairs."""
    hparams = {}
    for k, sample_range in ranges.items():
        if isinstance(sample_range, (LogRange, LinearRange)):
            if isinstance(sample_range[0], int):
                # LogRange not valid for ints
                hparams[k] = random.randint(sample_range[0], sample_range[1])
            elif isinstance(sample_range[0], float):
                start, end = sample_range
                if isinstance(sample_range, LogRange):
                    start, end = np.log10(start), np.log10(end)
                choice = np.random.uniform(start, end)
                if isinstance(sample_range, LogRange):
                    # undo the base-10 log (np.exp would undo a natural log)
                    choice = 10 ** choice
                hparams[k] = choice
    return hparams
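# Minimal usage sketch (assumptions, not part of the snippet above: LogRange
# and LinearRange are simple (start, end) tuple types, and `ranges` is the
# module-level dict the function reads).
from collections import namedtuple

LogRange = namedtuple('LogRange', ['start', 'end'])
LinearRange = namedtuple('LinearRange', ['start', 'end'])

ranges = {
    'learning_rate': LogRange(1e-5, 1e-1),  # floats + LogRange -> log-uniform
    'dropout': LinearRange(0.0, 0.5),       # floats + LinearRange -> uniform
    'num_layers': LinearRange(1, 8),        # ints -> randint
}

print(sample_hparams())
# e.g. {'learning_rate': 0.00031, 'dropout': 0.21, 'num_layers': 3}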
from scipy.special import betainc


def compHistDistance(h1, h2):
    def normalize(h):
        if np.sum(h) == 0:
            return h
        else:
            return h / np.sum(h)

    def smoothstep(x, x_min=0., x_max=1., k=2.):
        m = 1. / (x_max - x_min)
        b = - m * x_min
        x = m * x + b
        return betainc(k, k, np.clip(x, 0., 1.))

    def fn(X, Y, k):
        return (4. * (1. - smoothstep(Y, 0, (1 - Y) * X + Y + .1))
                * np.sqrt(2 * X) * smoothstep(X, 0., 1. / k, 2)
                + 2. * smoothstep(Y, 0, (1 - Y) * X + Y + .1)
                * (1. - 2. * np.sqrt(2 * X) * smoothstep(X, 0., 1. / k, 2) - 0.5))

    h1 = normalize(h1)
    h2 = normalize(h2)
    return max(0, np.sum(fn(h2, h1, len(h1))))
    # return np.sum(np.where(h2 != 0, h2 * np.log10(h2 / (h1 + 1e-10)), 0))  # KL divergence
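# Quick sanity check (hypothetical data): identical histograms should give a
# distance of ~0, disjoint histograms a clearly positive one.
print(compHistDistance(np.array([10., 0., 0., 0.]),
                       np.array([10., 0., 0., 0.])))  # ~0.0
print(compHistDistance(np.array([10., 0., 0., 0.]),
                       np.array([0., 0., 0., 10.])))  # > 0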
def export(self, fileName):
"""
Export data from the ImageView to a file, or to a stack of files if
the data is 3D. Saving an image stack will result in index numbers
being added to the file name. Images are saved as they would appear
onscreen, with levels and lookup table applied.
"""
img = self.getProcessedImage()
if self.hasTimeAxis():
        base, ext = os.path.splitext(fileName)
        # build a name pattern with zero-padded frame indices, e.g. "%s%03d%s"
        fmt = "%%s%%0%dd%%s" % int(np.log10(img.shape[0])+1)
for i in range(img.shape[0]):
self.imageItem.setImage(img[i], autoLevels=False)
self.imageItem.save(fmt % (base, i, ext))
self.updateImage()
else:
self.imageItem.save(fileName)
def logTickValues(self, minVal, maxVal, size, stdTicks):
## start with the tick spacing given by tickValues().
## Any level whose spacing is < 1 needs to be converted to log scale
ticks = []
for (spacing, t) in stdTicks:
if spacing >= 1.0:
ticks.append((spacing, t))
if len(ticks) < 3:
v1 = int(np.floor(minVal))
v2 = int(np.ceil(maxVal))
#major = list(range(v1+1, v2))
        minor = []
        for v in range(v1, v2):
            # nine minor ticks per decade, at log10(1), log10(2), ... log10(9)
            minor.extend(v + np.log10(np.arange(1, 10)))
        minor = [x for x in minor if x > minVal and x < maxVal]
ticks.append((None, minor))
return ticks
def riess_sn_fit(app_mag_s, app_mag_err_s, z_s, sig_int_s):
# helpful parameters. only fitting an intercept here
n_s = len(app_mag_s)
n_obs = n_s
n_par = 1
y_vec = np.zeros(n_obs)
l_mat = np.zeros((n_obs, n_par))
c_mat_inv = np.zeros((n_obs, n_obs))
# loop through SNe
k = 0
for i in range(0, n_s):
y_vec[k] = np.log10(z2d(z_s[i])) - 0.2 * app_mag_s[i]
l_mat[k, 0] = 1.0
c_mat_inv[k, k] = 1.0 / 0.2 ** 2 / \
(app_mag_err_s[i] ** 2 + sig_int_s ** 2)
k += 1
# fit, calculate residuals in useable form and return
ltci = np.dot(l_mat.transpose(), c_mat_inv)
q_hat_cov = np.linalg.inv(np.dot(ltci, l_mat))
q_hat = np.dot(np.dot(q_hat_cov, ltci), y_vec)
res = y_vec - np.dot(l_mat, q_hat)
return q_hat, np.sqrt(np.diag(q_hat_cov)), res
def absolute_magnitude(parallax, m):
"""Calculate the absolute magnitude based on distance and apparent mag.
Inputs
------
parallax : float
The parallax in mas
m : float
The apparent magnitude
Output
------
M : float
The absolute magnitude
"""
d = 1. / (parallax*1e-3) # Conversion to arcsecond before deriving distance
mu = 5 * np.log10(d) - 5
M = m - mu
return M
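# Worked example: Vega has a Hipparcos parallax of roughly 130.2 mas and an
# apparent magnitude of about 0.03, which yields M ~ 0.6:
print(absolute_magnitude(130.2, 0.03))  # ~0.60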
def plot_mean_debye(sol, ax):
x = np.log10(sol[0]["data"]["tau"])
x = np.linspace(min(x), max(x),100)
list_best_rtd = [100*np.sum([a*(x**i) for (i, a) in enumerate(s["params"]["a"])], axis=0) for s in sol]
# list_best_rtd = [s["fit"]["best"] for s in sol]
y = np.mean(list_best_rtd, axis=0)
y_min = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] - sol[0]["params"]["a_std"])], axis=0)
y_max = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] + sol[0]["params"]["a_std"])], axis=0)
ax.errorbar(10**x[(x>-6)&(x<2)], y[(x>-6)&(x<2)], None, None, "-", color='blue',linewidth=2, label="Mean RTD", zorder=10)
plt.plot(10**x[(x>-6)&(x<2)], y_min[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1, label="RTD range")
plt.plot(10**x[(x>-6)&(x<2)], y_max[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1)
plt.fill_between(sol[0]["data"]["tau"], 100*(sol[0]["params"]["m_"]-sol[0]["params"]["m__std"]) , 100*(sol[0]["params"]["m_"]+sol[0]["params"]["m__std"]), color='lightgray', alpha=1, zorder=-1, label="RTD SD")
ax.set_xlabel("Relaxation time (s)", fontsize=14)
ax.set_ylabel("Chargeability (%)", fontsize=14)
    plt.yticks(fontsize=14)
    plt.xticks(fontsize=14)
plt.xscale("log")
ax.set_xlim([1e-6, 1e1])
ax.set_ylim([0, 5.0])
ax.legend(loc=1, fontsize=12)
# ax.set_title(title+" step method", fontsize=14)
def vecnorm(vec, norm, epsilon=1e-3):
    """
    Normalize a vector according to the given norm ('prob', 'max1' or
    'logmax1'), with additive epsilon smoothing (so even a zero vector
    gets a positive norm).
    """
    if norm not in ('prob', 'max1', 'logmax1'):
        raise ValueError("'%s' is not a supported norm. Currently supported norms "
                         "include 'prob', 'max1' and 'logmax1'." % norm)
if isinstance(vec, np.ndarray):
vec = np.asarray(vec, dtype=float)
if norm == 'prob':
veclen = np.sum(np.abs(vec)) + epsilon * len(vec) # smoothing
elif norm == 'max1':
veclen = np.max(vec) + epsilon
elif norm == 'logmax1':
vec = np.log10(1. + vec)
veclen = np.max(vec) + epsilon
if veclen > 0.0:
return (vec + epsilon) / veclen
else:
return vec
else:
raise ValueError('vec should be ndarray, found: %s' % type(vec))
def normalizeMTX(MTX, logScale=False):
    """ Normalizes a matrix to [0 ... 1]
    Parameters
    ----------
    MTX : array_like
        Matrix to be normalized
    logScale : bool
        Toggle conversion logScale [Default: False]
    Returns
    -------
    MTX : array_like
        Normalized Matrix
    """
    MTX -= MTX.min()
    MTX /= MTX.max()
    if logScale:
        MTX += 0.00001  # avoid log10(0); values now in [1e-5 ... 1.00001]
        MTX = _np.log10(_np.abs(MTX))  # -> [-5 ... log10(1.00001)]
        MTX += 5
        MTX /= 5.000004343  # = 5 + log10(1.00001), rescales to [0 ... 1]
    # MTX = 20 * _np.log10(_np.abs(MTX))
    return MTX
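# Usage sketch for normalizeMTX (note the snippet above assumes numpy was
# imported as _np):
import numpy as _np

mtx = _np.random.rand(4, 4) * 100.
print(normalizeMTX(mtx.copy()).min(), normalizeMTX(mtx.copy()).max())  # 0.0 1.0
print(normalizeMTX(mtx.copy(), logScale=True).max())  # ~1.0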
def db(data, power=False):
'''Convenience function to calculate the 20*log10(abs(x))
Parameters
----------
data : array_like
signals to be converted to db
power : boolean
data is a power signal and only needs factor 10
Returns
-------
db : array_like
20 * log10(abs(data))
'''
if power:
factor = 10
else:
factor = 20
return factor * np.log10(np.abs(data))
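# Usage sketch: a factor-of-10 amplitude ratio is 20 dB, while the same
# ratio treated as a power quantity is 10 dB.
print(db(10.0))                # 20.0
print(db(10.0, power=True))    # 10.0
print(db(np.array([1., .1])))  # [  0. -20.]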
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
    # Build a matching histogram (as in DRMM-style models): for each term of
    # text 1, bin its embedding similarities to every term of text 2.
    mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
    d1len = len(self.data1[t1])
    if self.use_hist_feats:
        assert (t1, t2) in self.hist_feats
        caled_hist = np.reshape(self.hist_feats[(t1, t2)], (d1len, hist_size))
        if d1len < data1_maxlen:
            mhist[:d1len, :] = caled_hist[:, :]
        else:
            mhist[:, :] = caled_hist[:data1_maxlen, :]
    else:
        t1_rep = self.embed[self.data1[t1]]
        t2_rep = self.embed[self.data2[t2]]
        mm = t1_rep.dot(np.transpose(t2_rep))
        for (i, j), v in np.ndenumerate(mm):
            if i >= data1_maxlen:
                break
            # map a similarity v in [-1, 1] to a bin in [0, hist_size - 1]
            vid = int((v + 1.) / 2. * (hist_size - 1.))
            mhist[i][vid] += 1.
    mhist += 1.
    mhist = np.log10(mhist)  # log-count histogram
    return mhist
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
t1_cont = list(self.data1[t1])
t2_cont = list(self.data2[t2])
d1len = len(t1_cont)
if self.use_hist_feats:
assert (t1, t2) in self.hist_feats
caled_hist = np.reshape(self.hist_feats[(t1, t2)], (d1len, hist_size))
if d1len < data1_maxlen:
mhist[:d1len, :] = caled_hist[:, :]
else:
mhist[:, :] = caled_hist[:data1_maxlen, :]
else:
t1_rep = self.embed[t1_cont]
t2_rep = self.embed[t2_cont]
mm = t1_rep.dot(np.transpose(t2_rep))
for (i,j), v in np.ndenumerate(mm):
if i >= data1_maxlen:
break
vid = int((v + 1.) / 2. * ( hist_size - 1.))
mhist[i][vid] += 1.
mhist += 1.
mhist = np.log10(mhist)
return mhist
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
t1_cont = list(self.data1[t1])
t2_cont = list(self.data2[t2])
d1len = len(t1_cont)
if self.use_hist_feats:
assert (t1, t2) in self.hist_feats
curr_pair_feats = list(self.hist_feats[(t1, t2)])
caled_hist = np.reshape(curr_pair_feats, (d1len, hist_size))
if d1len < data1_maxlen:
mhist[:d1len, :] = caled_hist[:, :]
else:
mhist[:, :] = caled_hist[:data1_maxlen, :]
else:
t1_rep = self.embed[t1_cont]
t2_rep = self.embed[t2_cont]
mm = t1_rep.dot(np.transpose(t2_rep))
for (i,j), v in np.ndenumerate(mm):
if i >= data1_maxlen:
break
vid = int((v + 1.) / 2. * ( hist_size - 1.))
mhist[i][vid] += 1.
mhist += 1.
mhist = np.log10(mhist)
return mhist
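# The core binning idea of cal_hist as a standalone sketch (hypothetical
# helper; assumes the similarity values lie in [-1, 1], e.g. dot products of
# L2-normalized embeddings):
def matching_histogram(sim_matrix, hist_size):
    hist = np.zeros((sim_matrix.shape[0], hist_size), dtype=np.float32)
    for (i, j), v in np.ndenumerate(sim_matrix):
        vid = int((v + 1.) / 2. * (hist_size - 1.))  # [-1, 1] -> bin index
        hist[i][vid] += 1.
    return np.log10(hist + 1.)  # log-count histogram, as in cal_hist

print(matching_histogram(np.array([[1., -1., 0.], [.5, .5, .5]]), 5))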
def SLcomputePSNR(X, Xnoisy):
"""
SLcomputePSNR Compute peak signal to noise ratio (PSNR).
Usage:
PSNR = SLcomputePSNR(X, Xnoisy)
Input:
X: 2D or 3D signal.
Xnoisy: 2D or 3D noisy signal.
Output:
PSNR: The peak signal to noise ratio (in dB).
"""
MSEsqrt = np.linalg.norm(X-Xnoisy) / np.sqrt(X.size)
if MSEsqrt == 0:
return np.inf
else:
return 20 * np.log10(255 / MSEsqrt)
def SLcomputeSNR(X, Xnoisy):
"""
SLcomputeSNR Compute signal to noise ratio (SNR).
Usage:
SNR = SLcomputeSNR(X, Xnoisy)
Input:
X: 2D or 3D signal.
Xnoisy: 2D or 3D noisy signal.
Output:
SNR: The signal to noise ratio (in dB).
"""
    if np.linalg.norm(X - Xnoisy) == 0:
        return np.inf
    else:
        return 10 * np.log10(np.sum(np.power(X, 2)) / np.sum(np.power(X - Xnoisy, 2)))
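# Usage sketch: compare a clean 8-bit-range image against a noisy copy.
# With Gaussian noise of sigma = 5, PSNR should come out near
# 20*log10(255/5) ~ 34 dB.
X = np.tile(np.arange(256, dtype=float), (16, 1))
Xnoisy = X + np.random.normal(0., 5., X.shape)
print(SLcomputePSNR(X, Xnoisy))  # ~34
print(SLcomputeSNR(X, Xnoisy))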
def generate(self, n=1):
"""
Generate a sample of luminosity values within [min, max] from
the above luminosity distribution.
"""
results = []
    # Get the maximum value of the flux number density function, which is
    # monotonically decreasing, so the maximum is attained at fmin.
    M = self.fluxDensity(self.fmin)
for i in range(n):
while True:
u = np.random.uniform() * M
y = 10 ** np.random.uniform(low=np.log10(self.fmin),
high=np.log10(self.fmax))
if u <= self.fluxDensity(y):
results.append(y)
break
return results
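# The same accept/reject pattern as generate() above, as a standalone sketch
# (hypothetical density: a decreasing power law on [fmin, fmax], so its
# maximum sits at fmin, with log-uniform proposals as above):
def sample_decreasing_density(fmin=1., fmax=100., n=5):
    density = lambda x: x ** -2.5
    M = density(fmin)
    out = []
    while len(out) < n:
        u = np.random.uniform() * M
        y = 10 ** np.random.uniform(np.log10(fmin), np.log10(fmax))
        if u <= density(y):
            out.append(y)
    return out

print(sample_decreasing_density())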
def eval(self, t):
# given a time vector t, return the design matrix column vector(s)
if self.type is None:
return np.array([])
hl = np.zeros((t.shape[0],))
ht = np.zeros((t.shape[0],))
    if self.type in (0, 2):
        # logarithmic transient, zero before the event epoch self.year
        hl[t >= self.year] = np.log10(1 + (t[t >= self.year] - self.year) / self.T)
    if self.type in (1, 2):
        # step (offset) at the event epoch
        ht[t >= self.year] = 1
return np.append(ht,hl) if np.any(hl) else ht
def PlotAppRes3Layers_wrapper(fmin, fmax, nbdata, h1, h2, rhol1, rhol2, rhol3,
                              mul1, mul2, mul3, epsl1, epsl2, epsl3,
                              PlotEnvelope, F_Envelope):
    frangn = frange(np.log10(fmin), np.log10(fmax), nbdata)
    # default 3-layer model parameters: conductivities, thicknesses,
    # permittivities, permeabilities, chargeabilities, time constants and
    # Cole-Cole exponents; layer values are overwritten below
    sig3 = np.array([0., 0.001, 0.1, 0.001])
    thick3 = np.array([120000., 50., 50.])
    eps3 = np.array([1., 1., 1., 1.])
    mu3 = np.array([1., 1., 1., 1.])
    chg3 = np.array([0., 0.1, 0., 0.2])
    chg3_0 = np.array([0., 0.1, 0., 0.])
    taux3 = np.array([0., 0.1, 0., 0.1])
    c3 = np.array([1., 1., 1., 1.])
    sig3[1] = 1. / rhol1
    sig3[2] = 1. / rhol2
    sig3[3] = 1. / rhol3
    mu3[1] = mul1
    mu3[2] = mul2
    mu3[3] = mul3
    eps3[1] = epsl1
    eps3[2] = epsl2
    eps3[3] = epsl3
    thick3[1] = h1
    thick3[2] = h2
    PlotAppRes(frangn, thick3, sig3, chg3_0, taux3, c3, mu3, eps3, 3,
               F_Envelope, PlotEnvelope)
def quenchedfrac_zfourge(M, z):
    # quiescent fraction from ZFOURGE double-Schechter mass-function fits,
    # linearly interpolated in redshift between the two bracketing epochs
    par_q1, par_q2 = zfourgeparams(z, type='quiescent')
x_q1 = 10.**(M-par_q1[1])
dn_q1 = np.log10(np.log(10)*np.exp(-1.*x_q1)*x_q1*(10.**par_q1[3]*x_q1**(par_q1[2]) + 10.**(par_q1[5])*x_q1**(par_q1[4])))
x_q2 = 10.**(M-par_q2[1])
dn_q2 = np.log10(np.log(10)*np.exp(-1.*x_q2)*x_q2*(10.**par_q2[3]*x_q2**(par_q2[2]) + 10.**(par_q2[5])*x_q2**(par_q2[4])))
par_sf1, par_sf2 = zfourgeparams(z, type='star-forming')
x_sf1 = 10.**(M-par_sf1[1])
dn_sf1 = np.log10(np.log(10)*np.exp(-1.*x_sf1)*x_sf1*(10.**par_sf1[3]*x_sf1**(par_sf1[2]) + 10.**(par_sf1[5])*x_sf1**(par_sf1[4])))
x_sf2 = 10.**(M-par_sf2[1])
dn_sf2 = np.log10(np.log(10)*np.exp(-1.*x_sf2)*x_sf2*(10.**par_sf2[3]*x_sf2**(par_sf2[2]) + 10.**(par_sf2[5])*x_sf2**(par_sf2[4])))
fq1 = 10.**dn_q1/(10.**dn_q1+10.**dn_sf1)
fq2 = 10.**dn_q2/(10.**dn_q2+10.**dn_sf2)
return (fq1*(par_q2[0]-z)+fq2*(z-par_q1[0]))/(par_q2[0]-par_q1[0])
# ------ OBSOLETE, left in for backwards-compatibility ------ #
def __scale_coefficient(self, result, result_index, t, sum_log=False):
    """
    Rescale column t of the selected probability matrix so that it sums to
    one (the standard HMM scaling trick used to avoid numerical underflow).
    :param result: list of probability matrices
    :param result_index: index of the matrix to scale
    :param t: time step (column index) to rescale
    :param sum_log: if True, accumulate log10 of the scaling constant into
        the running c coefficient
    :return:
    """
    sum_column = np.sum(result[result_index][:, t], axis=0)
    if sum_column == 0.:
        # degenerate column: fall back to a uniform distribution
        result[result_index][:, t] = 1. / len(self.__states)
        sum_column = 1.
    result[result_index][:, t] /= sum_column
    if sum_log:
        self.__c_coefficient += math.log10(sum_column)
def process(self, **kwargs):
"""Process module."""
self._rest_times = kwargs['rest_times']
self._rest_t_explosion = kwargs[self.key('resttexplosion')]
outputs = OrderedDict()
max_times = max(self._rest_times)
if max_times > self._rest_t_explosion:
outputs['dense_times'] = np.unique(
np.concatenate(([0.0], [
x + self._rest_t_explosion
for x in np.logspace(
self.L_T_MIN,
np.log10(max_times - self._rest_t_explosion),
num=self._n_times)
], self._rest_times)))
else:
outputs['dense_times'] = np.array(self._rest_times)
outputs['dense_indices'] = np.searchsorted(
outputs['dense_times'], self._rest_times)
return outputs
def volcano(data):
if len(data.index.levels[1]) != 2:
raise Exception('Volcano requires secondary index with two values')
indexA, indexB = data.index.levels[1]
dataA = data.xs(indexA, level=1)
dataB = data.xs(indexB, level=1)
meanA = dataA.mean(axis=0)
meanB = dataB.mean(axis=0)
change = meanB.div(meanA)
statistic, pvalues = ttest_ind(dataA, dataB)
pvalues = pd.DataFrame(
[statistic, pvalues, -np.log10(pvalues), change, np.log2(change)],
columns=data.columns,
index=['t', 'p', '-log10(p)', 'foldchange', 'log2(foldchange)']).transpose()
return pvalues
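# Usage sketch (hypothetical toy data): volcano() expects a DataFrame whose
# secondary index carries exactly two group labels.
import numpy as np
import pandas as pd
from scipy.stats import ttest_ind

idx = pd.MultiIndex.from_product([range(3), ['ctrl', 'treat']])
toy = pd.DataFrame(np.random.rand(6, 2) + 1., index=idx,
                   columns=['geneA', 'geneB'])
print(volcano(toy))  # per-gene t, p, -log10(p), foldchange, log2(foldchange)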