def __init__(self):
QtWidgets.QWidget.__init__(self)
uic.loadUi("window.ui", self)
self.scene = QGraphicsScene(0, 0, 711, 601)
self.scene.win = self
self.view.setScene(self.scene)
self.image = QImage(710, 600, QImage.Format_Alpha8)
    self.image.fill(black)  # `black` is assumed to be defined elsewhere, e.g. Qt.black
    self.pen = QPen(black)
self.draw.clicked.connect(lambda: draw(self))
self.dial_x.valueChanged.connect(lambda: draw(self))
self.dial_y.valueChanged.connect(lambda: draw(self))
self.dial_z.valueChanged.connect(lambda: draw(self))
self.funcs.addItem("cos(x) * sin(z)")
self.funcs.addItem("2 * cos(x * z)")
self.funcs.addItem("exp(sin(sqrt(x^2 + z^2)))")
self.funcs.addItem("x^2 / 20 + z^2 / 20")
self.funcs.addItem("|sin(x) * sin(z)|")
Python exp() example source code
def exp_damped_minute_difference(self, dt1, dt2, alpha):
"""
Computes exp(-alpha * t), where t is the difference between two
datetimes in minutes.
Args:
dt1: A datetime such that dt1 >= dt2.
dt2: A datetime such that dt1 >= dt2.
alpha: A nonnegative float representing the damping factor.
Returns:
A float equal to exp(-alpha * t), where t is the difference between
two datetimes in minutes.
"""
if dt1 < dt2:
raise ValueError('Must have dt1 >= dt2')
if alpha < 0:
raise ValueError('Must have alpha >= 0')
t = self.minute_difference(dt1, dt2)
return math.exp(-alpha * t)
def exp_damped_day_difference(self, dt1, dt2, alpha):
"""
Computes exp(-alpha * t), where t is the difference between two
datetimes in days.
Args:
dt1: A datetime such that dt1 >= dt2.
dt2: A datetime such that dt1 >= dt2.
alpha: A nonnegative float representing the damping factor.
Returns:
A float equal to exp(-alpha * t), where t is the difference between
two datetimes in days.
"""
if dt1 < dt2:
raise ValueError('Must have dt1 >= dt2')
if alpha < 0:
raise ValueError('Must have alpha >= 0')
minute_diff = self.minute_difference(dt1, dt2)
day_diff = float(minute_diff) / (self.HOURS_PER_DAY * self.MINUTES_PER_HOUR)
return math.exp(-alpha * day_diff)
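A minimal sketch of the damping these helpers apply, using plain math.exp with an illustrative damping factor and a hypothetical two-day gap:

import math

alpha = 0.1      # illustrative damping factor
day_diff = 2.0   # hypothetical gap between the reference datetime and the message, in days
weight = math.exp(-alpha * day_diff)
print(weight)    # ~0.819: a two-day-old message still contributes ~82% of a fresh one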
Gaussian_sampling.py source code (project: Lattice-Based-Signatures, author: krishnacharya)
def Bernoulli_exp(x):
    '''
    Description:
        Algorithm 8 in the BLISS paper.
        Sample according to exp(-x/f) for x in [0, 2^l),
        where x is an integer in binary form of length l
        and f is a real, fixed via the precomputed table c.
    i/p:
        x: int
    '''
    bin_rep = [int(b) for b in bin(x)[2:]]  # list of 0's and 1's representing x; msb is first as usual
    d = len(bin_rep)  # length of the current integer in binary, d <= l
# starting from l-1, as then smallest probabilities are checked first and algorithm terminates faster
for i in range(0, d):
if(bin_rep[i]):
A = Bernoulli_rv(c[d-i-1])
if not A:
return 0
return 1
# uses the same fixed real f
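Bernoulli_exp above relies on a precomputed table c and a helper Bernoulli_rv defined elsewhere in the project. A self-contained sketch of the same idea, assuming the table follows Algorithm 8 of the BLISS paper with c[i] = exp(-2**i / f):

import math
import random

def bernoulli_exp_sketch(x, f, l=16):
    """Return 1 with probability exp(-x/f) for an integer x in [0, 2**l)."""
    c = [math.exp(-(2 ** i) / f) for i in range(l)]   # c[i] = exp(-2^i / f)
    for i in range(l):
        if (x >> i) & 1:                              # for every set bit of x ...
            if random.random() >= c[i]:               # ... draw Bernoulli(exp(-2^i / f))
                return 0                              # a single failure rejects the sample
    return 1                                          # all draws succeeded: overall probability exp(-x/f)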
PlottingSpectralEmissivities.py source code (project: Python4ScientificComputing_Fundamentals, author: bnajafi)
def spectralBlackBody(Lambda=0.7, T=5800):
""" here is the explanation of this function"""
import math
c0 = 2.9979*10**8 #m/s speed of light in vacuum
    h_Plank=6.626069*10**-34 #J.s Planck's constant
sigma_stefan_Boltzmann= 5.67*10**-8 #Stefan-Boltzmann Constant
n=1 #the index of refraction of that medium
c=c0/n# the speed of propagation of a wave in the medium
F=c/Lambda #the frequency of the wave
e_wave=h_Plank*F
E_blackBody = sigma_stefan_Boltzmann*T**4
k_Boltzmann=1.38065*10**-23 #J/K Boltzmann Constant
    #Planck's law:
    C1=2*math.pi*h_Plank*c0**2*(10**24) #W*um^4/m^2 (the 10**24 converts m^4 to um^4)
    C2=h_Plank*c0/k_Boltzmann*(10**6) #um*K
EmissiveSpectral= C1/(Lambda**5*(math.exp(C2/(Lambda*T))-1))
outPut = {"EmissiveSpectral":EmissiveSpectral,"E_blackBody":E_blackBody}
return outPut
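A quick illustrative call of the function above (wavelength in micrometers, temperature in kelvin):

result = spectralBlackBody(Lambda=0.5, T=5800)
print(result["EmissiveSpectral"])   # spectral blackbody emissive power at 0.5 um, in W/(m^2 um)
print(result["E_blackBody"])        # total blackbody emissive power, in W/m^2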
def get_folding_rate_for_seq(seq, secstruct, temp, refT=37.0):
"""Scale the predicted kinetic folding rate of a protein to temperature T, based on the relationship ln(k_f)?1/T
Args:
seq (str, Seq, SeqRecord): Amino acid sequence
secstruct (str): Structural class: ``all-alpha``, ``all-beta``, ``mixed``, or ``unknown``
temp (float): Temperature in degrees C
refT (float): Reference temperature, default to 37 C
Returns:
float: Kinetic folding rate k_f at temperature T.
"""
# Not much data available on this slope value, however its effect on growth rate in a model is very small
slope = 22000
# Get folding rate for the reference temperature
ref_rate = get_foldrate(seq, secstruct)
preFactor = float(ref_rate) + slope / (float(refT) + 273.15)
# Calculate folding rate at desired temperature
rate = math.exp(preFactor - slope / (float(temp) + 273.15))
return rate
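get_foldrate is defined elsewhere in the project; since the formula returns exp(ref_rate) when temp equals refT, ref_rate is evidently on a natural-log scale. A standalone sketch of the same rescaling with a hypothetical log-scale reference rate:

import math

def rescale_folding_rate(ln_ref_rate, temp, refT=37.0, slope=22000):
    """Sketch: shift a log-scale folding rate from refT (deg C) to temp (deg C)."""
    pre_factor = ln_ref_rate + slope / (refT + 273.15)
    return math.exp(pre_factor - slope / (temp + 273.15))

print(rescale_folding_rate(ln_ref_rate=5.0, temp=42.0))   # hypothetical numbers, for illustration only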
def fit_power_law(x, y):
"""
"""
ln_x = NP.log(x)
ln_y = NP.log(y)
# least squares solution
A = NP.empty((len(x), 2))
A[:, 0] = 1
A[:, 1] = ln_x
#b_ls = NP.linalg.lstsq(A, ln_y)[0]
# total least-squares solution
X = NP.empty((len(x), 3))
X[:, :2] = A
X[:, 2] = ln_y
U, S, V = NP.linalg.svd(X, 1)
b_tls = (V[-1, :] / -V[-1, -1])[:2]
alpha = math.exp(b_tls[0])
beta = b_tls[1]
return alpha, beta
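Assuming numpy is imported as NP, as in the snippet above, a quick check with noise-free synthetic data that follows an exact power law:

x = NP.linspace(1.0, 10.0, 50)
y = 3.0 * x ** 1.5
alpha, beta = fit_power_law(x, y)
print(alpha, beta)   # should recover roughly alpha = 3.0 and beta = 1.5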
def apply_regr(x, y, w, h, tx, ty, tw, th):
try:
cx = x + w/2.
cy = y + h/2.
cx1 = tx * w + cx
cy1 = ty * h + cy
w1 = math.exp(tw) * w
h1 = math.exp(th) * h
x1 = cx1 - w1/2.
y1 = cy1 - h1/2.
x1 = int(round(x1))
y1 = int(round(y1))
w1 = int(round(w1))
h1 = int(round(h1))
return x1, y1, w1, h1
except ValueError:
return x, y, w, h
except OverflowError:
return x, y, w, h
except Exception as e:
print(e)
return x, y, w, h
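An illustrative call of the box-regression decoder above (Faster R-CNN style: tx/ty shift the box centre in units of width/height, tw/th are exponentiated to scale the size; all numbers are made up):

x1, y1, w1, h1 = apply_regr(x=10, y=20, w=50, h=80, tx=0.1, ty=-0.05, tw=0.2, th=0.0)
print(x1, y1, w1, h1)   # centre shifted by (0.1*w, -0.05*h); width scaled by exp(0.2), height unchanged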
def pixel_to_lonlat(self, px, zoom):
"Converts a pixel to a longitude, latitude pair at the given zoom level."
if len(px) != 2:
raise TypeError('Pixel should be a sequence of two elements.')
# Getting the number of pixels for the given zoom level.
npix = self._npix[zoom]
# Calculating the longitude value, using the degrees per pixel.
lon = (px[0] - npix) / self._degpp[zoom]
# Calculating the latitude value.
lat = RTOD * (2 * atan(exp((px[1] - npix) / (-1.0 * self._radpp[zoom]))) - 0.5 * pi)
# Returning the longitude, latitude coordinate pair.
return (lon, lat)
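The latitude formula above is the inverse of the spherical Mercator projection; the instance attributes _npix and _radpp only convert pixel coordinates into a projected y in radians. A self-contained sketch of that inverse, with RTOD = 180/pi:

from math import atan, exp, pi

RTOD = 180.0 / pi

def mercator_y_to_lat(y):
    """Invert the spherical Mercator projection: projected y in radians -> latitude in degrees."""
    return RTOD * (2 * atan(exp(y)) - 0.5 * pi)

print(mercator_y_to_lat(0.0))   # 0.0 degrees at the equator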
def gen_comm_info(self,main_rdd):
def cominfo(tpl):
p=[]
for ((tx,lam),index) in tpl:
p.append(np.matrix(tx).T*lam)
return p
def findDim(tpl):
for ((tx,lam),index) in tpl:
d = len(tx)
return d
d = main_rdd.mapValues(findDim).values().reduce(lambda x,y:x)
    c = main_rdd.flatMapValues(cominfo).map(lambda kv: kv[1]).reduce(lambda x, y: x + y)
V=matrix(0.0,(d,1))
for j in range(d):
V[j]=math.exp(-self.C*self.r[j]*c[j,0])
return d,V
def erf(x):
# save the sign of x
sign = 1
if x < 0:
sign = -1
x = abs(x)
# constants
a1 = 0.254829592
a2 = -0.284496736
a3 = 1.421413741
a4 = -1.453152027
a5 = 1.061405429
p = 0.3275911
# A&S formula 7.1.26
t = 1.0/(1.0 + p*x)
y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*math.exp(-x*x)
return sign*y # erf(-x) = -erf(x)
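Since Python 3.2 the standard library already provides math.erf, so the Abramowitz & Stegun 7.1.26 approximation above can be checked directly against it:

import math

for v in (-2.0, -0.5, 0.0, 0.5, 2.0):
    print(v, erf(v), math.erf(v))   # the approximation is accurate to roughly 1.5e-7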
def test_forward_works_without_mask(self):
log_likelihood = self.crf(self.logits, self.tags).data[0]
# Now compute the log-likelihood manually
manual_log_likelihood = 0.0
# For each instance, manually compute the numerator
# (which is just the score for the logits and actual tags)
# and the denominator
# (which is the log-sum-exp of the scores for the logits across all possible tags)
for logits_i, tags_i in zip(self.logits, self.tags):
numerator = self.score(logits_i.data, tags_i.data)
all_scores = [self.score(logits_i.data, tags_j) for tags_j in itertools.product(range(5), repeat=3)]
denominator = math.log(sum(math.exp(score) for score in all_scores))
# And include them in the manual calculation.
manual_log_likelihood += numerator - denominator
# The manually computed log likelihood should equal the result of crf.forward.
assert manual_log_likelihood == approx(log_likelihood)
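The denominator above is a log-sum-exp over every possible tag sequence; with only 5**3 small scores the direct math.log(sum(math.exp(...))) form is fine, but the usual max-shifted sketch is safer when scores get large:

import math

def log_sum_exp(scores):
    """Numerically stable log(sum(exp(s) for s in scores))."""
    m = max(scores)
    return m + math.log(sum(math.exp(s - m) for s in scores))

print(log_sum_exp([1000.0, 1000.0]))   # ~1000.693, where the naive form would overflow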
def calc_gauss_amp(node_xyz, center=(0.0, 0.0, -2.0), sigma=(1.0, 1.0, 1.0),
amp=1.0, amp_cut=0.05, sym="qsym"):
"""calculated the Gaussian amplitude at the node
:param node_xyz: list of x,y,z node coordinates
:param center: list of x,y,z for Gaussian center
:param sigma: list of x,y,z Guassian width
:param amp: peak Gaussian source amplitude
:param amp_cut: lower threshold (pct of max) for amplitude creating a
point load
:param qsym: mesh symemetry (qsym, hsym, none)
:returns: nodeGaussAmp - point load amplitude at the specified node
"""
from math import pow, exp
exp1 = pow((node_xyz[1] - center[0]) / sigma[0], 2)
exp2 = pow((node_xyz[2] - center[1]) / sigma[1], 2)
exp3 = pow((node_xyz[3] - center[2]) / sigma[2], 2)
nodeGaussAmp = amp * exp(-(exp1 + exp2 + exp3))
if (nodeGaussAmp / amp) < amp_cut:
nodeGaussAmp = None
else:
nodeGaussAmp = sym_scale_amp(node_xyz, nodeGaussAmp, sym)
return nodeGaussAmp
def black_scholes_numba(stockPrice, optionStrike,
optionYears, Riskfree, Volatility):
callResult = np.empty_like(stockPrice)
putResult = np.empty_like(stockPrice)
S = stockPrice
X = optionStrike
T = optionYears
R = Riskfree
V = Volatility
for i in range(len(S)):
sqrtT = math.sqrt(T[i])
d1 = (math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) / (V * sqrtT)
d2 = d1 - V * sqrtT
cndd1 = cnd_numba(d1)
cndd2 = cnd_numba(d2)
expRT = math.exp((-1. * R) * T[i])
callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2)
putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] * (1.0 - cndd1))
return callResult, putResult
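cnd_numba is defined elsewhere in that project (a polynomial approximation of the standard normal CDF). A self-contained scalar sketch of the same pricing formula, using math.erf for the CDF, can serve as a reference:

import math

def norm_cdf(x):
    return 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

def black_scholes_scalar(S, X, T, R, V):
    """Reference Black-Scholes call and put prices for a single option."""
    sqrtT = math.sqrt(T)
    d1 = (math.log(S / X) + (R + 0.5 * V * V) * T) / (V * sqrtT)
    d2 = d1 - V * sqrtT
    expRT = math.exp(-R * T)
    call = S * norm_cdf(d1) - X * expRT * norm_cdf(d2)
    put = X * expRT * (1.0 - norm_cdf(d2)) - S * (1.0 - norm_cdf(d1))
    return call, put

print(black_scholes_scalar(S=100.0, X=100.0, T=1.0, R=0.02, V=0.3))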
def black_scholes_cuda_kernel(callResult, putResult, S, X,
T, R, V):
# S = stockPrice
# X = optionStrike
# T = optionYears
# R = Riskfree
# V = Volatility
i = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
if i >= S.shape[0]:
return
sqrtT = math.sqrt(T[i])
d1 = (math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) / (V * sqrtT)
d2 = d1 - V * sqrtT
cndd1 = cnd_cuda(d1)
cndd2 = cnd_cuda(d2)
expRT = math.exp((-1. * R) * T[i])
callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2)
putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] * (1.0 - cndd1))
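In the original project the kernel above carries a @cuda.jit decorator and cnd_cuda is a device function; assuming both are in place, a typical Numba launch looks like this (array sizes and market parameters are illustrative):

import numpy as np

n = 1 << 20
S = np.random.uniform(10.0, 50.0, n)
X = np.random.uniform(10.0, 50.0, n)
T = np.random.uniform(0.5, 2.0, n)
callResult = np.zeros(n)
putResult = np.zeros(n)

threads_per_block = 256
blocks_per_grid = (n + threads_per_block - 1) // threads_per_block
black_scholes_cuda_kernel[blocks_per_grid, threads_per_block](
    callResult, putResult, S, X, T, 0.02, 0.3)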
def gleu(self, stats, smooth=False):
"""Compute GLEU from collected statistics obtained by call(s) to gleu_stats"""
# smooth 0 counts for sentence-level scores
if smooth:
stats = [s if s != 0 else 1 for s in stats]
    if any(x == 0 for x in stats):  # any zero count means the score is 0
return 0
(c, r) = stats[:2]
log_gleu_prec = sum([math.log(float(x) / y)
for x, y in zip(stats[2::2], stats[3::2])]) / 4
for i, (x, y) in enumerate(zip(stats[2::2], stats[3::2])) :
pass
#print 'Precision', i+1, '=', x, '/', y, '=', 1.*x/y
# log_gleu_prec = sum([math.log(float(x) / y)
# for x, y in zip(stats[2::2], stats[3::2])]) / 4
return math.exp(min([0, 1 - float(r) / c]) + log_gleu_prec)
def getPoissonSample(self, mean):
# Using Knuth's algorithm described in
# http://en.wikipedia.org/wiki/Poisson_distribution
if mean < 20.0:
# one exp and k+1 random calls
l = math.exp(-mean)
p = self._random.random()
k = 0
while p > l:
k += 1
p *= self._random.random()
else:
# switch to the log domain, k+1 expovariate (random + log) calls
p = self._random.expovariate(mean)
k = 0
while p < 1.0:
k += 1
p += self._random.expovariate(mean)
return k
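A standalone version of the small-mean branch (Knuth's product-of-uniforms algorithm), using the random module directly instead of the class's _random attribute:

import math
import random

def poisson_knuth(mean):
    """Sample a Poisson variate for small means via Knuth's algorithm."""
    l = math.exp(-mean)
    p = random.random()
    k = 0
    while p > l:
        k += 1
        p *= random.random()
    return k

print(poisson_knuth(4.0))   # a random non-negative integer whose expectation is 4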
def GeomMeanAndStdDevFromHistogram(histogram_json):
histogram = json.loads(histogram_json)
# Handle empty histograms gracefully.
if not 'buckets' in histogram:
return 0.0, 0.0
count = 0
sum_of_logs = 0
for bucket in histogram['buckets']:
if 'high' in bucket:
bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
else:
bucket['mean'] = bucket['low']
if bucket['mean'] > 0:
sum_of_logs += math.log(bucket['mean']) * bucket['count']
count += bucket['count']
if count == 0:
return 0.0, 0.0
sum_of_squares = 0
geom_mean = math.exp(sum_of_logs / count)
for bucket in histogram['buckets']:
if bucket['mean'] > 0:
sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count']
return geom_mean, math.sqrt(sum_of_squares / count)
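A small illustrative input: two samples in a bucket with mean 2.0 and one sample at 4.0, so the geometric mean is (2*2*4)**(1/3), roughly 2.52:

import json

histogram_json = json.dumps({
    "buckets": [
        {"low": 1, "high": 3, "count": 2},   # bucket mean 2.0
        {"low": 4, "count": 1},              # bucket mean 4.0
    ]
})
print(GeomMeanAndStdDevFromHistogram(histogram_json))   # approximately (2.52, ...)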
def c_qv2(T,omega):
x = omega * kb1 / T
expx = math.exp(-x) # exponential term
x2 = math.pow(x,2)
return x2*K_BOLTZMANN_RY*expx/math.pow(expx-1.0,2)
################################################################################
#
# This function computes the thermal expansions alpha using the Grüneisen
# parameters
# more comments to be added
# First with min0, freq and grun T-independent
#
# More ibrav types to be implemented
def c_qv_python(T,omega):
"""
This function calculates the mode contribution to the heat capacity at a given T
and omega. A similar (faster) function should be available as C extension.
"""
#print ("Python c_qv")
if (T<1E-9 or omega<1E-9):
return 0.0
x = omega * KB1 / T
expx = math.exp(-x) # exponential term
x2 = math.pow(x,2)
if expx>1E-3: # compute normally
return x2*K_BOLTZMANN_RY*expx/math.pow(expx-1.0,2)
else: # Taylor series
return K_BOLTZMANN_RY*expx* (x/math.pow(x-0.5*math.pow(x,2)+
0.16666666666666667*math.pow(x,3)+0.04166666666666666667*math.pow(x,4),2))
################################################################################
#
# If available use a c version of the function c_qv, else use the (slower)
# Python version
#
def GS_kernel_precomp_P(str1, str2, psiDict, sigmaPos, sigmaAA, L, P):
len_str1 = len(str1)
len_str2 = len(str2)
A = np.zeros((len_str1, len_str2))
for i in xrange(len_str1):
for j in xrange(len_str2):
            try:
                A[i, j] = psiDict[str1[i], str2[j]]
            except KeyError:
                if str1[i] != str2[j]:
                    A[i, j] = 4.0
A /= -2.0 * (sigmaAA ** 2.0)
A = np.exp(A)
B = np.zeros((len_str1, len_str2))
for i in xrange(len_str1):
for j in xrange(len_str2):
tmp = 1.0
for l in xrange(L):
if i + l < len_str1 and j + l < len_str2:
tmp *= A[i + l, j + l]
B[i, j] += tmp
return np.sum(P * B)
def get_transfered_data(lon, lat, theta, data_frame, max_distance_on_position=1.0*math.pi, max_distance_on_degree=180.0, final_discount_to=10**(-4)):
distance_on_position = haversine(lon1=lon,
lat1=lat,
lon2=data_frame.p[0],
lat2=data_frame.p[1])
distance_on_degree = abs(theta - data_frame.theta)
if(distance_on_degree>180):
distance_on_degree = distance_on_degree - 180
thegma_2_on_position = -0.5*(max_distance_on_position**2)/math.log(final_discount_to)
thegma_2_on_degree = -0.5*(max_distance_on_degree**2)/math.log(final_discount_to)
    '''Gaussian trustworthy transfer'''
prob = 1.0 * math.exp(-1.0 / 2.0 * (distance_on_position**2) / (thegma_2_on_position)) * math.exp(-1.0 / 2.0 * (distance_on_degree**2) / (thegma_2_on_degree))
return prob
def _compute_bleu(p, len_pred, len_gold, smooth):
# Brevity penalty.
log_brevity = 1 - max(1, (len_gold + smooth) / (len_pred + smooth))
log_score = 0
ngrams = len(p) - 1
for n in range(1, ngrams + 1):
if p[n][1] > 0:
if p[n][0] == 0:
p[n][0] = 1e-16
log_precision = math.log((p[n][0] + smooth) / (p[n][1] + smooth))
log_score += log_precision
log_score /= ngrams
return math.exp(log_score + log_brevity)
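The p argument is indexed by n-gram order, with p[n] holding [matched, total] counts; a hypothetical call for precision up to 4-grams:

# p[0] is unused; p[1]..p[4] hold [matched n-grams, total n-grams] for n = 1..4
p = [None, [3, 4], [2, 3], [1, 2], [1, 1]]
print(_compute_bleu(p, len_pred=4, len_gold=4, smooth=1))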
# Calculate BLEU of prefixes of pred.
def __init__(self, model, action_size=1, init_value=0.0, *args, **kwargs):
super(DiagonalGaussianPolicy, self).__init__(model, *args, **kwargs)
self.init_value = init_value
self.logstd = th.zeros((1, action_size)) + self.init_value
self.logstd = P(self.logstd)
    # constants 0.5*log(2*pi*e) and 0.5*log(2*pi), used below for the entropy and log-probability
    self.halflog2pie = V(T([2 * pi * exp(1)])).log() * 0.5
    self.halflog2pi = V(T([2.0 * pi])).log() * 0.5
self.pi = V(T([pi]))
def _normal(self, x, mean, logstd):
std = logstd.exp()
std_sq = std.pow(2)
a = (-(x - mean).pow(2) / (2 * std_sq)).exp()
b = (2 * std_sq * self.pi.expand_as(std_sq)).sqrt()
return a / b
def forward(self, x, *args, **kwargs):
action = super(DiagonalGaussianPolicy, self).forward(x, *args, **kwargs)
size = action.raw.size()
std = self.logstd.exp().expand_as(action.raw)
value = action.raw + std * V(th.randn(size))
value = value.detach()
action.value = value
# action.logstd = self.logstd.clone()
action.logstd = self.logstd
action.prob = lambda: self._normal(value, action.raw, action.logstd)
action.entropy = action.logstd + self.halflog2pie
var = std.pow(2)
action.compute_log_prob = lambda a: (- ((a - action.raw).pow(2) / (2.0 * var)) - self.halflog2pi - action.logstd).mean(1)
action.log_prob = action.compute_log_prob(value)
return action
def get_user_to_damped_n_messages(self, dt_max, alpha):
"""
Maps each user to the number of messages before a reference datetime,
where each message count is exponentially damped by a constant times
the difference between the reference datetime and the datetime of the
message.
Args:
dt_max: A datetime representing the max datetime of messages
to consider.
alpha: A nonnegative float representing the damping factor.
Returns:
user_to_damped_n_messages: A dict mapping each user in
self.users_union to the damped number of messages by that user
before dt_max. The contribution of a message is a float equal
to exp(-alpha * t), where t is the difference in days between
dt_max and the datetime of the message.
"""
if alpha < 0:
raise ValueError('Must have alpha >= 0')
try:
# Only keep messages with datetimes <= dt_max
filtered = self.filter_by_datetime(end_dt=dt_max)
except EmptyConversationError:
# Map all users to 0 if dt_max occurs before all messages
return self.get_user_to_message_statistic(lambda x: 0)
damped_message_count = lambda x: self.exp_damped_day_difference(dt_max, x.timestamp, alpha)
user_to_damped_n_messages = filtered.get_user_to_message_statistic(damped_message_count)
return user_to_damped_n_messages
def damped_n_messages(self, dt_max, alpha):
"""
Computes the sum of damped message counts before a reference datetime,
where each damped message count is exponentially damped by a constant
times the difference between the reference datetime and the datetime of
the message.
Args:
dt_max: A datetime representing the max datetime of messages to
consider.
alpha: A nonnegative float representing the damping factor.
Returns:
damped_n_messages_total: A float equal to the sum of damped message
counts before dt_max. The contribution of a message is
exp(-alpha * t), where t is the difference in days between
dt_max and the datetime of the message.
"""
if alpha < 0:
raise ValueError('Must have alpha >= 0')
try:
# Only keep messages with datetimes <= dt_max
filtered = self.filter_by_datetime(end_dt=dt_max)
except EmptyConversationError:
# dt_max occurs before all messages
return 0
damped_message_count = lambda x: self.exp_damped_day_difference(dt_max, x.timestamp, alpha)
damped_n_messages_total = filtered.sum_conversation_message_statistic(damped_message_count)
return damped_n_messages_total