def _get_magnification_w_times(self, source_x, source_y, radius,
magnification_center=None):
"""Evaluates Gould (2008) eq. 8"""
shift = radius / sqrt(2.)
dx = [1., -1., -1., 1.]
dy = [1., 1., -1., -1.]
out = []
for (i, dxval) in enumerate(dx):
x = source_x + dxval * shift
y = source_y + dy[i] * shift
out.append(self.point_source_magnification(
source_x=x, source_y=y))
if magnification_center is None:
magnification_center = self.point_source_magnification(
source_x=source_x, source_y=source_y)
return 0.25 * fsum(out) - magnification_center
def q(self, new_q):
# Update epsilon
new_q = np.insert(new_q, 0, 1.)
self._epsilon = new_q / fsum(new_q)
try:
if np.array(new_q).size == self._epsilon.size - 1:
# Case 3: the entire lens is defined (new_q changes
# the values of q)
pass
else:
# Case 2: the primary is defined (new_q adds masses)
if ((self._total_mass is not None) and
(self._last_mass_set != 'total_mass')):
self._total_mass = self._total_mass * fsum(new_q)
except AttributeError:
# Case 1: nothing is initialized (new_q directly sets epsilon)
pass
def gamma(z, sqrt2pi=(2.0*pi)**0.5):
# Reflection to right half of complex plane
if z < 0.5:
return pi / sin(pi*z) / gamma(1.0-z)
# Lanczos approximation with g=7
az = z + (7.0 - 0.5)
return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([
0.9999999999995183,
676.5203681218835 / z,
-1259.139216722289 / (z+1.0),
771.3234287757674 / (z+2.0),
-176.6150291498386 / (z+3.0),
12.50734324009056 / (z+4.0),
-0.1385710331296526 / (z+5.0),
0.9934937113930748e-05 / (z+6.0),
0.1659470187408462e-06 / (z+7.0),
])
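# A quick sanity check of the Lanczos-based gamma() above -- a sketch that
# assumes `pi`, `sin`, `exp` and `fsum` were imported from `math`, as the
# snippet itself requires -- comparing against the standard library's gamma:
import math

for z in (0.5, 1.0, 2.5, 6.0, 10.0):
    assert math.isclose(gamma(z), math.gamma(z), rel_tol=1e-10), z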
def pooled_sample_variance(sample1, sample2):
"""Find the pooled sample variance for two samples.
Args:
sample1: one sample.
sample2: the other sample.
Returns:
Pooled sample variance, as a float.
"""
deg_freedom = len(sample1) + len(sample2) - 2
mean1 = statistics.mean(sample1)
squares1 = ((x - mean1) ** 2 for x in sample1)
mean2 = statistics.mean(sample2)
squares2 = ((x - mean2) ** 2 for x in sample2)
return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom)
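# Hypothetical usage sketch: the pooled variance is the quantity that feeds a
# classic equal-variance two-sample t statistic. The helper name below is
# illustrative, not part of the original module.
import math
import statistics

def t_score(sample1, sample2):
    s_p2 = pooled_sample_variance(sample1, sample2)
    diff = statistics.mean(sample1) - statistics.mean(sample2)
    return diff / math.sqrt(s_p2 * (1.0 / len(sample1) + 1.0 / len(sample2)))

print(t_score([9.8, 10.1, 10.3], [10.9, 11.2, 11.0]))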
def trapz(funct, args, a, b):
    N = 100                       # number of panels
    step = (b - a) / N            # panel width
    y = []                        # values of y at the interior points
    for i in range(1, N):         # loop over interior x values, omitting the endpoints
        x = a + (step * i)        # each subsequent x value is increased by the step size
        y.append(funct(x, args))  # call the desired function with the required arguments and x value
    mid = math.fsum(y)            # sum the interior values of y
    # Trapezoid rule for numerical integration: the endpoints count with weight 1/2.
    area = step * ((funct(a, args) + funct(b, args)) / 2 + mid)
    return area                   # return the value of area back to the calling function
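# Hypothetical usage sketch for trapz() above: integrate x**2 on [0, 3]
# (exact value 9.0). `args` is only passed through, so None is fine here.
import math

def square(x, args):
    return x ** 2

print(trapz(square, None, 0.0, 3.0))  # close to 9.0 with 100 panels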
def sample(self, probs, temperature):
if temperature == 0:
return np.argmax(probs)
probs = probs.astype(np.float64) #convert to float64 for higher precision
probs = np.log(probs) / temperature
probs = np.exp(probs) / math.fsum(np.exp(probs))
return np.argmax(np.random.multinomial(1, probs, 1))
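# Stand-alone sketch of the same temperature-scaling trick as a free function
# (the names here are illustrative): temperature < 1 sharpens the distribution,
# temperature == 1 leaves it unchanged.
import math
import numpy as np

def sample_with_temperature(probs, temperature):
    if temperature == 0:
        return int(np.argmax(probs))
    logits = np.log(np.asarray(probs, dtype=np.float64)) / temperature
    scaled = np.exp(logits)
    scaled = scaled / math.fsum(scaled)
    return int(np.argmax(np.random.multinomial(1, scaled)))

print(sample_with_temperature([0.1, 0.2, 0.7], temperature=0.5))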
def mean(data: Iterable[float]) -> float:
'Accurate arithmetic mean'
data = list(data)
return fsum(data) / len(data)
def dist(p: Point, q: Point, sqrt=sqrt, fsum=fsum, zip=zip) -> float:
'Multi-dimensional euclidean distance'
return sqrt(fsum((x1 - x2) ** 2.0 for x1, x2 in zip(p, q)))
def describe(data):
'Simple reducer for descriptive statistics'
n = len(data)
lo = min(data)
hi = max(data)
mean = fsum(data) / n
std_dev = (fsum((x - mean) ** 2 for x in data) / n) ** 0.5
return Summary(n, lo, mean, hi, std_dev)
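# The describe() snippet above assumes a small result type and that `fsum` was
# imported from `math`; a plausible stand-in definition (the field names are
# inferred from the constructor call, not taken from the original source):
from collections import namedtuple

Summary = namedtuple('Summary', ['n', 'lo', 'mean', 'hi', 'std_dev'])

print(describe([1.0, 2.0, 3.0, 4.0]))
# Summary(n=4, lo=1.0, mean=2.5, hi=4.0, std_dev=1.118033988749895)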
def get_chi2(self, fit_blending=None):
"""
Calculates chi^2 of current model by fitting for source and
blending fluxes.
Parameters :
fit_blending: *boolean*, optional
If True, then the blend flux is a free parameter. If
False, the blend flux is fixed at zero. Default is
the same as :py:func:`MulensModel.fit.Fit.fit_fluxes()`.
Returns :
chi2: *float*
Chi^2 value
"""
chi2_per_point = self.get_chi2_per_point(
fit_blending=fit_blending)
# Calculate chi^2 given the fit
chi2 = []
for i, dataset in enumerate(self.datasets):
# Calculate chi2 for the dataset excluding bad data
select = np.logical_not(dataset.bad)
chi2.append(fsum(chi2_per_point[i][select]))
self.chi2 = fsum(chi2)
if self.best_chi2 is None or self.best_chi2 > self.chi2:
self.best_chi2 = self.chi2
self.best_chi2_parameters = dict(self.model.parameters.parameters)
return self.chi2
def _point_source_WM95(self, source_x, source_y):
"""calculate point source magnification using Witt & Mao 1995"""
return fsum(abs(self._signed_magnification_WM95(
source_x=source_x, source_y=source_y)))
def _get_magnification_w_plus(self, source_x, source_y, radius,
magnification_center=None):
"""Evaluates Gould (2008) eq. 7"""
dx = [1., 0., -1., 0.]
dy = [0., 1., 0., -1.]
out = []
for (i, dxval) in enumerate(dx):
x = source_x + dxval * radius
y = source_y + dy[i] * radius
out.append(self.point_source_magnification(
source_x=x, source_y=y))
if magnification_center is None:
magnification_center = self.point_source_magnification(
source_x=source_x, source_y=source_y)
return 0.25 * fsum(out) - magnification_center
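# For illustration only (not the Gould 2008 combination itself): the two
# helpers above sample the source plane in a "plus" pattern (on-axis points at
# distance radius) and a "times" pattern (diagonal points at radius/sqrt(2)).
from math import sqrt

radius = 0.01
plus_offsets = [(radius, 0.), (0., radius), (-radius, 0.), (0., -radius)]
shift = radius / sqrt(2.)
times_offsets = [(shift, shift), (-shift, shift), (-shift, -shift), (shift, -shift)]
print(plus_offsets)
print(times_offsets)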
def bleu(candidate, references, weights):
"""
Calculate BLEU for a single sentence, comment by atma
    The result of this code is the same as that of the most popular Perl script
eg:
weight = [0.25, 0.25, 0.25, 0.25]
can = 'It is a guide to action which ensures that the military always obeys the commands of the party'.lower().split()
ref1 = 'It is a guide to action that ensures that the military will forever heed Party commands'.lower().split()
ref2 = 'It is the guiding principle which guarantees the military forces always being under the command of the Party'.lower().split()
ref = [ref1, ref2]
    print(bleu(can, ref, weight))
:param candidate: word list of one sentence, eg: ['I', 'like', 'eat', 'apple']
:param references: list of ref, each is a list of word, eg [['I', 'like', 'eat', 'apple'],['I', 'like', 'apple']]
:param weights: a list of weight
:return: return the bleu score
"""
p_ns = ( MP(candidate, references, i) for i, _ in enumerate(weights, start=1))
s = []
for w, p_n in zip(weights, p_ns):
try:
s.append(w * math.log(p_n))
except ValueError:
s.append(0)
s = math.fsum(s)
bp = BP(candidate, references)
return bp * math.exp(s)
def calculateCentroid(self):
if len(self.points) > 0 :
# Finds a virtual center point for a group of n-dimensional points
numPoints = len(self.points)
# Get a list of all coordinates in this cluster
coords = [p.coords for p in self.points]
        print('cluster has: ' + str(numPoints) + ' points')
        # Reformat so that all x's are together, all y's, etc.
unzipped = zip(*coords)
# Calculate the mean for each dimension
centroid_coords = [math.fsum(dList)/numPoints for dList in unzipped]
return Point(centroid_coords,'Centroid')
else:
return self.centroid
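# Minimal stand-alone version of the same centroid computation (a hypothetical
# helper; it takes the coordinate lists directly instead of Point objects):
import math

def centroid_coords(list_of_coords):
    num_points = len(list_of_coords)
    return [math.fsum(dim) / num_points for dim in zip(*list_of_coords)]

print(centroid_coords([[0., 0.], [2., 0.], [1., 3.]]))  # [1.0, 1.0]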
def calc_mean_onbit_density(bitsets, number_of_bits):
"""Calculate the mean density of bits that are on in bitsets collection.
Args:
bitsets (list[intbitset.intbitset]): List of fingerprints
number_of_bits: Number of bits for all fingerprints
Returns:
float: Mean on bit density
"""
all_nr_onbits = [len(v) for v in bitsets]
mean_onbit = fsum(all_nr_onbits) / float(len(all_nr_onbits))
density = mean_onbit / number_of_bits
return float(density)
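# calc_mean_onbit_density() only needs len() on each fingerprint (and assumes
# `fsum` was imported from `math`), so plain Python sets work as a stand-in
# for intbitset objects in a quick check:
print(calc_mean_onbit_density([{1, 5, 9}, {2, 3}, {7}], number_of_bits=10))  # 0.2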
def _get_weights(pts):
'''Given a number of points in [-1, 1], according to
On some Gauss and Lobatto based integration formulae,
T. N. L. Patterson,
Math. Comp. 22 (1968), 877-881,
one can compute the corresponding weights. One reads there:
> Thus the weights of an n-point integration formula [...] are given by
>
> omega_i = int_{-1}^{1} L_i(x) dx,
>
> (where L_i is the Lagrange polynomial for the point x_i).
> These weights can be evaluated exactly in a numerically stable fashion
> using a Gauss formula with n/2 points when n is even and (n + 1)/2 points
> when n is odd.
'''
n = len(pts)
# Unnormalized Lagrange polynomial: Degree n, 0 at all x_j except x_i.
def L(i, x):
return numpy.prod([(x - pts[j]) for j in range(n) if j != i], axis=0)
# Gauss-Legendre of order k integrates polynomials of degree 2*k-1 exactly.
# L has degree n-1, so k needs to be n/2 if n is even, and (n+1)/2 if n is
# odd.
k = (n // 2) - 1 if n % 2 == 0 else (n+1) // 2
return numpy.array([
integrate(
lambda x, i=i: L(i, x[0]),
numpy.array([[-1.0], [1.0]]),
GaussLegendre(k),
sumfun=lambda a: numpy.array([math.fsum(a)])
)[0]
/
numpy.prod([(pts[i] - pts[j]) for j in range(n) if j != i])
for i in range(n)
])
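# A hedged, numpy-only cross-check of the same idea on a tiny node set:
# integrate each Lagrange basis polynomial over [-1, 1] directly (no Gauss
# rule is needed at this size). For the nodes {-1, 0, 1} this reproduces the
# Simpson weights 1/3, 4/3, 1/3.
import numpy
from numpy.polynomial import polynomial as P

def lagrange_weights(pts):
    n = len(pts)
    weights = []
    for i in range(n):
        num = numpy.array([1.0])
        for j in range(n):
            if j != i:
                num = P.polymul(num, [-pts[j], 1.0])   # factor (x - x_j)
        denom = numpy.prod([pts[i] - pts[j] for j in range(n) if j != i])
        antider = P.polyint(num)
        weights.append((P.polyval(1.0, antider) - P.polyval(-1.0, antider)) / denom)
    return numpy.array(weights)

print(lagrange_weights([-1.0, 0.0, 1.0]))  # [0.333..., 1.333..., 0.333...]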
def plot_disks_1d(plt, pts, weights, total_area):
    '''Plot circles at the quadrature points, scaled according to the weights.
    The diameters sum up to the total area.
'''
radii = 0.5 * abs(weights)/math.fsum(weights) * total_area
colors = [
# use matplotlib 2.0's color scheme
'#1f77b4' if weight >= 0 else '#d62728'
for weight in weights
]
_plot_disks_helpers(plt, pts, radii, colors)
return
def plot_disks(plt, pts, weights, total_area):
    '''Plot circles at the quadrature points, scaled according to the weights.
'''
flt = numpy.vectorize(float)
pts = flt(pts)
weights = flt(weights)
radii = numpy.sqrt(abs(weights)/math.fsum(weights) * total_area/math.pi)
colors = [
# use matplotlib 2.0's color scheme
'#1f77b4' if weight >= 0 else '#d62728'
for weight in weights
]
_plot_disks_helpers(plt, pts, radii, colors)
return
def best_dir(self):
    right = math.fsum(self.data[0:len(self.data) // 2])
    left = math.fsum(self.data[len(self.data) // 2:])
return 0 if left == right else 1 if left > right else -1
def average(self):
return math.fsum(self.timings) / len(self.timings)
def stdev(self):
mean = self.average
return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5
def run(
mesh,
volume,
convol_norms, ce_ratio_norms, cellvol_norms,
tol=1.0e-12
):
# Check cell volumes.
total_cellvolume = fsum(mesh.cell_volumes)
assert abs(volume - total_cellvolume) < tol * volume
norm2 = numpy.linalg.norm(mesh.cell_volumes, ord=2)
norm_inf = numpy.linalg.norm(mesh.cell_volumes, ord=numpy.Inf)
assert near_equal(cellvol_norms, [norm2, norm_inf], tol)
# If everything is Delaunay and the boundary elements aren't flat, the
# volume of the domain is given by
# 1/n * edge_lengths * ce_ratios.
# Unfortunately, this isn't always the case.
    #
    # ```
    # total_ce_ratio = \
    #     fsum(mesh.edge_lengths**2 * mesh.get_ce_ratios_per_edge() / dim)
    # self.assertAlmostEqual(volume, total_ce_ratio, delta=tol * volume)
    # ```
# Check ce_ratio norms.
# TODO reinstate
alpha2 = fsum((mesh.get_ce_ratios()**2).flat)
alpha_inf = max(abs(mesh.get_ce_ratios()).flat)
assert near_equal(ce_ratio_norms, [alpha2, alpha_inf], tol)
# Check the volume by summing over the absolute value of the control
# volumes.
vol = fsum(mesh.get_control_volumes())
assert abs(volume - vol) < tol*volume
# Check control volume norms.
norm2 = numpy.linalg.norm(mesh.get_control_volumes(), ord=2)
norm_inf = numpy.linalg.norm(mesh.get_control_volumes(), ord=numpy.Inf)
assert near_equal(convol_norms, [norm2, norm_inf], tol)
return