def evaluation(self, X_test, y_test):
# normalization
X_test = self.normalization(X_test)
# average over the output
pred_y_test = np.zeros([self.M, len(y_test)])
prob = np.zeros([self.M, len(y_test)])
    '''
    Since we have M particles, we take a Bayesian view and average the
    predictions and likelihoods over the particles to compute the RMSE
    and the test log-likelihood.
    '''
for i in range(self.M):
w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.theta[i, :])
pred_y_test[i, :] = self.nn_predict(X_test, w1, b1, w2, b2) * self.std_y_train + self.mean_y_train
        prob[i, :] = np.sqrt(np.exp(loggamma)) / np.sqrt(2 * np.pi) * np.exp(-0.5 * np.exp(loggamma) * np.power(pred_y_test[i, :] - y_test, 2))
pred = np.mean(pred_y_test, axis=0)
# evaluation
svgd_rmse = np.sqrt(np.mean((pred - y_test)**2))
svgd_ll = np.mean(np.log(np.mean(prob, axis = 0)))
return (svgd_rmse, svgd_ll)
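# A minimal, self-contained sketch of the averaging above (synthetic numbers,
# not from the original source): with M particles, the test log-likelihood
# averages the per-particle Gaussian densities *before* taking the log.
import numpy as np

_y = np.array([1.0, 2.0])                    # hypothetical targets
_pred = np.array([[0.9, 2.1], [1.1, 1.8]])   # hypothetical per-particle predictions (M=2)
_gamma = 4.0                                 # hypothetical noise precision exp(loggamma)
_dens = np.sqrt(_gamma / (2 * np.pi)) * np.exp(-0.5 * _gamma * (_pred - _y) ** 2)
_ll = np.mean(np.log(np.mean(_dens, axis=0)))  # matches svgd_ll above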
def nufft_scale1(N, K, alpha, beta, Nmid):
'''
calculate image space scaling factor
'''
    # complex alpha is not fully supported; keep only the real part
    alpha = numpy.real(alpha)
L = len(alpha) - 1
if L > 0:
sn = numpy.zeros((N, 1))
n = numpy.arange(0, N).reshape((N, 1), order='F')
i_gam_n_n0 = 1j * (2 * numpy.pi / K) * (n - Nmid) * beta
for l1 in range(-L, L + 1):
alf = alpha[abs(l1)]
if l1 < 0:
alf = numpy.conj(alf)
sn = sn + alf * numpy.exp(i_gam_n_n0 * l1)
else:
sn = numpy.dot(alpha, numpy.ones((N, 1), dtype=numpy.float32))
return sn
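# A minimal usage sketch (the parameter values are illustrative assumptions):
# for a short series of scaling coefficients alpha, nufft_scale1 returns the
# (N, 1) image-domain scaling vector sn.
import numpy

_sn = nufft_scale1(N=8, K=16, alpha=numpy.array([1.0, 0.1]), beta=0.5, Nmid=4)
assert _sn.shape == (8, 1)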
def nufft_r(om, N, J, K, alpha, beta):
'''
equation (30) of Fessler's paper
'''
M = numpy.size(om) # 1D size
gam = 2.0 * numpy.pi / (K * 1.0)
    nufft_offset0 = nufft_offset(om, J, K)  # grid offset for each om, [M, 1]
    dk = 1.0 * om / gam - nufft_offset0  # om/gam - nufft_offset0, [M, 1]
arg = outer_sum(-numpy.arange(1, J + 1) * 1.0, dk)
L = numpy.size(alpha) - 1
# print('alpha',alpha)
rr = numpy.zeros((J, M), dtype=numpy.float32)
rr = iterate_l1(L, alpha, arg, beta, K, N, rr)
return (rr, arg)
def kaiser_bessel_ft(u, J, alpha, kb_m, d):
'''
    Fourier transform of the Kaiser-Bessel interpolation kernel for the given J/alpha/kb_m
'''
u = u * (1.0 + 0.0j)
import scipy.special
z = numpy.sqrt((2 * numpy.pi * (J / 2) * u) ** 2.0 - alpha ** 2.0)
nu = d / 2 + kb_m
y = ((2 * numpy.pi) ** (d / 2)) * ((J / 2) ** d) * (alpha ** kb_m) / \
scipy.special.iv(kb_m, alpha) * scipy.special.jv(nu, z) / (z ** nu)
y = numpy.real(y)
return y
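# A minimal usage sketch (J and the common choice alpha = 2.34*J are
# illustrative assumptions): evaluate the transform on a 1D frequency grid.
import numpy

_J = 6
_u = numpy.linspace(-0.5, 0.5, 11)
_y = kaiser_bessel_ft(_u, _J, alpha=2.34 * _J, kb_m=0.0, d=1)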
def nufft_scale1(N, K, alpha, beta, Nmid):
'''
Calculate image space scaling factor
'''
    # complex alpha is not fully supported; keep only the real part
    alpha = numpy.real(alpha)
L = len(alpha) - 1
if L > 0:
sn = numpy.zeros((N, 1))
n = numpy.arange(0, N).reshape((N, 1), order='F')
i_gam_n_n0 = 1j * (2 * numpy.pi / K) * (n - Nmid) * beta
for l1 in range(-L, L + 1):
alf = alpha[abs(l1)]
if l1 < 0:
alf = numpy.conj(alf)
sn = sn + alf * numpy.exp(i_gam_n_n0 * l1)
else:
sn = numpy.dot(alpha, numpy.ones((N, 1)))
return sn
def planetary_radius(mass, radius):
"""Calculate planetary radius if not given assuming a density dependent on
mass"""
if not isinstance(mass, (int, float)):
if isinstance(radius, (int, float)):
return radius
else:
return '...'
if mass < 0:
raise ValueError('Only positive planetary masses allowed.')
Mj = c.M_jup
Rj = c.R_jup
    if radius == '...' and isinstance(mass, (int, float)):
        if mass < 0.01:
            rho = 5510.  # Earth bulk density, 5.51 g/cm^3 in kg/m^3
        elif 0.01 <= mass <= 0.5:
            rho = 1640.  # Neptune bulk density, 1.64 g/cm^3 in kg/m^3
        else:
            rho = Mj/(4./3*np.pi*Rj**3)  # Jupiter bulk density [kg/m^3]
        R = ((mass*Mj)/(4./3*np.pi*rho))**(1./3)  # radius from mass and bulk density
        R /= Rj  # convert to Jupiter radii
    else:
        return radius
    return R.value
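# A minimal usage sketch (illustrative calls; assumes the same `c` and `np`
# aliases the function above relies on):
from astropy import constants as c
import numpy as np

_r_jup = planetary_radius(1.0, '...')      # Jupiter-density branch, ~1 R_jup
_r_small = planetary_radius(0.003, '...')  # Earth-density branch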
def test_kbd():
M = 100
w = mdct.windows.kaiser_derived(M, beta=4.)
assert numpy.allclose(w[:M//2] ** 2 + w[-M//2:] ** 2, 1.)
with pytest.raises(ValueError):
mdct.windows.kaiser_derived(M + 1, beta=4.)
assert numpy.allclose(
mdct.windows.kaiser_derived(2, beta=numpy.pi/2)[:1],
[numpy.sqrt(2)/2])
assert numpy.allclose(
mdct.windows.kaiser_derived(4, beta=numpy.pi/2)[:2],
[0.518562710536, 0.855039598640])
assert numpy.allclose(
mdct.windows.kaiser_derived(6, beta=numpy.pi/2)[:3],
[0.436168993154, 0.707106781187, 0.899864772847])
def gaussian_kernel(kernel_shape, sigma=None):
"""
Get 2D Gaussian kernel
:param kernel_shape: kernel size
:param sigma: sigma of Gaussian distribution
:return: 2D Gaussian kernel
"""
kern = numpy.zeros((kernel_shape, kernel_shape), dtype='float32')
# get sigma from kernel size
if sigma is None:
sigma = 0.3*((kernel_shape-1.)*0.5 - 1.) + 0.8
def gauss(x, y, s):
Z = 2. * numpy.pi * s ** 2.
return 1. / Z * numpy.exp(-(x ** 2. + y ** 2.) / (2. * s ** 2.))
mid = numpy.floor(kernel_shape / 2.)
    for i in range(0, kernel_shape):
        for j in range(0, kernel_shape):
kern[i, j] = gauss(i - mid, j - mid, sigma)
return kern / kern.sum()
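# A minimal usage sketch: a 5x5 kernel sums to 1 and peaks at the centre.
import numpy

_kern = gaussian_kernel(5)
assert abs(_kern.sum() - 1.0) < 1e-6
assert _kern[2, 2] == _kern.max()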
def is_grid(self, grid, image):
"""
    Checks the "gridness" by analyzing the results of a Hough transform.
    :param grid: binary image
    :return: whether the object in the image might be a grid or not
"""
# - Distance resolution = 1 pixel
# - Angle resolution = 1° degree for high line density
# - Threshold = 144 hough intersections
# 8px digit + 3*2px white + 2*1px border = 16px per cell
# => 144x144 grid
# 144 - minimum number of points on the same line
# (but due to imperfections in the binarized image it's highly
# improbable to detect a 144x144 grid)
lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)
if lines is not None and np.size(lines) >= 20:
        lines = lines.reshape(lines.size // 2, 2)
        # HoughLines returns theta in [0, pi]; a negative rho encodes the same
        # line with theta shifted by pi, so normalise to rho >= 0
lines[lines[:, 0] < 0, 1] -= np.pi
lines[lines[:, 0] < 0, 0] *= -1
criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
# split lines into 2 groups to check whether they're perpendicular
if cv2.__version__[0] == '2':
density, clmap, centers = cv2.kmeans(
lines[:, 1], 2, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)
else:
density, clmap, centers = cv2.kmeans(
lines[:, 1], 2, None, criteria,
5, cv2.KMEANS_RANDOM_CENTERS)
if self.debug:
self.save_hough(lines, clmap)
# Overall variance from respective centers
var = density / np.size(clmap)
sin = abs(np.sin(centers[0] - centers[1]))
# It is probably a grid only if:
# - centroids difference is almost a 90° angle (+-15° limit)
# - variance is less than 5° (keeping in mind surface distortions)
return sin > 0.99 and var <= (5*np.pi / 180) ** 2
else:
return False
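# A standalone sketch of the (rho, theta) normalisation used above (synthetic
# values, not from the original source): HoughLines reports some lines with a
# negative rho; flipping rho's sign and shifting theta by pi describes the
# same line with rho >= 0.
import numpy as np

_lines = np.array([[-10.0, 2.8], [15.0, 0.3]])  # (rho, theta) pairs
_lines[_lines[:, 0] < 0, 1] -= np.pi
_lines[_lines[:, 0] < 0, 0] *= -1
assert (_lines[:, 0] >= 0).all()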
def save_hough(self, lines, clmap):
"""
:param lines: (rho, theta) pairs
:param clmap: clusters assigned to lines
:return: None
"""
height, width = self.image.shape
ratio = 600. * (self.step+1) / min(height, width)
temp = cv2.resize(self.image, None, fx=ratio, fy=ratio,
interpolation=cv2.INTER_CUBIC)
temp = cv2.cvtColor(temp, cv2.COLOR_GRAY2BGR)
colors = [(0, 127, 255), (255, 0, 127)]
    for i in range(0, np.size(lines) // 2):
rho = lines[i, 0]
theta = lines[i, 1]
color = colors[clmap[i, 0]]
if theta < np.pi / 4 or theta > 3 * np.pi / 4:
pt1 = (rho / np.cos(theta), 0)
            pt2 = ((rho - height * np.sin(theta)) / np.cos(theta), height)
else:
pt1 = (0, rho / np.sin(theta))
pt2 = (width, (rho - width * np.cos(theta)) / np.sin(theta))
pt1 = (int(pt1[0]), int(pt1[1]))
pt2 = (int(pt2[0]), int(pt2[1]))
cv2.line(temp, pt1, pt2, color, 5)
self.save2image(temp)
def is_grid(self, grid, image):
"""
    Checks the "gridness" by analyzing the results of a Hough transform.
    :param grid: binary image
    :return: whether the object in the image might be a grid or not
"""
# - Distance resolution = 1 pixel
# - Angle resolution = 1° degree for high line density
# - Threshold = 144 hough intersections
# 8px digit + 3*2px white + 2*1px border = 16px per cell
# => 144x144 grid
# 144 - minimum number of points on the same line
# (but due to imperfections in the binarized image it's highly
# improbable to detect a 144x144 grid)
lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)
if lines is not None and np.size(lines) >= 20:
        lines = lines.reshape(lines.size // 2, 2)
        # HoughLines returns theta in [0, pi]; a negative rho encodes the same
        # line with theta shifted by pi, so normalise to rho >= 0
lines[lines[:, 0] < 0, 1] -= np.pi
lines[lines[:, 0] < 0, 0] *= -1
criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
# split lines into 2 groups to check whether they're perpendicular
if cv2.__version__[0] == '2':
density, clmap, centers = cv2.kmeans(
lines[:, 1], 2, criteria,
5, cv2.KMEANS_RANDOM_CENTERS)
else:
density, clmap, centers = cv2.kmeans(
lines[:, 1], 2, None, criteria,
5, cv2.KMEANS_RANDOM_CENTERS)
# Overall variance from respective centers
var = density / np.size(clmap)
sin = abs(np.sin(centers[0] - centers[1]))
# It is probably a grid only if:
# - centroids difference is almost a 90° angle (+-15° limit)
# - variance is less than 5° (keeping in mind surface distortions)
return sin > 0.99 and var <= (5*np.pi / 180) ** 2
else:
return False
def build_2D_cov_matrix(sigmax,sigmay,angle,verbose=True):
"""
Build a covariance matrix for a 2D multivariate Gaussian
--- INPUT ---
    sigmax          Standard deviation of the x-component of the multivariate Gaussian
    sigmay          Standard deviation of the y-component of the multivariate Gaussian
angle Angle to rotate matrix by in degrees (clockwise) to populate covariance cross terms
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
covmatrix = tu.build_2D_cov_matrix(3,1,35)
"""
    if verbose: print(' - Build 2D covariance matrix with variances (x,y)=(' + str(sigmax) + ',' + str(sigmay) +
                      ') and then rotated ' + str(angle) + ' degrees')
cov_orig = np.zeros([2,2])
cov_orig[0,0] = sigmay**2.0
cov_orig[1,1] = sigmax**2.0
    angle_rad = (180.0 - angle) * np.pi / 180.0  # the (180 - angle) reproduces the DS9 rotation convention
c, s = np.cos(angle_rad), np.sin(angle_rad)
rotmatrix = np.matrix([[c, -s], [s, c]])
cov_rot = np.dot(np.dot(rotmatrix,cov_orig),np.transpose(rotmatrix)) # performing rot * cov * rot^T
return cov_rot
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def normalize_2D_cov_matrix(covmatrix,verbose=True):
"""
    Calculate the normalization factor for a multivariate Gaussian from its covariance matrix.
    Note that the Gaussian returned by tu.gen_2Dgauss() is normalized for scale=1.
    --- INPUT ---
    covmatrix       Covariance matrix to normalize
    verbose         Toggle verbosity
"""
detcov = np.linalg.det(covmatrix)
normfac = 1.0 / (2.0 * np.pi * np.sqrt(detcov) )
return normfac
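# A minimal usage sketch combining the two utilities above (illustrative
# values): build a rotated covariance matrix and compute the corresponding
# Gaussian normalization factor, 1 / (2*pi*sigmax*sigmay).
import numpy as np

_cov = build_2D_cov_matrix(3, 1, 35, verbose=False)
_normfac = normalize_2D_cov_matrix(_cov, verbose=False)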
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def f(r,theta):
out = np.sin(theta)*np.cos(K*2*np.pi*(1./r))/r
out[-1] = 0
return out
def dfdr(r,theta):
out = (2*K*np.pi*np.sin(2*np.pi*K/r)
-r*np.cos(2*np.pi*K/r))*np.sin(theta)/(r**3)
out[-1] = 0
return out
def dfdrdtheta(r,theta):
out = (2*K*np.pi*np.sin(2*np.pi*K/r)
-r*np.cos(2*np.pi*K/r))*np.cos(theta)/(r**3)
out[-1] = 0
return out
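# A quick finite-difference check of dfdr against f (K is a module-level
# constant assumed by the functions above; the value here is illustrative).
import numpy as np

K = 1.0
_r = np.linspace(2.0, 3.0, 64)
_theta = np.pi / 3
_h = 1e-6
_fd = (f(_r + _h, _theta) - f(_r - _h, _theta)) / (2 * _h)
assert np.allclose(_fd[:-1], dfdr(_r, _theta)[:-1], atol=1e-4)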
def __init__(self,
order_X,r_h,
order_theta,
theta_min = 0,
theta_max = np.pi,
L=1):
"""Constructor.
Parameters
----------
order_X -- polynomial order in X direction
r_h -- physical minimum radius (uncompactified coordinates)
order_theta -- polynomial order in theta direction
theta_min -- minimum longitudinal value. Should be no less than 0.
theta_max -- maximum longitudinal value. Should be no greater than pi.
L -- Characteristic length scale of problem.
Needed for compactification
"""
self.order_X = order_X
self.order_theta = order_theta
self.r_h = r_h
self.theta_min = theta_min
self.theta_max = theta_max
self.L = L
super(PyballdDiscretization,self).__init__(order_X,
self.X_min,self.X_max,
order_theta,
theta_min,theta_max)
self.r = self.get_r_from_X(self.x)
self.R = self.get_r_from_X(self.X)
self.dRdX = self.get_drdX(self.X)
self.drdX = self.get_drdX(self.x)
self.dXdR = self.get_dXdr(self.X)
self.dXdr = self.get_dXdr(self.x)
self.d2XdR2 = self.get_d2Xdr2(self.X)
self.d2Xdr2 = self.get_d2Xdr2(self.x)
self.d2RdX2 = self.get_d2rdX2(self.X)
self.d2rdX2 = self.get_d2rdX2(self.x)
self.theta = self.y
self.THETA = self.Y
def get_integration_weights(order,nodes=None):
"""
Returns the integration weights for Gauss-Lobatto quadrature
as a function of the order of the polynomial we want to
represent.
See: https://en.wikipedia.org/wiki/Gaussian_quadrature
    See: arXiv:gr-qc/0609020v1
"""
    if nodes is None:
        nodes = get_quadrature_points(order)
if poly == polynomial.chebyshev.Chebyshev:
weights = np.empty((order+1))
weights[1:-1] = np.pi/order
weights[0] = np.pi/(2*order)
weights[-1] = weights[0]
return weights
elif poly == polynomial.legendre.Legendre:
interior_weights = 2/((order+1)*order*poly.basis(order)(nodes[1:-1])**2)
boundary_weights = np.array([1-0.5*np.sum(interior_weights)])
weights = np.concatenate((boundary_weights,
interior_weights,
boundary_weights))
return weights
    else:
        raise ValueError("Not a known polynomial type.")
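# A minimal check sketch (assumes the module-level `poly` and `polynomial`
# globals the function above relies on; the Chebyshev choice and order are
# illustrative): Gauss-Lobatto-Chebyshev weights sum to pi, the integral of
# the Chebyshev weight function over [-1, 1].
import numpy as np
from numpy import polynomial

poly = polynomial.chebyshev.Chebyshev
_order = 8
_nodes = np.cos(np.pi * np.arange(_order + 1) / _order)
_w = get_integration_weights(_order, _nodes)
assert np.isclose(_w.sum(), np.pi)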
def gelu_fast(_x):
return 0.5 * _x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (_x + 0.044715 * tf.pow(_x, 3))))
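# A minimal sketch comparing the tanh approximation above with the exact
# erf-based GELU (assumes TensorFlow 2.x eager mode and SciPy; the tolerance
# is an illustrative assumption):
import numpy as np
import tensorflow as tf
from scipy.special import erf

_x = np.linspace(-3.0, 3.0, 7)
_exact = 0.5 * _x * (1 + erf(_x / np.sqrt(2.0)))
_approx = gelu_fast(tf.constant(_x)).numpy()
assert np.allclose(_exact, _approx, atol=1e-3)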