import numpy as np

def trapzint(f, a, b, bins=10000):
    # Trapezoidal integral of f over [a, b] on a log-spaced grid (shifted by 1 so a = 0 works).
    zbins = np.logspace(np.log10(a + 1), np.log10(b + 1), bins) - 1
    return np.trapz(f(zbins[:-1]), x=zbins[:-1], dx=np.diff(zbins))  # dx is ignored when x is given
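A minimal usage sketch of the helper above; the integrand and bounds are made up, and the exact value is only for comparison:

approx = trapzint(lambda z: 1.0 / (1.0 + z), 0.0, 10.0)
exact = np.log(11.0)   # analytic integral of 1/(1+z) over [0, 10]
print(approx, exact)   # agree to about three decimals (the [:-1] slice drops the last sub-interval)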
Python trapz() example source code
def evaluate_recall(self, candidate_boxes, ar_thresh=0.5):
    # Record max overlap value for each gt box
    # Return vector of overlap values
    gt_overlaps = np.zeros(0)
    for i in xrange(self.num_images):
        gt_inds = np.where(self.roidb[i]['gt_classes'] > 0)[0]
        gt_boxes = self.roidb[i]['boxes'][gt_inds, :]
        boxes = candidate_boxes[i]
        if boxes.shape[0] == 0:
            continue
        overlaps = bbox_overlaps(boxes.astype(np.float),
                                 gt_boxes.astype(np.float))
        # gt_overlaps = np.hstack((gt_overlaps, overlaps.max(axis=0)))
        _gt_overlaps = np.zeros((gt_boxes.shape[0]))
        for j in xrange(gt_boxes.shape[0]):
            argmax_overlaps = overlaps.argmax(axis=0)
            max_overlaps = overlaps.max(axis=0)
            gt_ind = max_overlaps.argmax()
            gt_ovr = max_overlaps.max()
            assert(gt_ovr >= 0)
            box_ind = argmax_overlaps[gt_ind]
            _gt_overlaps[j] = overlaps[box_ind, gt_ind]
            assert(_gt_overlaps[j] == gt_ovr)
            overlaps[box_ind, :] = -1
            overlaps[:, gt_ind] = -1
        gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))

    num_pos = gt_overlaps.size
    gt_overlaps = np.sort(gt_overlaps)
    step = 0.001
    thresholds = np.minimum(np.arange(0.5, 1.0 + step, step), 1.0)
    recalls = np.zeros_like(thresholds)
    for i, t in enumerate(thresholds):
        recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
    ar = 2 * np.trapz(recalls, thresholds)
    return ar, gt_overlaps, recalls, thresholds
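The average-recall (AR) value returned above is simply the recall curve integrated over IoU thresholds with np.trapz. A small self-contained sketch of that last step, with made-up overlap values:

import numpy as np

gt_overlaps = np.array([0.55, 0.62, 0.70, 0.81, 0.93])   # hypothetical max IoU per gt box
num_pos = gt_overlaps.size
step = 0.001
thresholds = np.minimum(np.arange(0.5, 1.0 + step, step), 1.0)
recalls = np.array([(gt_overlaps >= t).sum() / float(num_pos) for t in thresholds])
# Integrate recall over IoU in [0.5, 1.0]; the factor 2 rescales the 0.5-wide
# threshold range so that recall == 1 everywhere gives AR == 1.
ar = 2 * np.trapz(recalls, thresholds)
print(ar)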
def precisionAuc(positions, groundTruth, radius, nStep):
    thres = np.linspace(0, radius, nStep)
    errs = np.zeros([nStep], dtype=np.float32)
    distances = np.sqrt(np.power(positions[:, 0] - groundTruth[:, 0], 2)
                        + np.power(positions[:, 1] - groundTruth[:, 1], 2))
    # Drop NaN distances (assigning [] to an indexed NumPy array does not delete elements)
    distances = distances[~np.isnan(distances)]
    for p in range(0, nStep):
        errs[p] = np.shape(np.where(distances > thres[p]))[-1]
    score = np.trapz(errs)
    return score
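A hypothetical call to the precisionAuc helper above, with invented tracker and ground-truth positions; lower scores mean fewer frames whose error exceeds the thresholds:

import numpy as np

positions = np.array([[10.0, 12.0], [20.0, 18.0], [35.0, 40.0]])    # toy tracker output
groundTruth = np.array([[11.0, 12.0], [22.0, 19.0], [30.0, 44.0]])  # toy ground truth
score = precisionAuc(positions, groundTruth, radius=50, nStep=100)
print(score)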
def obj_func_cont(xx, e_g, e_p, g, rho, Q_g, Q_p, head_g, head_p, optimizing=True):
    # `price_duration` and the interpolant `f` are module-level globals in the source.
    H_T = int(price_duration.Frequency.max())  # total duration (100%)
    x1 = np.arange(0, xx)
    y1 = f(x1)
    x2 = np.arange(H_T - xx, H_T)
    y2 = f(x2)
    Power_Revenue = np.trapz(y1, x1, dx=0.1, axis=-1) * e_g * rho * g * Q_g * head_g / (10 ** 6)
    Pumping_Cost = np.trapz(y2, x2, dx=0.1, axis=-1) / e_p * rho * g * Q_p * head_p / (10 ** 6)
    z = Power_Revenue - Pumping_Cost  # profit
    return -z if optimizing else z
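To illustrate the two np.trapz calls above in isolation, here is a toy sketch with an invented linear price-duration curve f and a 100-hour horizon; both are assumptions, not the values used in the source:

import numpy as np

def f(h):                               # assumed price-duration curve
    return 50.0 - 0.4 * h

H_T = 100                               # assumed total duration
xx = 30                                 # generate during the 30 highest-price hours
x1 = np.arange(0, xx)                   # generation hours (high prices)
x2 = np.arange(H_T - xx, H_T)           # pumping hours (low prices)
revenue_area = np.trapz(f(x1), x1)      # area under the curve while generating
pumping_area = np.trapz(f(x2), x2)      # area under the curve while pumping
print(revenue_area, pumping_area)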
def _get_indice(cls, w, flux, blue, red, band=None, unit='ew', degree=1,
                **kwargs):
    """ compute spectral index after continuum subtraction

    Parameters
    ----------
    w: ndarray (nw, )
        array of wavelengths in AA
    flux: ndarray (N, nw)
        array of flux values for different spectra in the series
    blue: tuple(2)
        selection for blue continuum estimate
    red: tuple(2)
        selection for red continuum estimate
    band: tuple(2), optional
        select region in this band only.
        default is band = (min(blue), max(red))
    unit: str
        `ew` or `mag`: whether equivalent width or magnitude
    degree: int (default 1)
        degree of the polynomial fit to the continuum

    Returns
    -------
    ew: ndarray (N,)
        equivalent width array
    """
    wi, fi = cls.continuum_normalized_region_around_line(w, flux, blue,
                                                         red, band=band,
                                                         degree=degree)
    if unit in (0, 'ew', 'EW'):
        return np.trapz(1. - fi, wi, axis=-1)
    else:
        m = np.trapz(fi, wi, axis=-1)
        m = -2.5 * np.log10(m / np.ptp(wi))
        return m
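As a sanity check of the 'ew' branch above, the equivalent width EW = integral of (1 - F/Fc) dlambda can be evaluated directly with np.trapz on a toy, continuum-normalized Gaussian absorption line (all line parameters invented):

import numpy as np

w = np.linspace(4840.0, 4880.0, 400)                        # wavelength grid [AA]
fi = 1.0 - 0.6 * np.exp(-0.5 * ((w - 4861.0) / 2.0) ** 2)   # normalized flux, toy line
ew = np.trapz(1.0 - fi, w, axis=-1)                         # equivalent width [AA]
print(ew)   # ~ 0.6 * 2 * sqrt(2*pi) ~ 3.0 AA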
def __init__(self, wavelength, transmit, name='', dtype="photon", unit=None):
    """Constructor"""
    self.name = name
    try:   # get units from the inputs
        self._wavelength = wavelength.magnitude
        self.wavelength_unit = str(wavelength.units)
    except AttributeError:
        self._wavelength = wavelength
        self.wavelength_unit = unit
    self.transmit = transmit
    self.norm = trapz(transmit, self._wavelength)
    self._lT = trapz(self._wavelength * transmit, self._wavelength)
    self._lpivot = np.sqrt(self._lT / trapz(transmit / self._wavelength, self._wavelength))
    self._cl = self._lT / self.norm
    self.set_dtype(dtype)
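A standalone check of the pivot-wavelength expression used above, evaluated for an invented box-shaped transmission curve (not a real filter):

import numpy as np

lam = np.linspace(4000.0, 6000.0, 2001)                 # wavelength grid [AA]
T = ((lam > 4500.0) & (lam < 5500.0)).astype(float)     # toy box transmission
lT = np.trapz(lam * T, lam)
lpivot = np.sqrt(lT / np.trapz(T / lam, lam))
print(lpivot)   # roughly 4992 AA; analytically sqrt((b**2 - a**2) / (2 * log(b / a))) for a box [a, b]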
def leff(self):
    """ Unitwise Effective wavelength

    leff = int(lamb * T * Vega dlamb) / int(T * Vega dlamb)
    """
    with Vega() as v:
        s = self.reinterp(v.wavelength)
        w = s._wavelength
        leff = np.trapz(w * s.transmit * v.flux.magnitude, w, axis=-1)
        leff /= np.trapz(s.transmit * v.flux.magnitude, w, axis=-1)
    if self.wavelength_unit is not None:
        return leff * unit[self.wavelength_unit]
    else:
        return leff
def energy_radiated_approximation_and_farfield2(self, trajectory, gamma, x, y, distance):
    # N = trajectory.shape[1]
    N = trajectory.nb_points()
    if distance == None:
        # in radians:
        n_chap = np.array([x, y, 1.0 - 0.5 * (x ** 2 + y ** 2)])
        X = np.sqrt(x ** 2 + y ** 2)  # TODO: to be changed
    # in meters:
    else:
        X = np.sqrt(x ** 2 + y ** 2 + distance ** 2)
        n_chap = np.array([x, y, distance]) / X

    E = np.zeros((3,), dtype=np.complex)
    integrand = np.zeros((3, N), dtype=np.complex)
    A1 = (n_chap[0] * trajectory.a_x + n_chap[1] * trajectory.a_y + n_chap[2] * trajectory.a_z)
    A2 = (n_chap[0] * (n_chap[0] - trajectory.v_x) + n_chap[1] * (n_chap[1] - trajectory.v_y)
          + n_chap[2] * (n_chap[2] - trajectory.v_z))
    Alpha2 = np.exp(
        0. + 1j * self.photon_frequency * (trajectory.t + X / codata.c - n_chap[0] * trajectory.x
                                           - n_chap[1] * trajectory.y - n_chap[2] * trajectory.z))
    Alpha1 = (1.0 / (1.0 - n_chap[0] * trajectory.v_x
                     - n_chap[1] * trajectory.v_y - n_chap[2] * trajectory.v_z)) ** 2
    integrand[0] += (A1 * (n_chap[0] - trajectory.v_x) - A2 * trajectory.a_x) * Alpha2 * Alpha1
    integrand[1] += (A1 * (n_chap[1] - trajectory.v_y) - A2 * trajectory.a_y) * Alpha2 * Alpha1
    integrand[2] += (A1 * (n_chap[2] - trajectory.v_z) - A2 * trajectory.a_z) * Alpha2 * Alpha1
    for k in range(3):
        # E[k] = np.trapz(integrand[k], self.trajectory.t)
        E[k] = np.trapz(integrand[k], trajectory.t)
    return E
def energy_radiated_farfield2(self, trajectory, gamma, x, y, distance):
    N = trajectory.nb_points()
    if distance == None:
        # in radians:
        n_chap = np.array([x, y, 1.0 - 0.5 * (x ** 2 + y ** 2)])
        X = np.sqrt(x ** 2 + y ** 2)  # TODO: to be changed
    # in meters:
    else:
        X = np.sqrt(x ** 2 + y ** 2 + distance ** 2)
        n_chap = np.array([x, y, distance]) / X

    R = X / codata.c - n_chap[0] * trajectory.x - n_chap[1] * trajectory.y - n_chap[2] * trajectory.z
    E = np.zeros((3,), dtype=np.complex)
    integrand = np.zeros((3, N), dtype=np.complex)
    A1 = (n_chap[0] * trajectory.a_x + n_chap[1] * trajectory.a_y + n_chap[2] * trajectory.a_z)
    A2 = (n_chap[0] * (n_chap[0] - trajectory.v_x) + n_chap[1] * (n_chap[1] - trajectory.v_y)
          + n_chap[2] * (n_chap[2] - trajectory.v_z))
    Alpha2 = np.exp(0. + 1j * self.photon_frequency * (trajectory.t + R))
    Alpha1 = (1.0 / (1.0 - n_chap[0] * trajectory.v_x
                     - n_chap[1] * trajectory.v_y - n_chap[2] * trajectory.v_z)) ** 2
    cst = codata.c / (R * gamma ** 2)
    integrand[0] += (A1 * (n_chap[0] - trajectory.v_x) - A2 * trajectory.a_x
                     + cst * (n_chap[0] - trajectory.v_x)) * Alpha2 * Alpha1
    integrand[1] += (A1 * (n_chap[1] - trajectory.v_y) - A2 * trajectory.a_y
                     + cst * (n_chap[1] - trajectory.v_y)) * Alpha2 * Alpha1
    integrand[2] += (A1 * (n_chap[2] - trajectory.v_z) - A2 * trajectory.a_z
                     + cst * (n_chap[2] - trajectory.v_z)) * Alpha2 * Alpha1
    for k in range(3):
        # E[k] = np.trapz(integrand[k], self.trajectory.t)
        E[k] = np.trapz(integrand[k], trajectory.t)
    E *= -1j * np.exp(1j * self.photon_frequency / codata.c
                      * (n_chap[0] * x + n_chap[1] * y + n_chap[2] * distance))
    return E
def energy_radiated_approx2(self, trajectory, gamma, x, y, distance):
    N = trajectory.nb_points()
    n_chap = np.array([x - trajectory.x * codata.c,
                       y - trajectory.y * codata.c,
                       distance - trajectory.z * codata.c])
    # R = np.zeros(n_chap.shape[1])
    # for i in range(n_chap.shape[1]):
    #     R[i] = np.linalg.norm(n_chap[:, i])
    #     n_chap[:, i] /= R[i]
    R = np.sqrt(n_chap[0] ** 2 + n_chap[1] ** 2 + n_chap[2] ** 2)
    n_chap[0, :] /= R
    n_chap[1, :] /= R
    n_chap[2, :] /= R

    E = np.zeros((3,), dtype=np.complex)
    integrand = np.zeros((3, N), dtype=np.complex)
    A1 = (n_chap[0] * trajectory.a_x + n_chap[1] * trajectory.a_y + n_chap[2] * trajectory.a_z)
    A2 = (n_chap[0] * (n_chap[0] - trajectory.v_x) + n_chap[1] * (n_chap[1] - trajectory.v_y)
          + n_chap[2] * (n_chap[2] - trajectory.v_z))
    Alpha2 = np.exp(0. + 1j * self.photon_frequency * (trajectory.t + R / codata.c))
    Alpha1 = (1.0 / (1.0 - n_chap[0] * trajectory.v_x
                     - n_chap[1] * trajectory.v_y - n_chap[2] * trajectory.v_z)) ** 2
    integrand[0] += (A1 * (n_chap[0] - trajectory.v_x) - A2 * trajectory.a_x) * Alpha2 * Alpha1
    integrand[1] += (A1 * (n_chap[1] - trajectory.v_y) - A2 * trajectory.a_y) * Alpha2 * Alpha1
    integrand[2] += (A1 * (n_chap[2] - trajectory.v_z) - A2 * trajectory.a_z) * Alpha2 * Alpha1
    for k in range(3):
        E[k] = np.trapz(integrand[k], trajectory.t)
        # E[k] = integrate.simps(integrand[k], trajectory.t)
    return E
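In the three routines above, each field component is obtained by applying np.trapz to a complex-valued integrand along the trajectory time. A minimal standalone illustration of that final loop, with a made-up integrand rather than the physical one:

import numpy as np

t = np.linspace(0.0, 1.0e-15, 500)                           # time samples [s]
integrand = np.exp(1j * 2.0e15 * t) * np.vstack([t, t ** 2, np.ones_like(t)])
E = np.array([np.trapz(integrand[k], t) for k in range(3)])  # one complex value per component
print(E)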
def integration(self, is_quadrant=0, use_flux_per_mrad2_or_mm2=1):
    if (self.X is None or self.Y is None):
        raise Exception(" X and Y must be array for integration")
    if self.X.shape != self.Y.shape:
        raise Exception(" X and Y must have the same shape")

    if len(self.X.shape) == 2:
        X = np.linspace(self.X.min(), self.X.max(), self.X.shape[0])
        Y = np.linspace(self.Y.min(), self.Y.max(), self.Y.shape[1])
        res1 = integrate.trapz(self.intensity, X)
        res = integrate.trapz(res1, Y)
        # res = integrate.simps(integrate.simps(self.intensity, X), Y)
        # res = self.intensity.sum() * (self.X[1,0] - self.X[0,0]) * (self.Y[0,1] - self.X[0,0]) # regular grid only
    else:  # X and Y are 1d arrays
        if len(self.X) == 1:
            res = self.intensity[0]
        else:  # arbitrary choice
            XY = np.zeros_like(self.X)
            for i in range(1, len(self.X)):
                # arc length along the (X, Y) path; note the squared differences
                XY[i] = XY[i - 1] + np.sqrt((self.X[i] - self.X[i - 1]) ** 2 + (self.Y[i] - self.Y[i - 1]) ** 2)
            res = np.trapz(self.intensity, XY)

    # Note that the value of flux is in phot/s/0.1%bw/mrad2 (or .../mm2) and
    # our grid is in rad (or m), therefore we must account for this:
    if use_flux_per_mrad2_or_mm2:
        res *= 1e6
    # in case the calculation is for a quadrant, the integral is four times the calculated value
    if is_quadrant:
        res *= 4
    return res
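The 2-D branch above applies the trapezoidal rule once per axis. A self-contained sketch of the same idea on an invented Gaussian intensity map, using np.trapz directly:

import numpy as np

x = np.linspace(-1e-3, 1e-3, 101)        # rad
y = np.linspace(-1e-3, 1e-3, 101)        # rad
XX, YY = np.meshgrid(x, y, indexing='ij')
intensity = np.exp(-(XX ** 2 + YY ** 2) / (2 * (3e-4) ** 2))   # toy intensity map
res = np.trapz(np.trapz(intensity, y, axis=1), x)              # integrate over y, then x
print(res)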
def kde(self):
    """ Calculate kernel density estimator distribution function """
    plot = True
    # Input data
    x_data = np.linspace(-math.pi, math.pi, 1000)
    # Kernels, centered at input data points
    kernels = []
    for datapoint in self.data:
        # Make the basis function as a von Mises PDF
        kernel = self.vonMisesPDF(x_data, mu=datapoint)
        kernels.append(kernel)
    # Handle weights
    if len(self.weights) > 0:
        kernels = np.asarray(kernels)
        weighted_kernels = np.multiply(kernels, self.weights[:, None])
    else:
        weighted_kernels = kernels
    # Normalize pdf
    vmkde = np.sum(weighted_kernels, axis=0)
    vmkde = vmkde / np.trapz(vmkde, x=x_data)
    self.fn = interp1d(x_data, vmkde)
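The normalization step above divides by the trapezoidal integral so the KDE has unit area on [-pi, pi]. A standalone sketch with hand-built, unweighted von Mises kernels and invented angles (the concentration parameter kappa is an assumption):

import numpy as np

x = np.linspace(-np.pi, np.pi, 1000)
kappa = 4.0                                             # assumed concentration
data = np.array([-1.0, 0.3, 2.0])                       # toy angles [rad]
kernels = np.exp(kappa * np.cos(x - data[:, None]))     # unnormalized von Mises kernels
pdf = kernels.sum(axis=0)
pdf /= np.trapz(pdf, x=x)                               # normalize to unit area
print(np.trapz(pdf, x=x))                               # ~ 1.0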