import cv2
import numpy as np

def sparse_optical_flow(im1, im2, pts, fb_threshold=-1,
                        window_size=15, max_level=2,
                        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)):
    # Forward flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(im1, im2, pts, None,
                                           winSize=(window_size, window_size),
                                           maxLevel=max_level, criteria=criteria)
    # Backward flow, for a forward-backward consistency check
    if fb_threshold > 0:
        p0r, st0, err = cv2.calcOpticalFlowPyrLK(im2, im1, p1, None,
                                                 winSize=(window_size, window_size),
                                                 maxLevel=max_level, criteria=criteria)
        p0r[st0 == 0] = np.nan
        # Keep only points whose backward flow lands close to where they started
        fb_good = (np.fabs(p0r - pts) < fb_threshold).all(axis=1)
        p1[~fb_good] = np.nan
        st = np.bitwise_and(st, st0)
        err[~fb_good] = np.nan
    return p1, st, err
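
# A minimal usage sketch (hypothetical frames and points, not from the original
# source); calcOpticalFlowPyrLK expects 8-bit grayscale images and float32 points:
# pts = cv2.goodFeaturesToTrack(im1, maxCorners=200, qualityLevel=0.01,
#                               minDistance=7).reshape(-1, 2).astype(np.float32)
# p1, st, err = sparse_optical_flow(im1, im2, pts, fb_threshold=1.0)
# tracked = p1[st.ravel() == 1]  # points that survived the forward-backward check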
def check_visibility(camera, pts_w, zmin=0, zmax=100):
    """
    Check whether points are visible given the fov of the camera.
    This method checks both the horizontal and the vertical fov.
    camera: type Camera
    pts_w: points in world coordinates, reshaped to (-1, 3)
    """
    # Transform points into the camera's reference frame
    pts_c = camera.c2w(pts_w.reshape(-1, 3))
    # Determine the look-at vector, and check the angle
    # subtended with the camera's z-vector (3rd column)
    z = pts_c[:, 2]
    v = pts_c / np.linalg.norm(pts_c, axis=1).reshape(-1, 1)
    hangle, vangle = np.arctan2(v[:, 0], v[:, 2]), np.arctan2(-v[:, 1], v[:, 2])
    # Boolean mask for all points within the fov and depth range; use
    # elementwise & rather than `and`, which is invalid on arrays
    return (np.fabs(hangle) < camera.fov[0] * 0.5) & \
           (np.fabs(vangle) < camera.fov[1] * 0.5) & \
           (z >= zmin) & (z <= zmax)
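
# A self-contained sketch of the same angle test, assuming the points are
# already in the camera frame and a hypothetical fov of (90, 60) degrees
# (the Camera class above is project-specific; this bypasses it entirely):
def fov_mask(pts_c, fov_h_deg=90.0, fov_v_deg=60.0, zmin=0.0, zmax=100.0):
    v = pts_c / np.linalg.norm(pts_c, axis=1, keepdims=True)
    hangle = np.arctan2(v[:, 0], v[:, 2])
    vangle = np.arctan2(-v[:, 1], v[:, 2])
    return (np.fabs(hangle) < np.radians(fov_h_deg) / 2) & \
           (np.fabs(vangle) < np.radians(fov_v_deg) / 2) & \
           (pts_c[:, 2] >= zmin) & (pts_c[:, 2] <= zmax)

# e.g. a point straight ahead passes, a point behind the camera fails:
# fov_mask(np.array([[0., 0., 5.], [0., 0., -5.]]))  # -> [True, False]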
def test_multiple_calls(self):
    """Tests that the results are the same after calling multiple times
    """
    bayes = Bayesian(simulations={'Gun': [self.sim1, self.exp1]},
                     models={'eos': self.eos_model},
                     opt_keys='eos')
    sol1, hist1, sens1, fisher1 = bayes()
    sol2, hist2, sens2, fisher2 = bayes()
    npt.assert_almost_equal(hist1[0], hist2[0], decimal=4,
                            err_msg='Histories not equal for subsequent '
                                    'runs')
    npt.assert_almost_equal(sol1.models['eos'].get_dof() /
                            sol2.models['eos'].get_dof(),
                            np.ones(bayes.shape()[1]),
                            decimal=10,
                            err_msg='DOF not equal for subsequent runs')
    npt.assert_almost_equal(np.fabs(sens1['Gun'] - sens2['Gun']),
                            np.zeros(sens1['Gun'].shape),
                            decimal=10)
def test_effvol_complete():
    # Test that the effective volume == volume when the completeness == 1
    tsf= gaia_tools.select.tgasSelectUniform(comp=1.)
    tesf= gaia_tools.select.tgasEffectiveSelect(tsf)
    dxy, dz, zmin= 0.2, 0.1, 0.15
    v= tesf.volume(\
        lambda x,y,z: cyl_vol_func(x,y,z,xymax=dxy,zmin=zmin,zmax=zmin+dz),
        xyz=True)
    v_exp= numpy.pi*dxy**2.*dz
    assert(numpy.fabs(v/v_exp-1.) < 10.**-3.), 'Effective volume for unit completeness is not equal to the volume'
    # Another one
    dxy, dz, zmin= 0.2, 0.2, -0.15
    v= tesf.volume(\
        lambda x,y,z: cyl_vol_func(x,y,z,xymax=dxy,zmin=zmin,zmax=zmin+dz),
        xyz=True,ndists=251)
    v_exp= numpy.pi*dxy**2.*dz
    assert(numpy.fabs(v/v_exp-1.) < 10.**-2.), 'Effective volume for unit completeness is not equal to the volume'
    return None
def test_effvol_uniform_complete():
    # Test that the effective volume == A x volume when the completeness == A
    comp= 0.33
    tsf= gaia_tools.select.tgasSelectUniform(comp=comp)
    tesf= gaia_tools.select.tgasEffectiveSelect(tsf)
    dxy, dz, zmin= 0.2, 0.1, 0.15
    v= tesf.volume(\
        lambda x,y,z: cyl_vol_func(x,y,z,xymax=dxy,zmin=zmin,zmax=zmin+dz),
        xyz=True)
    v_exp= numpy.pi*dxy**2.*dz*comp
    assert(numpy.fabs(v/v_exp-1.) < 10.**-3.), 'Effective volume for uniform completeness is not equal to completeness x volume'
    # Another one
    dxy, dz, zmin= 0.2, 0.2, -0.15
    v= tesf.volume(\
        lambda x,y,z: cyl_vol_func(x,y,z,xymax=dxy,zmin=zmin,zmax=zmin+dz),
        xyz=True,ndists=251)
    v_exp= numpy.pi*dxy**2.*dz*comp
    assert(numpy.fabs(v/v_exp-1.) < 10.**-2.), 'Effective volume for uniform completeness is not equal to completeness x volume'
    return None
def test_effvol_uniform_complete_partialsky():
    # Test that the effective volume == A x volume x sky-fraction when the completeness == A over a fraction of the sky for a spherical volume
    comp= 0.33
    ramin, ramax= 30., 120.
    tsf= gaia_tools.select.tgasSelectUniform(comp=comp,ramin=ramin,ramax=ramax)
    tesf= gaia_tools.select.tgasEffectiveSelect(tsf)
    dr, rmin= 0.1, 0.
    v= tesf.volume(\
        lambda x,y,z: spher_vol_func(x,y,z,rmin=rmin,rmax=rmin+dr),
        xyz=True,ndists=251)
    v_exp= 4.*numpy.pi*dr**3./3.*comp*(ramax-ramin)/360.
    assert(numpy.fabs(v/v_exp-1.) < 10.**-2.), 'Effective volume for uniform completeness is not equal to completeness x volume x sky fraction'
    # Another one
    dr, rmin= 0.2, 0.
    v= tesf.volume(\
        lambda x,y,z: spher_vol_func(x,y,z,rmin=rmin,rmax=rmin+dr),
        xyz=True,ndists=501)
    v_exp= 4.*numpy.pi*dr**3./3.*comp*(ramax-ramin)/360.
    assert(numpy.fabs(v/v_exp-1.) < 10.**-1.9), 'Effective volume for uniform completeness is not equal to completeness x volume x sky fraction'
    return None
def azimuth_init(self):
    _R_eq = self.radius_eq
    _inc = float(self.parking_orbit_inc)
    _lat = self.latitude()
    _to = float(self.parking_orbit_alt)
    _mu = self.mu
    _Rot_p = self.rotational_period
    node = "Ascending"
    if _inc < 0:
        node = "Descending"
        _inc = np.fabs(_inc)
    if np.fabs(_lat) > _inc:
        _inc = np.fabs(_lat)
    if (180 - np.fabs(_lat)) < _inc:
        _inc = 180 - np.fabs(_lat)
    velocity_eq = (2 * pi * _R_eq) / _Rot_p
    t_orb_v = np.sqrt(_mu / (_to + _R_eq))
    return _inc, _lat, velocity_eq, t_orb_v, node
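
# A hedged sketch (launch_azimuth is a hypothetical helper, not part of the
# original source) of how these returned values are commonly combined: the
# inertial azimuth from sin(az) = cos(inc)/cos(lat), then a correction that
# subtracts the surface rotation speed from the horizontal velocity component.
def launch_azimuth(_inc, _lat, velocity_eq, t_orb_v):
    # Inertial launch azimuth from spherical trigonometry
    az_inertial = np.arcsin(np.clip(np.cos(np.radians(_inc)) /
                                    np.cos(np.radians(_lat)), -1.0, 1.0))
    # Correct for the planet's rotation (velocity_eq is the equatorial speed)
    v_x = t_orb_v * np.sin(az_inertial) - velocity_eq * np.cos(np.radians(_lat))
    v_y = t_orb_v * np.cos(az_inertial)
    return np.degrees(np.arctan2(v_x, v_y))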
def azimuth_init(self):
    _R_eq = self.radius_eq
    _inc = float(self.target_orbit_inc)
    _lat = self.latitude()
    _to = float(self.target_orbit_alt)
    _mu = self.mu
    _Rot_p = self.rotational_period
    node = "Ascending"
    if _inc < 0:
        node = "Descending"
        _inc = np.fabs(_inc)
    if np.fabs(_lat) > _inc:
        _inc = np.fabs(_lat)
    if (180 - np.fabs(_lat)) < _inc:
        _inc = 180 - np.fabs(_lat)
    velocity_eq = (2 * pi * _R_eq) / _Rot_p
    t_orb_v = np.sqrt(_mu / (_to + _R_eq))
    return _inc, _lat, velocity_eq, t_orb_v, node
def test_dipole_fluxpoints(self):
    """Tests dipole flux points."""
    field = ElectricField([PointCharge(-2, [0, 0]), PointCharge(2, [2, 0])])
    circle = GaussianCircle([0, 0], 0.1)
    fluxpoints = circle.fluxpoints(field, 4)
    self.assertEqual(len(fluxpoints), 4)
    fluxpoints = circle.fluxpoints(field, 14)
    self.assertEqual(len(fluxpoints), 14)
    self.assertTrue(isclose(fluxpoints[0], [0.1, 0]).all())
    self.assertTrue(isclose(fluxpoints[7], [-0.1, 0]).all())
    x1 = fluxpoints[1:7]
    x2 = fluxpoints[-1:7:-1]
    x2[:, 1] = fabs(x2[:, 1])
    self.assertTrue(isclose(x1, x2).all())
def test_energy_conservation_sech2disk_manyparticles():
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05)
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
        cnt+= 1
    return None
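
# A self-contained illustration (not from wendy) of the property under test:
# a symplectic leapfrog step keeps the energy of a unit harmonic oscillator
# bounded at the O(dt^2) level instead of drifting with the number of steps.
def leapfrog_energy_drift(x0=1.0, v0=0.0, dt=0.05, nstep=100):
    x, v = x0, v0
    E0 = 0.5*v0**2 + 0.5*x0**2
    drift = 0.0
    for _ in range(nstep):
        v += 0.5*dt*(-x)   # half kick
        x += dt*v          # drift
        v += 0.5*dt*(-x)   # half kick
        drift = max(drift, abs(0.5*v**2 + 0.5*x**2 - E0))
    return drift  # stays small (O(dt^2)) and does not grow with nstep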
def test_energy_conservation_sech2disk_manyparticles():
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    omega= 1.1
    g= wendy.nbody(x,v,m,0.05,omega=omega)
    E= wendy.energy(x,v,m,omega=omega)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m,omega=omega)-E) < 10.**-10., "Energy not conserved during simple N-body integration with external harmonic potential"
        cnt+= 1
    return None
def test_energy_individual():
    # Simple test that the individual energies are calculated correctly
    x= numpy.array([-1.1,0.1,1.3])
    v= numpy.array([3.,2.,-5.])
    m= numpy.array([1.,2.,3.])
    omega= 1.1
    E= wendy.energy(x,v,m,individual=True,omega=omega)
    assert numpy.fabs(E[0]-m[0]*v[0]**2./2.-m[0]*(m[1]*numpy.fabs(x[0]-x[1])
                                                  +m[2]*numpy.fabs(x[0]-x[2])
                                                  +omega**2.*x[0]**2./2.)) < 10.**-10
    assert numpy.fabs(E[1]-m[1]*v[1]**2./2.-m[1]*(m[0]*numpy.fabs(x[0]-x[1])
                                                  +m[2]*numpy.fabs(x[2]-x[1])
                                                  +omega**2.*x[1]**2./2.)) < 10.**-10
    assert numpy.fabs(E[2]-m[2]*v[2]**2./2.-m[2]*(m[0]*numpy.fabs(x[0]-x[2])
                                                  +m[1]*numpy.fabs(x[2]-x[1])
                                                  +omega**2.*x[2]**2./2.)) < 10.**-10
    return None
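
# A standalone sketch of the per-particle energy the asserts above encode
# (units with 2*pi*G = 1): kinetic term, |x_i - x_j| sheet-sheet potential,
# and the external harmonic term. Hypothetical helper mirroring the test.
def individual_energies(x, v, m, omega=0.):
    E= 0.5*m*v**2.+0.5*m*omega**2.*x**2.
    for i in range(len(x)):
        E[i]+= m[i]*numpy.sum(numpy.delete(m*numpy.fabs(x-x[i]),i))
    return E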
def test_energy_conservation_sech2disk_manyparticles():
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000)
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration"
        cnt+= 1
    return None
def test_notracermasses():
    # approx should work with tracer sheets
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    m[N//2:]= 0.
    m*= 2.
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000)
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration with some tracer particles"
        cnt+= 1
    return None
def test_energy_conservation_sech2disk_manyparticles():
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    omega= 1.1
    g= wendy.nbody(x,v,m,0.05,omega=omega,approx=True,nleap=1000)
    E= wendy.energy(x,v,m,omega=omega)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m,omega=omega)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration with external harmonic potential"
        cnt+= 1
    return None
def test_againstexact_sech2disk_manyparticles():
    # Test that the exact N-body and the approximate N-body agree
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    omega= 1.1
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=2000,omega=omega)
    ge= wendy.nbody(x,v,m,0.05,omega=omega)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        txe,tve= next(ge)
        assert numpy.all(numpy.fabs(tx-txe) < 10.**-5.), "Exact and approximate N-body give different positions"
        assert numpy.all(numpy.fabs(tv-tve) < 10.**-5.), "Exact and approximate N-body give different velocities"
        cnt+= 1
    return None
def potential(y,x,v,m,twopiG=1.,omega=None):
    """
    NAME:
       potential
    PURPOSE:
       compute the gravitational potential at a set of points
    INPUT:
       y - positions at which to compute the potential
       x - positions of N-body particles [N]
       v - velocities of N-body particles [N]
       m - masses of N-body particles [N]
       twopiG= (1.) value of 2 \pi G
       omega= (None) if set, frequency of external harmonic oscillator
    OUTPUT:
       potential(y)
    HISTORY:
       2017-05-12 - Written - Bovy (UofT/CCA)
    """
    if omega is not None:
        out= omega**2.*y**2./2.
    else:
        out= 0.
    return out\
        +twopiG\
        *numpy.sum(m*numpy.fabs(x-numpy.atleast_2d(y).T),axis=1)
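
# Quick usage check (hypothetical values): the potential of a single unit-mass
# sheet at the origin is twopiG*|y|, which the vectorized sum reproduces.
# ys= numpy.linspace(-2.,2.,5)
# potential(ys,numpy.array([0.]),numpy.array([0.]),numpy.array([1.]))
# -> array([2., 1., 0., 1., 2.])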
def visibilityTest(dpt, loc, tol=2.):
    """
    z-buffer like visibility test for non-occluded joints
    :param dpt: depth image
    :param loc: 2D joint locations
    :param tol: tolerance
    :return: list of indices of visible, i.e. non-occluded, joints
    """
    vis = []
    for i in range(loc.shape[0]):
        y = np.rint(loc[i, 1]).astype(int)
        x = np.rint(loc[i, 0]).astype(int)
        if 0 <= x < dpt.shape[1] and 0 <= y < dpt.shape[0]:
            if np.fabs(dpt[y, x] - loc[i, 2]) < tol:
                vis.append(i)
            # else:
            #     print("joint {}: dpt {} anno {}".format(i, dpt[y, x], loc[i, 2]))
    return vis
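
# Minimal usage sketch with a synthetic depth map (hypothetical data):
# dpt = np.full((4, 4), 10.0)
# loc = np.array([[1., 1., 10.5],   # visible: |10.0 - 10.5| < tol
#                 [2., 2., 20.0],   # occluded: depth disagrees by 10
#                 [9., 9., 10.0]])  # outside the image
# visibilityTest(dpt, loc)  # -> [0]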
def equilibrium_ionization(self):
    """
    Calculate the ionization equilibrium for all ions of the element.

    In equilibrium, the ionization and recombination rates into and out of
    each charge state balance, so the ion fractions form the null space of
    the rate matrix. The null-space vector is found here via an SVD and
    normalized so that the fractions sum to one.
    """
    # Build the matrix of ionization and recombination rates
    a_matrix = np.zeros(self.temperature.shape + (self.atomic_number+1, self.atomic_number+1))
    for i in range(1, self.atomic_number):
        a_matrix[:, i, i] = -(self[i].ionization_rate() + self[i].recombination_rate()).value
        a_matrix[:, i, i-1] = self[i-1].ionization_rate().value
        a_matrix[:, i, i+1] = self[i+1].recombination_rate().value
    a_matrix[:, 0, 0] = -(self[0].ionization_rate() + self[0].recombination_rate()).value
    a_matrix[:, 0, 1] = self[1].recombination_rate().value
    a_matrix[:, -1, -1] = -(self[-1].ionization_rate() + self[-1].recombination_rate()).value
    a_matrix[:, -1, -2] = self[-2].ionization_rate().value
    # Solve the homogeneous system using an SVD and normalize
    _, _, V = np.linalg.svd(a_matrix)
    # The right singular vector for the smallest singular value spans the
    # null space (singular values are returned in descending order)
    ioneq = np.fabs(V[:, -1, :])
    ioneq /= np.sum(ioneq, axis=1)[:, np.newaxis]
    return ioneq
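
# A toy standalone demonstration (made-up rates, plain numpy) of the same
# null-space trick for a 3-state system: the right singular vector belonging
# to the smallest singular value of the rate matrix is the equilibrium.
ioniz = np.array([0.5, 0.2])   # hypothetical rates 0->1 and 1->2
recomb = np.array([0.1, 0.4])  # hypothetical rates 1->0 and 2->1
A = np.array([[-ioniz[0],               recomb[0],        0.0],
              [ ioniz[0], -(ioniz[1] + recomb[0]),  recomb[1]],
              [      0.0,                ioniz[1], -recomb[1]]])
_, _, V = np.linalg.svd(A)
frac = np.fabs(V[-1]) / np.fabs(V[-1]).sum()
# frac satisfies A @ frac ~ 0, i.e. it is the steady-state fraction vector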
def _get_cci(cls, df, n_days=None):
    """ Commodity Channel Index
    CCI = (Typical Price - N-period SMA of TP) / (.015 x Mean Deviation)
    Typical Price (TP) = (High + Low + Close) / 3
    TP is implemented here as the 'middle' column.
    :param df: data
    :param n_days: N-day window (default 14)
    :return: None (the column is added to df in place)
    """
    if n_days is None:
        n_days = 14
        column_name = 'cci'
    else:
        n_days = int(n_days)
        column_name = 'cci_{}'.format(n_days)
    tp = df['middle']
    tp_sma = df['middle_{}_sma'.format(n_days)]
    # Mean absolute deviation of TP within the rolling window
    md = df['middle'].rolling(
        min_periods=1, center=False, window=n_days).apply(
        lambda x: np.fabs(x - x.mean()).mean())
    df[column_name] = (tp - tp_sma) / (.015 * md)
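
# A self-contained pandas sketch of the same computation on synthetic prices
# (hypothetical data, no framework columns assumed; the first point has zero
# mean deviation, so its CCI comes out NaN):
import pandas as pd
prices = pd.Series([10., 11., 12., 11., 13., 14., 13., 15.])
n = 5
sma = prices.rolling(n, min_periods=1).mean()
md = prices.rolling(n, min_periods=1).apply(
    lambda x: np.fabs(x - x.mean()).mean(), raw=True)
cci = (prices - sma) / (.015 * md)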
def mad(a, axis=None, c=1.4826, return_med=False):
    """Compute the normalized median absolute deviation (NMAD)
    Can also return the median array, as this can be expensive, and often we want both med and nmad
    Note: c = 1.4826 = 1/0.6745 makes the NMAD a consistent estimator of the
    standard deviation for normally distributed data
    """
    a = checkma(a)
    #return np.ma.median(np.fabs(a - np.ma.median(a))) * c
    if a.count() > 0:
        if axis is None:
            med = fast_median(a)
            out = fast_median(np.fabs(a - med)) * c
        else:
            med = np.ma.median(a, axis=axis)
            out = np.ma.median(np.ma.fabs(a - med), axis=axis) * c
    else:
        out = np.ma.masked
    if return_med:
        out = (out, med)
    return out
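
# Usage sketch: for Gaussian data the NMAD tracks the standard deviation but,
# unlike std, is robust to outliers.
# a = np.ma.masked_invalid(np.random.normal(0., 2., 10000))
# a[:10] = 1e6       # inject outliers
# mad(a)             # ~2.0, barely affected
# a.std()            # inflated by the outliers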
#Percentile values
def find_right_intersect(vec, target_val, start_index=0):
    nearest_index = start_index
    next_index = start_index
    size = len(vec) - 1
    if next_index == size:
        return size
    next_val = vec[next_index]
    best_distance = np.fabs(next_val - target_val)
    while next_index < size:
        next_index += 1
        next_val = vec[next_index]
        dist = np.fabs(next_val - target_val)
        if dist < best_distance:
            best_distance = dist
            nearest_index = next_index
        if next_index == size or next_val < target_val:
            break
    return nearest_index
def find_left_intersect(vec, target_val, start_index=0):
    nearest_index = start_index
    next_index = start_index
    size = len(vec) - 1
    if next_index == size:
        return size
    next_val = vec[next_index]
    best_distance = np.fabs(next_val - target_val)
    while next_index > 0:
        next_index -= 1
        next_val = vec[next_index]
        dist = np.fabs(next_val - target_val)
        if dist < best_distance:
            best_distance = dist
            nearest_index = next_index
        # mirror of find_right_intersect: stop at the left edge (index 0)
        if next_index == 0 or next_val < target_val:
            break
    return nearest_index
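
# Usage sketch on a monotonically decreasing curve:
# vec = np.array([5., 4., 3., 2., 1.])
# find_right_intersect(vec, 2.4)     # -> 3: vec[3] = 2. first drops below 2.4
# find_left_intersect(vec, 3.5, 3)   # -> 2: scanning left, vec[2] = 3. is below 3.5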
def _wrap_results(result, dtype):
    """ wrap our results if needed """
    if is_datetime64_dtype(dtype):
        if not isinstance(result, np.ndarray):
            result = lib.Timestamp(result)
        else:
            result = result.view(dtype)
    elif is_timedelta64_dtype(dtype):
        if not isinstance(result, np.ndarray):
            # raise if we have a timedelta64[ns] which is too large
            if np.fabs(result) > _int64_max:
                raise ValueError("overflow in timedelta operation")
            result = lib.Timedelta(result, unit='ns')
        else:
            result = result.astype('i8').view(dtype)
    return result
def test_irreg_hf(self):
    import matplotlib.pyplot as plt
    fig = plt.gcf()
    plt.clf()
    fig.add_subplot(111)
    idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
    df = DataFrame(np.random.randn(len(idx), 2), idx)
    irreg = df.ix[[0, 1, 3, 4]]
    ax = irreg.plot()
    diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
    sec = 1. / 24 / 60 / 60
    self.assertTrue((np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all())
    plt.clf()
    fig.add_subplot(111)
    df2 = df.copy()
    df2.index = df.index.asobject
    ax = df2.plot()
    diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
    self.assertTrue((np.fabs(diffs[1:] - sec) < 1e-8).all())
def shareflux(lc1, lc2, frac=0.01):
    """
    I add "noise" to lc1 and lc2 by randomly sharing flux between the two sources.

    :param frac: The stddev of the gaussian "noise" in flux, with respect to the minimum flux in the curves.
    """
    if not np.all(lc1.jds == lc2.jds):
        raise RuntimeError("I only work on curves with identical jds!")
    #lc1fs = lc1.getrawfluxes()
    #lc2fs = lc2.getrawfluxes()
    minshift = np.fabs(max(lc1.getminfluxshift(), lc2.getminfluxshift()))
    shifts = frac * minshift * np.random.randn(len(lc1))
    shifts = np.clip(shifts, -minshift+1.0, minshift-1.0) # To guarantee that we won't get negative fluxes
    lc1.addfluxes(shifts)
    lc2.addfluxes(-shifts)
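
# A plain-numpy sketch of the idea (hypothetical flux arrays, no lightcurve
# objects): flux removed from one source is added to the other, so the summed
# flux of the pair is conserved exactly.
# f1 = np.array([100., 110., 105.])
# f2 = np.array([ 80.,  75.,  90.])
# shifts = 0.01 * f2.min() * np.random.randn(len(f1))
# assert np.allclose((f1 + shifts) + (f2 - shifts), f1 + f2)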