def plot_eigenvectors(Vh, U, mytitle, which = [0,1,2,5,10,15]):
assert len(which) % 3 == 0
    nrows = len(which) // 3  # integer division so the subplot grid index stays an int
subplot_loc = nrows*100 + 30
plt.figure(figsize=(18,4*nrows))
title_stamp = mytitle + " {0}"
u = dl.Function(Vh)
counter=1
for i in which:
assert i < U.shape[1]
Ui = U[:,i]
if Ui[0] >= 0:
s = 1./np.linalg.norm(Ui, np.inf)
else:
s = -1./np.linalg.norm(Ui, np.inf)
u.vector().set_local(s*Ui)
plot(u, subplot_loc=(subplot_loc+counter), mytitle=title_stamp.format(i), vmin=-1, vmax=1)
counter = counter+1
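# Hedged usage note: the sign/scale normalization above can be checked in
# isolation with plain NumPy (no dolfin required); all names here are illustrative.
import numpy as np
U_demo = np.random.randn(50, 6)                    # columns play the role of eigenvectors
Ui_demo = U_demo[:, 0]
s_demo = (1.0 if Ui_demo[0] >= 0 else -1.0) / np.linalg.norm(Ui_demo, np.inf)
assert np.isclose(np.linalg.norm(s_demo * Ui_demo, np.inf), 1.0)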
def exportU(self, Vh, fname, varname = "evect", normalize=1):
"""
Export in paraview the generalized eigenvectors U.
Inputs:
- Vh: the parameter finite element space
- fname: the name of the paraview output file
- varname: the name of the paraview variable
    - normalize: if True, the eigenvectors are rescaled so that || u ||_inf = 1
"""
evect = Function(Vh, name=varname)
fid = File(fname)
for i in range(0,self.U.shape[1]):
Ui = self.U[:,i]
if normalize:
s = 1/np.linalg.norm(Ui, np.inf)
evect.vector().set_local(s*Ui)
else:
evect.vector().set_local(Ui)
fid << evect
def __init__( self, get_params_function, try_params_function ):
self.get_params = get_params_function
self.try_params = try_params_function
self.max_iter = 81 # maximum iterations per configuration
self.eta = 3 # defines configuration downsampling rate (default = 3)
self.logeta = lambda x: log( x ) / log( self.eta )
self.s_max = int( self.logeta( self.max_iter ))
self.B = ( self.s_max + 1 ) * self.max_iter
self.results = [] # list of dicts
self.counter = 0
self.best_loss = np.inf
self.best_counter = -1
# can be called multiple times
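# The method this comment introduces was truncated from the snippet. As a
# hypothetical illustration (not the original run()), this is how max_iter,
# eta, s_max and B typically drive Hyperband's successive-halving schedule:
from math import ceil, log
max_iter_demo, eta_demo = 81, 3
s_max_demo = int(log(max_iter_demo) / log(eta_demo))
B_demo = (s_max_demo + 1) * max_iter_demo
for s in reversed(range(s_max_demo + 1)):
    n = int(ceil(B_demo / max_iter_demo / (s + 1) * eta_demo ** s))  # initial configs
    r = max_iter_demo * eta_demo ** (-s)                             # initial resource each
    for i in range(s + 1):
        n_i = int(n * eta_demo ** (-i))    # configs kept this round
        r_i = r * eta_demo ** i            # resource per config this round
        print('bracket %d round %d: %d configs x %g iterations' % (s, i, n_i, r_i))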
def load_data(infile, chroms, resolutions):
starts = infile['starts'][...]
chromosomes = infile['chromosomes'][...]
data = {}
for res in resolutions:
data[res] = {}
for i, chrom in enumerate(chromosomes):
if chrom not in chroms:
continue
            start = (starts[i] // res) * res  # floor division: snap the start to a bin boundary
            dist = infile['dist.%s.%i' % (chrom, res)][...]
            valid_rows = infile['valid.%s.%i' % (chrom, res)][...]
            corr = infile['corr.%s.%i' % (chrom, res)][...]
            N, M = corr.shape
            valid = numpy.zeros((N, M), dtype=bool)  # numpy.bool is deprecated; one allocation suffices
            # use j here: reusing i would clobber the chromosome index from the outer loop
            for j in range(min(N - 1, M)):
                P = N - j - 1
                valid[:P, j] = (valid_rows[(j + 1):] * valid_rows[:P]).astype(bool)
            temp = corr * dist
            valid[numpy.abs(temp) == numpy.inf] = False  # mask non-finite correlations
            data[res][chrom] = [start, temp, valid]
return data
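# Toy illustration of the banded mask built above (assumed semantics: entry
# (r, d) is True when bins r and r + d + 1 are both valid).
import numpy
valid_rows_demo = numpy.array([1, 1, 0, 1, 1], dtype=numpy.int32)
Nd = valid_rows_demo.size
valid_demo = numpy.zeros((Nd, Nd), dtype=bool)
for d in range(Nd - 1):
    Pd = Nd - d - 1
    valid_demo[:Pd, d] = (valid_rows_demo[d + 1:] * valid_rows_demo[:Pd]).astype(bool)
print(valid_demo.astype(int))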
def monotoneTFosc(f):
"""Maps [-inf,inf] to [-inf,inf] with different constants
for positive and negative part.
"""
if np.isscalar(f):
if f > 0.:
f = np.log(f) / 0.1
f = np.exp(f + 0.49 * (np.sin(f) + np.sin(0.79 * f))) ** 0.1
elif f < 0.:
f = np.log(-f) / 0.1
f = -np.exp(f + 0.49 * (np.sin(0.55 * f) + np.sin(0.31 * f))) ** 0.1
return f
else:
f = np.asarray(f)
g = f.copy()
idx = (f > 0)
g[idx] = np.log(f[idx]) / 0.1
g[idx] = np.exp(g[idx] + 0.49 * (np.sin(g[idx]) + np.sin(0.79 * g[idx])))**0.1
idx = (f < 0)
g[idx] = np.log(-f[idx]) / 0.1
g[idx] = -np.exp(g[idx] + 0.49 * (np.sin(0.55 * g[idx]) + np.sin(0.31 * g[idx])))**0.1
return g
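# Quick sanity check (illustrative): the oscillation transform preserves sign
# and leaves zero untouched.
f_demo = np.array([-10.0, -1.0, 0.0, 1.0, 10.0])
g_demo = monotoneTFosc(f_demo)
assert np.all(np.sign(g_demo) == np.sign(f_demo))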
def get_float_parameter(self, param_id):
return self.RAPI_rc(vrep.simxGetFloatingParameter( self.cID,
param_id,
vrep.simx_opmode_blocking))[0]
# openai/gym
# Set this in SOME subclasses
#metadata = {'render.modes': []}
#reward_range = (-np.inf, np.inf)
# Override in SOME subclasses
#def _close(self): pass
# Set these in ALL subclasses
#action_space = None
#observation_space = None
# Override in ALL subclasses
#def _step(self, action): raise NotImplementedError
#def _reset(self): raise NotImplementedError
#def _render(self, mode='human', close=False): return
#def _seed(self, seed=None): return []
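# Minimal sketch of a subclass following the (old-style) gym contract above;
# hypothetical and untested, just to show which members get overridden.
class TrivialEnv(object):
    metadata = {'render.modes': []}
    reward_range = (-np.inf, np.inf)
    action_space = None
    observation_space = None
    def _reset(self):
        return 0.0                     # initial observation
    def _step(self, action):
        return 0.0, 1.0, True, {}      # obs, reward, done, info
    def _seed(self, seed=None):
        return []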
def _solveRelativeDG(self, points):
""" Solves the norm constrained version of the problem.
min sum z_q
st z_q >= c'x_q - 1
z_q >= 1 - c'x_q
A'y = c
b'y = 1
||c|| = 1
y >= 0
"""
        if self.normalize_c == 1:
            error = self._solveRelativeDGNorm1(points)
        elif self.normalize_c == np.inf:
            error = self._solveRelativeDGNormInf(points)
        else:
            raise ValueError('normalize_c must be 1 or np.inf')
        return error
def _solveFeasibleProjection(self, points):
m, n = self.A.shape
bestResult = np.inf
for i in range(m):
if i in self.ban_constraints:
result = np.inf
else:
ai = self.A[i]
bi = self.b[i]
result = self._project_to_hyperplane(points, ai, bi)
if result < bestResult:
bestResult = result
self.dual = np.zeros(m)
self.dual[i] = 1.0 / np.linalg.norm(ai, np.inf)
self.c = ai / np.linalg.norm(ai, np.inf)
self._solved = True
#self.dual = self.dual.T.tolist()[0]
self.c = self.c.tolist()[0]
self.error = bestResult
        return bestResult  # return the best projection error, consistent with self.error
def _initialize_kwargs(self, kwargs):
# common kwargs
if 'verbose' in kwargs:
assert isinstance(kwargs['verbose'],
bool), 'verbose needs to be True or False.'
self._verbose = kwargs['verbose']
if 'tol' in kwargs:
assert isinstance(kwargs['tol'],
int), 'tolerance needs to be an integer.'
self.tol = kwargs['tol']
# class specific kwargs
        if 'p' in kwargs:
            assert isinstance(kwargs['p'], int) or kwargs['p'] == 'inf', \
                "p needs to be an integer or 'inf'."
            self.p = kwargs['p']
return kwargs
def _build_graph(self, image_size):
self.image_size = image_size
self.images = tf.placeholder(tf.float32,
shape = (None, image_size, image_size, 3))
images_mini = tf.image.resize_images(self.images,
size = (int(image_size/4),
int(image_size/4)))
self.images_blur = tf.image.resize_images(images_mini,
size = (image_size, image_size))
self.net = U_Net(output_ch = 3, block_fn = 'origin')
self.images_reconst = self.net(self.images_blur, reuse = False)
        # self.images_reconst is unbounded (values in (-inf, +inf)), so clip it before visualizing as images.
self.loss = tf.reduce_mean((self.images_reconst - self.images)**2)
self.opt = tf.train.AdamOptimizer()\
.minimize(self.loss, var_list = self.net.vars)
self.saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
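# Hypothetical visualization helper for the clipping caveat noted above:
# squash unbounded reconstructions into a displayable range before plotting.
import numpy as np
def to_displayable(batch, lo=0.0, hi=1.0):
    return np.clip(batch, lo, hi)      # assumes inputs were scaled to [lo, hi]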
def test_pitch_estimation(self):
"""
test pitch estimation algo with contrived small example
if pitch is within 5 Hz, then say its good (for this small example,
since the algorithm wasn't made for this type of synthesized signal)
"""
cfg = ExperimentConfig(pitch_strength_thresh=-np.inf)
# the next 3 variables are in Hz
tolerance = 5
fs = 48000
f = 150
# create a sine wave of f Hz freq sampled at fs Hz
x = np.sin(2*np.pi * f/fs * np.arange(2**10))
# estimate the pitch, it should be close to f
p, t, s = pest.pitch_estimation(x, fs, cfg)
self.assertTrue(np.all(np.abs(p - f) < tolerance))
def test_PlotCurveItem():
p = pg.GraphicsWindow()
p.ci.layout.setContentsMargins(4, 4, 4, 4) # default margins vary by platform
v = p.addViewBox()
p.resize(200, 150)
data = np.array([1,4,2,3,np.inf,5,7,6,-np.inf,8,10,9,np.nan,-1,-2,0])
c = pg.PlotCurveItem(data)
v.addItem(c)
v.autoRange()
# Check auto-range works. Some platform differences may be expected..
checkRange = np.array([[-1.1457564053237301, 16.145756405323731], [-3.076811473165955, 11.076811473165955]])
assert np.allclose(v.viewRange(), checkRange)
assertImageApproved(p, 'plotcurveitem/connectall', "Plot curve with all points connected.")
c.setData(data, connect='pairs')
assertImageApproved(p, 'plotcurveitem/connectpairs', "Plot curve with pairs connected.")
c.setData(data, connect='finite')
assertImageApproved(p, 'plotcurveitem/connectfinite', "Plot curve with finite points connected.")
c.setData(data, connect=np.array([1,1,1,0,1,1,0,0,1,0,0,0,1,1,0,0]))
assertImageApproved(p, 'plotcurveitem/connectarray', "Plot curve with connection array.")
def test_rescaleData():
    dtypes = list(map(np.dtype, ('ubyte', 'uint16', 'byte', 'int16', 'int', 'float')))  # list() so the nested loops can re-iterate (Python 3)
for dtype1 in dtypes:
for dtype2 in dtypes:
data = (np.random.random(size=10) * 2**32 - 2**31).astype(dtype1)
for scale, offset in [(10, 0), (10., 0.), (1, -50), (0.2, 0.5), (0.001, 0)]:
if dtype2.kind in 'iu':
lim = np.iinfo(dtype2)
lim = lim.min, lim.max
else:
lim = (-np.inf, np.inf)
s1 = np.clip(float(scale) * (data-float(offset)), *lim).astype(dtype2)
s2 = pg.rescaleData(data, scale, offset, dtype2)
assert s1.dtype == s2.dtype
if dtype2.kind in 'iu':
assert np.all(s1 == s2)
else:
assert np.allclose(s1, s2)
def time_slice(self, t_start, t_stop):
'''
Creates a new :class:`Event` corresponding to the time slice of
the original :class:`Event` between (and including) times
:attr:`t_start` and :attr:`t_stop`. Either parameter can also be None
to use infinite endpoints for the time interval.
'''
_t_start = t_start
_t_stop = t_stop
if t_start is None:
_t_start = -np.inf
if t_stop is None:
_t_stop = np.inf
indices = (self >= _t_start) & (self <= _t_stop)
new_evt = self[indices]
return new_evt
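# The None -> +/-inf endpoint substitution in isolation (illustrative):
times_demo = np.array([0.5, 1.5, 2.5, 3.5])
t_start_demo, t_stop_demo = None, 2.0
lo = -np.inf if t_start_demo is None else t_start_demo
hi = np.inf if t_stop_demo is None else t_stop_demo
print(times_demo[(times_demo >= lo) & (times_demo <= hi)])   # [0.5 1.5]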
def time_slice(self, t_start, t_stop):
'''
Creates a new :class:`Epoch` corresponding to the time slice of
the original :class:`Epoch` between (and including) times
:attr:`t_start` and :attr:`t_stop`. Either parameter can also be None
to use infinite endpoints for the time interval.
'''
_t_start = t_start
_t_stop = t_stop
if t_start is None:
_t_start = -np.inf
if t_stop is None:
_t_stop = np.inf
indices = (self >= _t_start) & (self <= _t_stop)
new_epc = self[indices]
new_epc.durations = self.durations[indices]
new_epc.labels = self.labels[indices]
return new_epc
def time_slice(self, t_start, t_stop):
'''
Creates a new :class:`SpikeTrain` corresponding to the time slice of
the original :class:`SpikeTrain` between (and including) times
:attr:`t_start` and :attr:`t_stop`. Either parameter can also be None
to use infinite endpoints for the time interval.
'''
_t_start = t_start
_t_stop = t_stop
if t_start is None:
_t_start = -np.inf
if t_stop is None:
_t_stop = np.inf
indices = (self >= _t_start) & (self <= _t_stop)
new_st = self[indices]
new_st.t_start = max(_t_start, self.t_start)
new_st.t_stop = min(_t_stop, self.t_stop)
if self.waveforms is not None:
new_st.waveforms = self.waveforms[indices]
return new_st
def __call__(self, params):
        print('???', params)
sd1 = params[0]
sd2 = params[1]
cor = params[2]
if sd1 < 0. or sd1 > 10. or sd2 < 0. or sd2 > 10. or cor < -1. or cor > 1.:
return np.inf
bandwidth = maths.stats.choleskysqrt2d(sd1, sd2, cor)
bandwidthdet = la.det(bandwidth)
bandwidthinv = la.inv(bandwidth)
diff = sample[self.__iidx] - sample[self.__jidx]
temp = diff.dot(bandwidthinv.T)
temp *= temp
e = np.exp(np.sum(temp, axis=1))
s = np.sum(e**(-.25) - 4 * e**(-.5))
cost = self.__n / bandwidthdet + (2. / bandwidthdet) * s
        print('!!!', cost)
return cost / 10000.
def test_ecdf_formal_custom():
assert dcst.ecdf_formal(0.1, [0, 1, 2, 3]) == 0.25
assert dcst.ecdf_formal(-0.1, [0, 1, 2, 3]) == 0.0
assert dcst.ecdf_formal(0.1, [3, 2, 0, 1]) == 0.25
assert dcst.ecdf_formal(-0.1, [3, 2, 0, 1]) == 0.0
assert dcst.ecdf_formal(2, [3, 2, 0, 1]) == 0.75
assert dcst.ecdf_formal(1, [3, 2, 0, 1]) == 0.5
assert dcst.ecdf_formal(3, [3, 2, 0, 1]) == 1.0
assert dcst.ecdf_formal(0, [3, 2, 0, 1]) == 0.25
with pytest.raises(RuntimeError) as excinfo:
dcst.ecdf_formal([np.nan, np.inf], [0, 1, 2, 3])
excinfo.match('Input cannot have NaNs.')
correct = np.array([1.0, 1.0])
result = dcst.ecdf_formal([3.1, np.inf], [3, 2, 0, 1])
assert np.allclose(correct, result, atol=atol)
def test_draw_bs_pairs_linreg_nan():
x = np.array([])
y = np.array([])
with pytest.raises(RuntimeError) as excinfo:
dcst.draw_bs_pairs_linreg(x, y, size=1)
excinfo.match('Arrays must have at least 2 mutual non-NaN entries.')
x = np.array([np.nan])
y = np.array([np.nan])
with pytest.raises(RuntimeError) as excinfo:
dcst.draw_bs_pairs_linreg(x, y, size=1)
excinfo.match('Arrays must have at least 2 mutual non-NaN entries.')
x = np.array([np.nan, 1])
y = np.array([1, np.nan])
with pytest.raises(RuntimeError) as excinfo:
dcst.draw_bs_pairs_linreg(x, y, size=1)
excinfo.match('Arrays must have at least 2 mutual non-NaN entries.')
x = np.array([0, 1, 5])
y = np.array([1, np.inf, 3])
with pytest.raises(RuntimeError) as excinfo:
dcst.draw_bs_pairs_linreg(x, y, size=1)
excinfo.match('All entries in arrays must be finite.')
def test_pearson_r_edge():
x = np.array([])
y = np.array([])
with pytest.raises(RuntimeError) as excinfo:
dcst.pearson_r(x, y)
excinfo.match('Arrays must have at least 2 mutual non-NaN entries.')
x = np.array([np.nan])
y = np.array([np.nan])
with pytest.raises(RuntimeError) as excinfo:
dcst.pearson_r(x, y)
excinfo.match('Arrays must have at least 2 mutual non-NaN entries.')
x = np.array([np.nan, 1])
y = np.array([1, np.nan])
with pytest.raises(RuntimeError) as excinfo:
dcst.pearson_r(x, y)
excinfo.match('Arrays must have at least 2 mutual non-NaN entries.')
x = np.array([0, 1, 5])
y = np.array([1, np.inf, 3])
with pytest.raises(RuntimeError) as excinfo:
dcst.pearson_r(x, y)
excinfo.match('All entries in arrays must be finite.')
def make_3d_mask(img_shape, center, radius, shape='sphere'):
mask = np.zeros(img_shape)
radius = np.rint(radius)
center = np.rint(center)
sz = np.arange(int(max(center[0] - radius, 0)), int(max(min(center[0] + radius + 1, img_shape[0]), 0)))
sy = np.arange(int(max(center[1] - radius, 0)), int(max(min(center[1] + radius + 1, img_shape[1]), 0)))
sx = np.arange(int(max(center[2] - radius, 0)), int(max(min(center[2] + radius + 1, img_shape[2]), 0)))
sz, sy, sx = np.meshgrid(sz, sy, sx)
if shape == 'cube':
mask[sz, sy, sx] = 1.
elif shape == 'sphere':
distance2 = ((center[0] - sz) ** 2
+ (center[1] - sy) ** 2
+ (center[2] - sx) ** 2)
distance_matrix = np.ones_like(mask) * np.inf
distance_matrix[sz, sy, sx] = distance2
mask[(distance_matrix <= radius ** 2)] = 1
elif shape == 'gauss':
z, y, x = np.ogrid[:mask.shape[0], :mask.shape[1], :mask.shape[2]]
distance = ((z - center[0]) ** 2 + (y - center[1]) ** 2 + (x - center[2]) ** 2)
mask = np.exp(- 1. * distance / (2 * radius ** 2))
mask[(distance > 3 * radius ** 2)] = 0
return mask
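# Example usage: a spherical mask of radius 3 voxels centred in a 16^3 volume.
mask_demo = make_3d_mask((16, 16, 16), center=(8, 8, 8), radius=3, shape='sphere')
print(int(mask_demo.sum()))            # voxel count inside the sphere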
def __init__(self, datadir,
target_size = 108, image_size = 64,
split = 5, num_utilize = np.inf):
self.datadir = datadir
self.target_size = target_size
self.image_size = image_size
self.split = split
self.image_paths = []
for d in self.datadir:
self.image_paths += glob(d + '/*.jpg')
self.data_size = min(len(self.image_paths), num_utilize)
print('data size : {}'.format(self.data_size))
self.image_paths = np.random.choice(self.image_paths,
self.data_size,
replace = False)
self.data = None
def SLcomputePSNR(X, Xnoisy):
"""
SLcomputePSNR Compute peak signal to noise ratio (PSNR).
Usage:
PSNR = SLcomputePSNR(X, Xnoisy)
Input:
X: 2D or 3D signal.
Xnoisy: 2D or 3D noisy signal.
Output:
PSNR: The peak signal to noise ratio (in dB).
"""
MSEsqrt = np.linalg.norm(X-Xnoisy) / np.sqrt(X.size)
if MSEsqrt == 0:
return np.inf
else:
return 20 * np.log10(255 / MSEsqrt)
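# Example: PSNR of an 8-bit-range image against a noisy copy (sigma = 5
# should land near 20*log10(255/5), roughly 34 dB).
rng = np.random.default_rng(0)
X_demo = rng.integers(0, 256, size=(64, 64)).astype(float)
print(SLcomputePSNR(X_demo, X_demo + rng.normal(0, 5, size=X_demo.shape)))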
def decoding(self, src_encodings):
src_len = len(src_encodings)
# NOTE: should transpose before calling `mst` method!
s_arc, s_label = self.cal_scores(src_encodings)
s_arc_values = s_arc.npvalue().transpose() # src_len, src_len
s_label_values = np.asarray([x.npvalue() for x in s_label]).transpose((2, 1, 0)) # src_len, src_len, n_labels
# weights = np.zeros((src_len + 1, src_len + 1))
# weights[0, 1:(src_len + 1)] = np.inf
# weights[1:(src_len + 1), 0] = np.inf
# weights[1:(src_len + 1), 1:(src_len + 1)] = s_arc_values[batch]
weights = s_arc_values
pred_heads = mst(weights)
pred_labels = [np.argmax(labels[head]) for head, labels in zip(pred_heads, s_label_values)]
return pred_heads, pred_labels
def __init__(self, image, freq, pixelsize, ra0, dec0,
minvalue=1e-4, maxvalue=np.inf, mask=None,
projection="CAR"):
self.image = image # [K] (brightness temperature)
self.freq = freq # [MHz]
self.pixelsize = pixelsize # [arcsec]
self.ra0 = ra0 # [deg]
self.dec0 = dec0 # [deg]
self.minvalue = minvalue
self.maxvalue = maxvalue
self.mask = mask
self.projection = projection
logger.info("SkyModel: Loaded image @ %.2f [MHz], " % freq +
"%.1f [arcsec/pixel]" % pixelsize)
logger.info("Image size: %dx%d" % self.shape)
logger.info("FoV size: %.2fx%.2f [deg^2]" % self.fov)
def make_data_frame(words, years, feature_dict):
"""
Makes a pandas dataframe for word, years, and dictionary of feature funcs.
Each feature func should take (word, year) and return feature value.
Constructed dataframe has flat csv style structure and missing values are removed.
"""
temp = collections.defaultdict(list)
feature_dict["word"] = lambda word, year : word
feature_dict["year"] = lambda word, year : year
for word in words:
for year in years:
            for feature, feature_func in feature_dict.items():
temp[feature].append(feature_func(word, year))
df = pd.DataFrame(temp)
df = df.replace([np.inf, -np.inf], np.nan)
df = df.dropna()
return df
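# Hypothetical usage with two toy feature functions (assumes pandas as pd and
# numpy as np are imported, as the function above requires):
features_demo = {
    'length': lambda word, year: len(word),
    'year_sq': lambda word, year: year ** 2,
}
df_demo = make_data_frame(['cat', 'horse'], [1990, 2000], features_demo)
print(df_demo)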
def run_test_episode(env, policy, episode_len=np.inf, render=False):
"""
    Run an episode and return the total reward.
"""
episode_itr = 0
total_reward = 0.0
done = False
obs = env.reset()
while not done and episode_itr < episode_len:
if render:
env.render()
obs = apply_prediction_preprocessors(policy, obs)
action = policy.predict(obs)
action = apply_prediction_postprocessors(policy, action)
obs, reward, done, _ = env.step(action)
total_reward += reward
episode_itr += 1
return total_reward
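# Hypothetical usage, assuming a Gym-style env and a policy object exposing
# predict(); note the apply_prediction_* hooks above are project-specific
# helpers, so this sketch will only run where they are defined.
import gym
class RandomPolicy(object):
    """Minimal stand-in with the predict() interface assumed above."""
    def __init__(self, action_space):
        self.action_space = action_space
    def predict(self, obs):
        return self.action_space.sample()
env = gym.make('CartPole-v1')
print(run_test_episode(env, RandomPolicy(env.action_space), episode_len=200))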