def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
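# The setUp fixtures in this file stash np.geterr() (presumably for a
# tearDown) and silence divide/invalid warnings process-wide. A minimal
# sketch (assuming only numpy) of the scoped alternative, np.errstate,
# which restores the previous settings automatically on exit:
import numpy as np

with np.errstate(divide='ignore', invalid='ignore'):
    ratios = np.array([1.0, 0.0]) / np.array([0.0, 0.0])  # [inf, nan], no warning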
def _logcdf(self, samples):
lower = np.full(2, -np.inf)
upper = norm.ppf(samples)
limit_flags = np.zeros(2)
if upper.shape[0] > 0:
def func1d(upper1d):
'''
Calculates the multivariate normal cumulative distribution
function of a single sample.
'''
return mvn.mvndst(lower, upper1d, limit_flags, self.theta)[1]
vals = np.apply_along_axis(func1d, -1, upper)
else:
vals = np.empty((0, ))
old_settings = np.seterr(divide='ignore')
vals = np.log(vals)
np.seterr(**old_settings)
vals[np.any(samples == 0.0, axis=1)] = -np.inf
vals[samples[:, 0] == 1.0] = np.log(samples[samples[:, 0] == 1.0, 1])
vals[samples[:, 1] == 1.0] = np.log(samples[samples[:, 1] == 1.0, 0])
return vals
def gaussian_function(y, dimension, mu, cov, log=False, standard=False):
    """Multivariate Gaussian density. y: sample (vector); dimension: its
    length; mu: mean (vector); cov: covariance matrix; log: return the
    log-density instead; standard: whiten the sample first."""
    x = y - mu
    if standard:
        # elementwise sqrt of inv(cov); exact only for a diagonal covariance
        x = np.dot(x, np.linalg.inv(cov) ** 0.5)
        cov_ = np.eye(dimension)
    else:
        cov_ = cov
    np.seterr(all='ignore')  # ignore floating-point warnings
    if log:
        func = - (dimension / 2) * np.log(2 * math.pi) - 0.5 * np.log(np.linalg.det(cov_))
        exp = -0.5 * np.dot(np.dot(x, np.linalg.inv(cov_)), x.T)
        return func + exp
    else:
        sigma = (2 * math.pi) ** (dimension / 2) * np.linalg.det(cov_) ** 0.5
        func = 1. / sigma
        exp = np.exp(-0.5 * np.dot(np.dot(x, np.linalg.inv(cov_)), x.T))
        return func * exp
def with_error_settings(**new_settings):
    """
    Run the decorated function under the given numpy error settings,
    restoring the previous settings afterwards (even on exceptions).

    Arguments:
        **new_settings: keyword arguments forwarded to np.seterr.

    Returns:
        A decorator that wraps the function with the error settings.
    """
    @decorator.decorator
    def dec(f, *args, **kwargs):
        old_settings = np.geterr()
        np.seterr(**new_settings)
        try:
            ret = f(*args, **kwargs)
        finally:
            np.seterr(**old_settings)
        return ret
    return dec
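# Hypothetical usage of with_error_settings (assumes numpy as np and the
# `decorator` package are imported as above): divide-by-zero warnings are
# silenced only while safe_log runs, then the old settings come back.
@with_error_settings(divide='ignore')
def safe_log(x):
    return np.log(x)

print(safe_log(np.array([0.0, 1.0])))  # [-inf   0.]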
def evaluate(img_col, args):
numpy.seterr(all='ignore')
assert isinstance(img_col, numpy.ndarray), 'img_col must be a numpy array'
assert img_col.ndim == 3, 'img_col must be a color image ({0} dimensions currently)'.format(img_col.ndim)
assert isinstance(args, argparse.Namespace), 'args must be of type argparse.Namespace not {0}'.format(type(args))
img_gry = cv2.cvtColor(img_col, cv2.COLOR_RGB2GRAY)
rows, cols = img_gry.shape
    crow, ccol = rows // 2, cols // 2  # integer centre; float indices would break the slice below
f = numpy.fft.fft2(img_gry)
fshift = numpy.fft.fftshift(f)
fshift[crow-75:crow+75, ccol-75:ccol+75] = 0
f_ishift = numpy.fft.ifftshift(fshift)
img_fft = numpy.fft.ifft2(f_ishift)
img_fft = 20*numpy.log(numpy.abs(img_fft))
if args.display and not args.testing:
cv2.destroyAllWindows()
scripts.display('img_fft', img_fft)
scripts.display('img_col', img_col)
cv2.waitKey(0)
result = numpy.mean(img_fft)
return img_fft, result, result < args.thresh
def pwdist_canberra(self, seq1idx, seq2idx):
"""Compute the Canberra distance between two vectors.
References:
1. http://scipy.org/
Notes:
When `u[i]` and `v[i]` are 0 for given i, then
the fraction 0/0 = 0 is used in the calculation.
"""
u = self[seq1idx]
v = self[seq2idx]
olderr = np.seterr(invalid='ignore')
try:
d = np.nansum(abs(u - v) / (abs(u) + abs(v)))
finally:
np.seterr(**olderr)
return d
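# Standalone sanity check of pwdist_canberra's 0/0 convention (a sketch
# assuming numpy and scipy are installed): np.nansum drops the nan that
# the 0/0 term produces, matching scipy's canberra behaviour.
import numpy as np
from scipy.spatial.distance import canberra

u = np.array([1.0, 0.0, 2.0])
v = np.array([2.0, 0.0, 2.0])
with np.errstate(invalid='ignore'):
    d = np.nansum(np.abs(u - v) / (np.abs(u) + np.abs(v)))
print(d, canberra(u, v))  # both 0.3333...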
def Salton(MatrixAdjacency_Train):
    similarity_StartTime = time.perf_counter()
similarity = np.dot(MatrixAdjacency_Train,MatrixAdjacency_Train)
deg_row = sum(MatrixAdjacency_Train)
deg_row.shape = (deg_row.shape[0],1)
deg_row_T = deg_row.T
tempdeg = np.dot(deg_row,deg_row_T)
temp = np.sqrt(tempdeg)
np.seterr(divide='ignore', invalid='ignore')
Matrix_similarity = np.nan_to_num(similarity / temp)
# print np.isnan(Matrix_similarity)
# Matrix_similarity = np.nan_to_num(Matrix_similarity)
# print np.isnan(Matrix_similarity)
    similarity_EndTime = time.perf_counter()
    print(" SimilarityTime: %f s" % (similarity_EndTime - similarity_StartTime))
return Matrix_similarity
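# Toy check of the Salton (cosine) index computed above, on a 3-node path
# graph; assumes the adjacency matrix is a dense numpy array as in Salton().
import numpy as np

A = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])
common = np.dot(A, A)               # common-neighbour counts
deg = A.sum(axis=0).reshape(-1, 1)  # node degrees as a column vector
with np.errstate(divide='ignore', invalid='ignore'):
    S = np.nan_to_num(common / np.sqrt(np.dot(deg, deg.T)))
print(S[0, 2])  # 1.0: nodes 0 and 2 share their only neighbour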
def feature(self, words):
"""average words' vectors"""
feature_vec = np.zeros((self.dimension,), dtype="float32")
retrieved_words = 0
for token in words:
try:
feature_vec = np.add(feature_vec, self.embeddings[token])
retrieved_words += 1
except KeyError:
pass # if a word is not in the embeddings' vocabulary discard it
np.seterr(divide='ignore', invalid='ignore')
feature_vec = np.divide(feature_vec, retrieved_words)
return feature_vec
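# If none of the tokens are in the vocabulary, retrieved_words stays 0 and
# the division above yields a NaN vector (seterr only hides the warning).
# A hedged standalone variant that falls back to the zero vector instead:
import numpy as np

def average_embedding(tokens, embeddings, dimension):
    vecs = [embeddings[t] for t in tokens if t in embeddings]
    if not vecs:
        return np.zeros(dimension, dtype="float32")  # no known words
    return np.mean(vecs, axis=0).astype("float32")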
def __init__(self, policy, mdp_info, params, features=None):
self.__name__ = 'GPOMDP'
super(GPOMDP, self).__init__(policy, mdp_info, params, features)
self.sum_d_log_pi = None
self.list_sum_d_log_pi = list()
self.list_sum_d_log_pi_ep = list()
self.list_reward = list()
self.list_reward_ep = list()
self.baseline_num = list()
self.baseline_den = list()
self.step_count = 0
# Ignore divide by zero
np.seterr(divide='ignore', invalid='ignore')
def main():
with open(path, 'r') as data_file:
movieList = json.load(data_file)
count = 0
np.seterr(divide='ignore', invalid='ignore')
for movie in movieList:
if movie["reviews"] and movie["critics"]:
readMovie(movie)
# reviewer_list[movie["movieTitle"]] = recommender(movie)
# cluster[movie["movieTitle"]] = HAC(movie)
score_list[movie["movieTitle"]] = newScore(movie)
count += 1
        print(count)
break
with open("scoreList.json", 'w+') as outfile:
json.dump(score_list, outfile, indent=4, separators=(',', ': '))
def get_base_means(raw_read_coverage, chrm_sizes):
# ignore divide by zero errors that occur where there is no
# coverage. Need to correct nan values after subtracting two sets of
# coverage so leave as nan for now
old_err_settings = np.seterr(all='ignore')
# take the mean over all signal overlapping each base
mean_base_signal = {}
for chrm, strand in [(c, s) for c in chrm_sizes.keys()
for s in ('+', '-')]:
if (chrm, strand) in raw_read_coverage:
cs_base_means = get_reads_base_means(
raw_read_coverage[(chrm, strand)], chrm_sizes[chrm],
strand == '-')
else:
cs_base_means = np.empty(chrm_sizes[chrm])
cs_base_means[:] = np.nan
mean_base_signal[(chrm, strand)] = cs_base_means
_ = np.seterr(**old_err_settings)
return mean_base_signal
def __call__(self, y_true_proba, y_proba):
    """
    Resolution term of the Brier score partition, normalised by the
    uncertainty; see Murphy (1973), "A New Vector Partition of the
    Probability Score" (Brier score = reliability - resolution + uncertainty).
    """
np.seterr(divide="ignore")
pos_obs_freq = np.histogram(
y_proba[y_true_proba == 1], bins=self.bins)[0]
fore_freq = np.histogram(y_proba, bins=self.bins)[0]
climo = y_true_proba.mean()
unc = climo * (1 - climo)
pos_obs_rel_freq = np.zeros(pos_obs_freq.size)
for p in range(pos_obs_rel_freq.size):
if fore_freq[p] > 0:
pos_obs_rel_freq[p] = pos_obs_freq[p] / fore_freq[p]
else:
pos_obs_rel_freq[p] = np.nan
score = np.nansum(fore_freq * (pos_obs_rel_freq - climo) ** 2)
score /= float(y_proba.size)
return score / unc
def dephasing(f):
"""
Computes the dephasing time of a given function using optical response
formalisms:
S. Mukamel, Principles of Nonlinear Optical Spectroscopy, 1995
About the implementation we use the 2nd order cumulant expansion.
See also eq. (2) in : Kilina et al. Phys. Rev. Lett., 110, 180404, (2013)
To calculate the dephasing time tau we fit the dephasing function to a
gaussian of the type : exp(-0.5 * (-x / tau) ** 2)
"""
ts = np.arange(f.shape[0])
    # running cumulative sums; np.stack over a generator fails on recent numpy
    cumu_ii = np.array([np.sum(f[0:i]) for i in range(ts.size)]) / hbar
    cumu_i = np.array([np.sum(cumu_ii[0:i]) for i in range(ts.size)]) / hbar
deph = np.exp(-cumu_i)
np.seterr(over='ignore')
popt = curve_fit(gauss_function, ts, deph)[0]
xs = np.exp(-0.5 * (-ts / popt[0]) ** 2)
deph = np.column_stack((deph, xs))
rate = popt[0]
return deph, rate
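# dephasing() leans on names defined elsewhere in its project: hbar,
# scipy.optimize.curve_fit, and gauss_function. A plausible gauss_function
# matching the fit model quoted in the docstring (an assumption, not the
# project's verbatim helper):
import numpy as np

def gauss_function(x, tau):
    # unit-amplitude Gaussian decay, exp(-0.5 * (x / tau)**2)
    return np.exp(-0.5 * (x / tau) ** 2)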
def run(self):
numpy.seterr(all='raise')
try:
with self._pulseaudio_client:
while not self._stop_event.is_set():
with self.lock:
self._sample()
for callback in self._on_sample_callbacks:
callback()
        except Exception:
            self.exit_success = False
            raise  # re-raise with the original traceback
else:
self.exit_success = True
def get_vlb(self):
# return avg energy plus entropy, our contribution to the mean field
# variational lower bound
errs = np.seterr(invalid='ignore',divide='ignore')
prod = self.r*np.log(self.r)
prod[np.isnan(prod)] = 0. # 0 * -inf = 0.
np.seterr(**errs)
logpitilde = self.weights.expected_log_likelihood(np.arange(len(self.components)))
q_entropy = -prod.sum()
p_avgengy = (self.r*logpitilde).sum()
return p_avgengy + q_entropy
### EM
def _expected_durations(self,
dur_potentials,cumulative_obs_potentials,
alphastarl,betal,normalizer):
logpmfs = -np.inf*np.ones((self.Tfull,alphastarl.shape[1]))
errs = np.seterr(invalid='ignore') # logaddexp(-inf,-inf)
# TODO censoring not handled correctly here
    for tblock in range(self.Tblock):
possible_durations = self.segmentlens[tblock:].cumsum()[:self.trunc]
cB, offset = cumulative_obs_potentials(tblock)
logpmfs[possible_durations -1] = np.logaddexp(
dur_potentials(tblock) + alphastarl[tblock]
+ betal[tblock:tblock+self.trunc if self.trunc is not None else None]
+ cB - (offset + normalizer),
logpmfs[possible_durations -1])
np.seterr(**errs)
return np.exp(logpmfs.T)
###################
# separate trans #
###################
def max_likelihood(self,stateseqs=None,expected_transcounts=None):
trans_counts = sum(expected_transcounts) if stateseqs is None \
else self._count_transitions(stateseqs)
# NOTE: could just call max_likelihood on each trans row, but this way
# it handles a few lazy-initialization cases (e.g. if _row_distns aren't
# initialized)
errs = np.seterr(invalid='ignore',divide='ignore')
    trans_matrix = np.nan_to_num(trans_counts / trans_counts.sum(1)[:, None])  # None == np.newaxis
np.seterr(**errs)
# all-zero rows get set to uniform
trans_matrix[trans_matrix.sum(1) == 0] = 1./trans_matrix.shape[0]
assert np.allclose(trans_matrix.sum(1),1.)
self.trans_matrix = trans_matrix
return self
def max_likelihood(self,stateseqs=None,expected_transcounts=None):
trans_counts = sum(expected_transcounts) if stateseqs is None \
else self._count_transitions(stateseqs)
# NOTE: we could just call max_likelihood on each trans row, but this
# way it's a bit nicer
errs = np.seterr(invalid='ignore',divide='ignore')
    trans_matrix = np.nan_to_num(trans_counts / trans_counts.sum(1)[:, None])  # None == np.newaxis
np.seterr(**errs)
# all-zero rows get set to uniform
trans_matrix[trans_matrix.sum(1) == 0] = 1./(trans_matrix.shape[0]-1)
trans_matrix.flat[::trans_matrix.shape[0]+1] = 0.
self.trans_matrix = trans_matrix
assert np.allclose(0.,np.diag(self.trans_matrix))
assert np.allclose(1.,self.trans_matrix.sum(1))
return self
def _messages_backwards_log_slow(trans_potential, init_potential, likelihood_log_potential,
feature_weights, window_data):
errs = np.seterr(over='ignore')
Al = np.log(trans_potential)
pil = np.log(init_potential)
aBl = likelihood_log_potential
nhs = trans_potential.shape[0]
sequence_length = aBl.shape[0]
betal = np.zeros((sequence_length, nhs * 2))
giant_Al_pil = np.tile(np.vstack((np.tile(pil, (nhs,1)), Al )), (1,2))
    for t in range(betal.shape[0]-2,-1,-1):
temp_constant = np.sum(feature_weights[:-nhs-1] * window_data[t+1,:]) + feature_weights[-1]
temp_exp = temp_constant + feature_weights[-nhs-1:-1]
temp_logaddexp = np.logaddexp(0, temp_exp)
temp_log_linear = np.tile(temp_exp, 2) * np.repeat([0,1], nhs) - np.tile(temp_logaddexp, 2)
np.logaddexp.reduce( giant_Al_pil + betal[t+1] +
np.hstack((aBl[t+1], aBl[t+1])) +
temp_log_linear
,axis=1 ,out=(betal[t]))
np.seterr(**errs)
return betal
def _messages_backwards_log_fast(trans_potential, init_potential, likelihood_log_potential_llt):
errs = np.seterr(over='ignore')
Al = np.log(trans_potential)
pil = np.log(init_potential)
aBl = likelihood_log_potential_llt
nhs = trans_potential.shape[0]
sequence_length = aBl.shape[0]
betal = np.zeros((sequence_length, nhs * 2))
giant_Al_pil = np.tile(np.vstack((np.tile(pil, (nhs,1)), Al )), (1,2))
    for t in range(betal.shape[0]-2,-1,-1):
np.logaddexp.reduce( giant_Al_pil + betal[t+1] + aBl[t+1], axis=1, out=(betal[t]))
np.seterr(**errs)
return betal
### Gibbs sampling
def _sample_forwards_log(self, betal, trans_matrix, init_state_distn, log_likelihoods_loglinear):
errs = np.seterr(over='ignore')
Al = trans_matrix
aBl = log_likelihoods_loglinear
T = aBl.shape[0]
pil = init_state_distn
nhs = trans_matrix.shape[0]
giant_Al_pil = np.tile(np.vstack((np.tile(pil, (nhs,1)), Al )), (1,2))
stateseq = np.empty(T,dtype=np.int32)
true_segmentation = np.ones(T,dtype=np.int32)
nextstate_unsmoothed = np.tile(init_state_distn, 2)
    for idx in range(T):
logdomain = betal[idx] + aBl[idx] ###check this for the initial and last state and compare with the forward message
logdomain[nextstate_unsmoothed == 0] = -np.inf
if np.any(np.isfinite(logdomain)):
stateseq[idx] = sample_discrete(nextstate_unsmoothed * np.exp(logdomain - np.amax(logdomain)))
else:
stateseq[idx] = sample_discrete(nextstate_unsmoothed)
if stateseq[idx] < nhs: true_segmentation[idx] = 0
nextstate_unsmoothed = giant_Al_pil[stateseq[idx]]
return stateseq, true_segmentation
def evaluate(self,n, features, stack_float, stack_bool,labels=None):
"""evaluate node in program"""
np.seterr(all='ignore')
if len(stack_float) >= n.arity['f'] and len(stack_bool) >= n.arity['b']:
if n.out_type == 'f':
stack_float.append(
self.safe(self.eval_dict[n.name](n,features,stack_float,
stack_bool,labels)))
if (np.isnan(stack_float[-1]).any() or
np.isinf(stack_float[-1]).any()):
print("problem operator:",n)
else:
stack_bool.append(self.safe(self.eval_dict[n.name](n,features,
stack_float,
stack_bool,
labels)))
if np.isnan(stack_bool[-1]).any() or np.isinf(stack_bool[-1]).any():
print("problem operator:",n)
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def transform(value, left_scale, right_scale, scale=0):
if left_scale is None or right_scale is None:
raise Exception('Left or Right scales cannot be None.')
if scale not in [0, 1]:
raise Exception('Scale must be 0 or 1.')
    invalid_err = np.geterr().get('invalid')
np.seterr(invalid='ignore')
if scale == 0:
range_ = np.absolute(right_scale - left_scale)
translated_value = np.abs(value - left_scale)
ret_val = (translated_value / range_)
else:
if left_scale <= 0.0:
            raise Exception('left_scale must be positive when scale == 1 (log scale).')
ls = np.log10(left_scale)
rs = np.log10(right_scale)
range_ = rs - ls
translated_value = np.log10(value) - ls
ret_val = (translated_value / range_)
np.seterr(invalid=invalid_err)
return ret_val
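# Hypothetical usage of transform(): map a value onto the unit interval
# for a linear (scale=0) and a logarithmic (scale=1) axis.
print(transform(5.0, 0.0, 10.0, scale=0))    # 0.5
print(transform(10.0, 1.0, 100.0, scale=1))  # 0.5 (log10 midpoint)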