def test_logaddexp_range(self):
    x = [1000000, -1000000, 1000200, -1000200]
    y = [1000200, -1000200, 1000000, -1000000]
    z = [1000200, -1000000, 1000200, -1000000]
    for dt in ['f', 'd', 'g']:
        logxf = np.array(x, dtype=dt)
        logyf = np.array(y, dtype=dt)
        logzf = np.array(z, dtype=dt)
        assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
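What this test pins down: a naive log(exp(x) + exp(y)) overflows at these magnitudes, while np.logaddexp returns the dominant argument plus a vanishing correction. A quick illustration (a sketch, not part of the test suite):

import numpy as np

x, y = 1000200.0, 1000000.0
with np.errstate(over='ignore'):
    print(np.log(np.exp(x) + np.exp(y)))  # inf: exp() overflows float64
print(np.logaddexp(x, y))                 # 1000200.0: computed as x + log1p(exp(y - x))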
def test_inf(self):
    inf = np.inf
    x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
    y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
    z = [inf, inf, inf, -inf, inf, inf, 1, 1]
    with np.errstate(invalid='raise'):
        for dt in ['f', 'd', 'g']:
            logxf = np.array(x, dtype=dt)
            logyf = np.array(y, dtype=dt)
            logzf = np.array(z, dtype=dt)
            assert_equal(np.logaddexp(logxf, logyf), logzf)
def test_nan(self):
    assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
    assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
    assert_(np.isnan(np.logaddexp(np.nan, 0)))
    assert_(np.isnan(np.logaddexp(0, np.nan)))
    assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
def forward(self, x):
    """
    Implementation of softplus. Overflow avoided by use of the logaddexp function.
    self._lower is added before returning.
    """
    return np.logaddexp(0, x) + self._lower
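For reference, np.logaddexp(0, x) is exactly softplus: log(1 + exp(x)), computed so that large positive x cannot overflow. A standalone sketch (the self._lower shift belongs to the class above and is assumed to be 0 here):

import numpy as np

def softplus(x):
    # ~0 for very negative x, ~x for very positive x, log(2) at 0
    return np.logaddexp(0, x)

print(softplus(np.array([-1000.0, 0.0, 1000.0])))  # [   0.     0.693... 1000.]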
def ctc_loss(label, prob, remainder, seq_length, batch_size, num_gpu=1, big_num=1e10):
    label_ = [0, 0]
    prob[prob < 1 / big_num] = 1 / big_num
    log_prob = np.log(prob)
    l = len(label)
    for i in range(l):
        label_.append(int(label[i]))
        label_.append(0)
    l_ = 2 * l + 1
    a = np.full((seq_length, l_ + 1), -big_num)
    a[0][1] = log_prob[remainder][0]
    a[0][2] = log_prob[remainder][label_[2]]
    for i in range(1, seq_length):
        row = i * int(batch_size / num_gpu) + remainder
        a[i][1] = a[i - 1][1] + log_prob[row][0]
        a[i][2] = np.logaddexp(a[i - 1][2], a[i - 1][1]) + log_prob[row][label_[2]]
        for j in range(3, l_ + 1):
            a[i][j] = np.logaddexp(a[i - 1][j], a[i - 1][j - 1])
            if label_[j] != 0 and label_[j] != label_[j - 2]:
                a[i][j] = np.logaddexp(a[i][j], a[i - 1][j - 2])
            a[i][j] += log_prob[row][label_[j]]
    return -np.logaddexp(a[seq_length - 1][l_], a[seq_length - 1][l_ - 1])
# label has already had blanks removed (via remove_blank)
# pred is obtained from pred_best
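As a rough idea of how this might be called (a minimal sketch, not from the original project: the toy shapes below assume batch_size=1 and num_gpu=1, so rows of prob are time steps and column 0 is the CTC blank; remove_blank and pred_best are the project's own helpers and are not reproduced here):

import numpy as np

T, num_classes = 4, 3                  # 4 time steps; classes {blank=0, 1, 2}
prob = np.full((T, num_classes), 0.1)  # toy posteriors, one row per time step
prob[0][1] = prob[1][1] = 0.8          # the model votes for class 1 early...
prob[2][0] = prob[3][0] = 0.8          # ...then for blank
label = [1]                            # target sequence
loss = ctc_loss(label, prob, remainder=0, seq_length=T, batch_size=1)
print(loss)                            # negative log-likelihood of the label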
def _forward_cpu_one(self, x, t, W):
    begin = self.begins[t]
    end = self.begins[t + 1]
    w = W[self.paths[begin:end]]
    wxy = w.dot(x) * self.codes[begin:end]
    loss = numpy.logaddexp(0.0, -wxy)  # == log(1 + exp(-wxy))
    return numpy.sum(loss)
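The identity in that comment is worth spelling out: numpy.logaddexp(0.0, -wxy) is the binary logistic loss -log(sigmoid(wxy)) in overflow-free form. A quick check, independent of the class above:

import numpy as np

z = np.array([-50.0, 0.0, 50.0])
stable = np.logaddexp(0.0, -z)             # log(1 + exp(-z)), always finite
naive = -np.log(1.0 / (1.0 + np.exp(-z)))
print(stable)  # [5.0e+01 6.93e-01 1.93e-22]
print(naive)   # [5.0e+01 6.93e-01 0.0e+00] -- the tiny loss at z=50 is lost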
def check_forward(self, x_data, t_data, use_cudnn=True):
    x = chainer.Variable(x_data)
    t = chainer.Variable(t_data)
    loss = functions.softmax_cross_entropy(
        x, t, use_cudnn=use_cudnn, normalize=self.normalize,
        cache_score=self.cache_score)
    self.assertEqual(loss.data.shape, ())
    self.assertEqual(loss.data.dtype, self.dtype)
    self.assertEqual(hasattr(loss.creator, 'y'), self.cache_score)
    loss_value = float(cuda.to_cpu(loss.data))

    # Compute expected value
    loss_expect = 0.0
    count = 0
    x = numpy.rollaxis(self.x, 1, self.x.ndim).reshape(
        (self.t.size, self.x.shape[1]))
    t = self.t.ravel()
    for xi, ti in six.moves.zip(x, t):
        if ti == -1:
            continue
        log_z = numpy.ufunc.reduce(numpy.logaddexp, xi)
        loss_expect -= (xi - log_z)[ti]
        count += 1

    if self.normalize:
        if count == 0:
            loss_expect = 0.0
        else:
            loss_expect /= count
    else:
        loss_expect /= len(t_data)

    gradient_check.assert_allclose(
        loss_expect, loss_value, **self.check_forward_options)
def check_forward(self, x_data, use_cudnn=True):
    x = chainer.Variable(x_data)
    y = functions.log_softmax(x, use_cudnn)
    self.assertEqual(y.data.dtype, self.dtype)
    log_z = numpy.ufunc.reduce(
        numpy.logaddexp, self.x, axis=1, keepdims=True)
    y_expect = self.x - log_z
    gradient_check.assert_allclose(
        y_expect, y.data, **self.check_forward_options)
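Both Chainer tests above build their reference values with numpy.ufunc.reduce(numpy.logaddexp, ...), which is a pairwise log-sum-exp; the same reduction is more commonly spelled np.logaddexp.reduce. A standalone log-softmax in that style (a sketch, not the Chainer implementation):

import numpy as np

def log_softmax(x, axis=1):
    # x - logsumexp(x): the pairwise logaddexp reduction never forms exp(x),
    # so even rows around 1000 stay finite
    log_z = np.logaddexp.reduce(x, axis=axis, keepdims=True)
    return x - log_z

x = np.array([[1.0, 2.0, 3.0], [1000.0, 1001.0, 1002.0]])
print(np.exp(log_softmax(x)).sum(axis=1))  # [1. 1.]: each row is a distribution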
def js_with(self, p):
    log_p = np.array([p.log_likelihood(ngram) for ngram in p.unique_ngrams()])
    log_q = np.array([self.log_likelihood(ngram) for ngram in p.unique_ngrams()])
    log_m = np.logaddexp(log_p - np.log(2), log_q - np.log(2))
    kl_p_m = np.sum(np.exp(log_p) * (log_p - log_m))
    log_p = np.array([p.log_likelihood(ngram) for ngram in self.unique_ngrams()])
    log_q = np.array([self.log_likelihood(ngram) for ngram in self.unique_ngrams()])
    log_m = np.logaddexp(log_p - np.log(2), log_q - np.log(2))
    kl_q_m = np.sum(np.exp(log_q) * (log_q - log_m))
    return 0.5 * (kl_p_m + kl_q_m) / np.log(2)
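The trick in js_with is that log_m = np.logaddexp(log_p - log 2, log_q - log 2) is log((p + q) / 2) computed without leaving the log domain. The same computation on explicit arrays (a self-contained sketch; it assumes strictly positive entries so np.log stays finite):

import numpy as np

def js_divergence(p, q):
    # Jensen-Shannon divergence in bits: mean of KL(p||m) and KL(q||m), m = (p+q)/2
    log_p, log_q = np.log(p), np.log(q)
    log_m = np.logaddexp(log_p - np.log(2), log_q - np.log(2))
    kl_p_m = np.sum(p * (log_p - log_m))
    kl_q_m = np.sum(q * (log_q - log_m))
    return 0.5 * (kl_p_m + kl_q_m) / np.log(2)

print(js_divergence(np.array([0.5, 0.5]), np.array([0.9, 0.1])))  # ~0.15
print(js_divergence(np.array([0.5, 0.5]), np.array([0.5, 0.5])))  # 0.0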
def test_logaddexp_values(self):
    x = [1, 2, 3, 4, 5]
    y = [5, 4, 3, 2, 1]
    z = [6, 6, 6, 6, 6]
    for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
        xf = np.log(np.array(x, dtype=dt))
        yf = np.log(np.array(y, dtype=dt))
        zf = np.log(np.array(z, dtype=dt))
        assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
def evalObjectiveFunction(clean_biascount, Pword_plus, Pword_minus, ratio, removing_words):
    mlog(evalObjectiveFunction.__name__, "call")
    import numpy as np
    import math
    Obj = 0
    for i, doc in enumerate(clean_biascount):
        Pdoc = calcProbabilityDocument(Pword_plus, Pword_minus, doc[1], ratio, removing_words)
        if i == 0:
            mlog(evalObjectiveFunction.__name__, 'Pdoc 0 ' + str(Pdoc) + ' type ' + str(type(Pdoc)))
            # initialize Obj with log|exp(Pdoc[0]) - exp(Pdoc[1])|
            t1 = np.exp(np.float64(Pdoc[0])) - np.exp(np.float64(Pdoc[1]))
            Obj = np.log(abs(t1))
        if i % 100 == 0:
            mlog(evalObjectiveFunction.__name__, "Pdoc + " + str(i) + "= " + str(Pdoc))
        if doc[0]:
            Obj = np.logaddexp(Obj, Pdoc[0])
            if i % 100 == 1:
                mlog(evalObjectiveFunction.__name__, "Obj+ " + str(i) + " after += " + str(Obj))
            Obj = np.log(np.exp(Obj) - np.exp(Pdoc[1]))
        else:
            Obj = np.log(np.exp(np.float64(Obj)) + np.exp(np.float64(Pdoc[1])))
            if i % 100 == 2:
                mlog(evalObjectiveFunction.__name__, "Obj- " + str(i) + " after += " + str(Obj))
            Obj = np.log(np.exp(Obj) - np.exp(Pdoc[0]))
    if Obj == 0.0:
        mlog(evalObjectiveFunction.__name__, "Obj=0 at doc " + str(i))
    mlog(evalObjectiveFunction.__name__, "Obj = " + str(np.exp(Obj)))
    if not math.isnan(np.exp(Obj)):
        print('J = ' + str(np.exp(Obj)))
    return Obj  # log-domain objective value
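One caveat about the function above: steps like np.log(np.exp(Obj) - np.exp(Pdoc[1])) leave the log domain and break down once the arguments grow. A stable "log-subtract-exp" shifts by the larger argument first (a sketch of the pattern, assuming a > b, as those subtractions require):

import numpy as np

def logsubexp(a, b):
    # log(exp(a) - exp(b)) for a > b, without ever forming exp(a)
    return a + np.log1p(-np.exp(b - a))

print(logsubexp(1000.0, 999.0))  # ~999.54
with np.errstate(over='ignore', invalid='ignore'):
    print(np.log(np.exp(1000.0) - np.exp(999.0)))  # nan: inf - inf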
def sigmoid(x):
    x = np.array(x)
    return np.exp(-np.logaddexp(0, -x))
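This works because sigmoid(x) = exp(-softplus(-x)), and np.logaddexp(0, -x) never overflows, so the result is finite even where the textbook form 1 / (1 + exp(-x)) triggers overflow warnings:

import numpy as np

x = np.array([-1000.0, 0.0, 1000.0])
print(np.exp(-np.logaddexp(0, -x)))  # [0.  0.5 1. ] with no warnings
with np.errstate(over='ignore'):
    print(1.0 / (1.0 + np.exp(-x)))  # same values, but exp(1000) overflowed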
def update_phi(self, doc_number, time):
    """
    Update variational multinomial parameters, based on a document and a time-slice.
    This is done based on the original Blei-LDA paper, where:
    log_phi := beta * exp(Ψ(gamma)), over every topic for every word.

    TODO: incorporate lee-sueng trick used in
    **Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.
    """
    num_topics = self.lda.num_topics
    # digamma values
    dig = np.zeros(num_topics)
    for k in range(0, num_topics):
        dig[k] = digamma(self.gamma[k])

    n = 0  # keep track of iterations for phi, log_phi
    for word_id, count in self.doc:
        for k in range(0, num_topics):
            self.log_phi[n][k] = dig[k] + self.lda.topics[word_id][k]

        log_phi_row = self.log_phi[n]
        phi_row = self.phi[n]

        # log normalize
        v = log_phi_row[0]
        for i in range(1, len(log_phi_row)):
            v = np.logaddexp(v, log_phi_row[i])

        # subtract every element by v
        log_phi_row = log_phi_row - v
        phi_row = np.exp(log_phi_row)

        self.log_phi[n] = log_phi_row
        self.phi[n] = phi_row
        n += 1  # increase iteration

    return self.phi, self.log_phi
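The inner v = np.logaddexp(...) loop is a running log-sum-exp over one row; subtracting it turns the row into a normalized log-distribution without ever exponentiating unnormalized values. The same normalization, vectorized (a sketch using np.logaddexp.reduce, not gensim's code):

import numpy as np

def log_normalize(log_phi_row):
    # subtract the log-sum-exp so that np.exp(result) sums to 1
    return log_phi_row - np.logaddexp.reduce(log_phi_row)

row = np.array([-1050.0, -1000.0, -1001.0])
print(np.exp(log_normalize(row)).sum())  # 1.0, though np.exp(row) underflows to 0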