def forward(self, input, *args, **kwargs):
"""A sigmoid function is a mathematical function having a
characteristic "S"-shaped curve or sigmoid curve. Often,
sigmoid function refers to the special case of the logistic
function and defined by the formula :math:`\\varphi(x) = \\frac{1}{1 + e^{-x}}`
(given the input :math:`x`).
Parameters
----------
input : float32
The activation (the summed, weighted input of a neuron).
Returns
-------
float32 in [0, 1]
The output of the sigmoid function applied to the activation.
"""
self.last_forward = 1.0 / (1.0 + np.exp(-input))
return self.last_forward
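# A quick sanity check of the sigmoid above (a hedged sketch; `np` is assumed
# to be numpy, and the layer object itself is not needed for the math):
# sigmoid is exactly 0.5 at x = 0 and stays strictly inside (0, 1).
import numpy as np

x = np.array([-4.0, 0.0, 4.0], dtype=np.float32)
y = 1.0 / (1.0 + np.exp(-x))       # same formula as forward()
assert np.allclose(y[1], 0.5)      # sigmoid(0) == 0.5
assert np.all((y > 0) & (y < 1))   # outputs stay in (0, 1)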
def forward(self, input):
""":math:`\\varphi(\\mathbf{x})_j =
\\frac{e^{\mathbf{x}_j}}{\sum_{k=1}^K e^{\mathbf{x}_k}}`
where :math:`K` is the total number of neurons in the layer. This
activation function gets applied row-wise.
Parameters
----------
x : float32
The activation (the summed, weighted input of a neuron).
Returns
-------
float32 where the sum of the row is 1 and each single value is in [0, 1]
The output of the softmax function applied to the activation.
"""
assert np.ndim(input) == 2
self.last_forward = input
x = input - np.max(input, axis=1, keepdims=True)
exp_x = np.exp(x)
s = exp_x / np.sum(exp_x, axis=1, keepdims=True)
return s
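# Why the code subtracts the row-wise max before exponentiating (a hedged
# sketch, assuming `np` is numpy): exp() overflows for large inputs, but
# softmax is shift-invariant, so subtracting the max changes nothing
# mathematically while keeping the arithmetic finite.
import numpy as np

logits = np.array([[1000.0, 1001.0, 1002.0]])   # naive exp() overflows here
shifted = logits - np.max(logits, axis=1, keepdims=True)
probs = np.exp(shifted) / np.sum(np.exp(shifted), axis=1, keepdims=True)
assert np.allclose(np.sum(probs, axis=1), 1.0)  # rows still sum to 1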
def __call__(self, params):
    print('???', params)
    sd1, sd2, cor = params
if sd1 < 0. or sd1 > 10. or sd2 < 0. or sd2 > 10. or cor < -1. or cor > 1.:
return np.inf
bandwidth = maths.stats.choleskysqrt2d(sd1, sd2, cor)
bandwidthdet = la.det(bandwidth)
bandwidthinv = la.inv(bandwidth)
    # `sample` is expected to be defined in the enclosing module scope
    diff = sample[self.__iidx] - sample[self.__jidx]
temp = diff.dot(bandwidthinv.T)
temp *= temp
    q = np.sum(temp, axis=1)
    # evaluate exp(-q/4) and exp(-q/2) directly to avoid overflow in exp(q)
    s = np.sum(np.exp(-.25 * q) - 4 * np.exp(-.5 * q))
cost = self.__n / bandwidthdet + (2. / bandwidthdet) * s
    print('!!!', cost)
return cost / 10000.
def step(self, mode):
if mode == "train" and self.mode == "test":
raise Exception("Cannot train during test mode")
if mode == "train":
theano_fn = self.train_fn
batch_gen = self.train_batch_gen
elif mode == "test":
theano_fn = self.test_fn
batch_gen = self.test_batch_gen
else:
raise Exception("Invalid mode")
data = next(batch_gen)
ys = data[-1]
data = data[:-1]
ret = theano_fn(*data)
return {"prediction": np.exp(ret[0]) - 1,
"answers": ys,
"current_loss": ret[1],
"loss_reg": ret[2],
"loss_mse": ret[1] - ret[2],
"log": ""}
def evaluation(self, X_test, y_test):
# normalization
X_test = self.normalization(X_test)
# average over the output
pred_y_test = np.zeros([self.M, len(y_test)])
prob = np.zeros([self.M, len(y_test)])
    # Since we have M particles, we take a Bayesian view and average over
    # them when computing the RMSE and the log-likelihood.
for i in range(self.M):
w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.theta[i, :])
pred_y_test[i, :] = self.nn_predict(X_test, w1, b1, w2, b2) * self.std_y_train + self.mean_y_train
        gamma = np.exp(loggamma)  # per-particle noise precision
        prob[i, :] = np.sqrt(gamma / (2 * np.pi)) * np.exp(-gamma * (pred_y_test[i, :] - y_test) ** 2 / 2)
pred = np.mean(pred_y_test, axis=0)
# evaluation
svgd_rmse = np.sqrt(np.mean((pred - y_test)**2))
    svgd_ll = np.mean(np.log(np.mean(prob, axis=0)))
return (svgd_rmse, svgd_ll)
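# The `prob` line above is the Gaussian pdf with precision exp(loggamma)
# (a hedged sketch, assuming numpy and scipy are available): the density of y
# under N(pred, 1/gamma). scipy's norm.pdf should agree numerically.
import numpy as np
from scipy.stats import norm

gamma = 2.0                      # hypothetical noise precision
pred, y = 1.0, 1.3
manual = np.sqrt(gamma / (2 * np.pi)) * np.exp(-gamma * (pred - y) ** 2 / 2)
assert np.isclose(manual, norm.pdf(y, loc=pred, scale=1 / np.sqrt(gamma)))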
def svgd_kernel(self, theta, h=-1):
sq_dist = pdist(theta)
pairwise_dists = squareform(sq_dist)**2
    if h < 0:  # if h < 0, use the median trick to set the bandwidth
h = np.median(pairwise_dists)
h = np.sqrt(0.5 * h / np.log(theta.shape[0]+1))
    # compute the RBF kernel
    Kxy = np.exp(-pairwise_dists / h ** 2 / 2)
    # kernel gradient w.r.t. the particles:
    # dxkxy[i] = sum_j (theta[i] - theta[j]) * Kxy[i, j] / h**2
    dxkxy = -np.matmul(Kxy, theta)
    sumkxy = np.sum(Kxy, axis=1)
    dxkxy = (dxkxy + theta * sumkxy[:, np.newaxis]) / h ** 2
return (Kxy, dxkxy)
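# Minimal usage sketch for the kernel above (hedged; assumes numpy and
# scipy.spatial.distance.{pdist, squareform} as in the snippet). With the
# median trick, the bandwidth adapts to the spread of the particles.
import numpy as np
from scipy.spatial.distance import pdist, squareform

theta = np.random.randn(50, 2)            # 50 particles in 2-D
sq_dist = squareform(pdist(theta)) ** 2
h = np.sqrt(0.5 * np.median(sq_dist) / np.log(theta.shape[0] + 1))
Kxy = np.exp(-sq_dist / h ** 2 / 2)
assert Kxy.shape == (50, 50) and np.allclose(np.diag(Kxy), 1.0)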
def _linear_phase(self, n_shift):
"""
Private: Select the center of FOV
"""
om = self.st['om']
M = self.st['M']
final_shifts = tuple(
numpy.array(n_shift) +
numpy.array(self.st['Nd']) / 2)
    phase = numpy.exp(1.0j * numpy.sum(om * numpy.tile(final_shifts, (M, 1)), 1))
    # add up all the linear phases along every axis
self.st['p'] = scipy.sparse.diags(phase, 0).dot(self.st['p0'])
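# Context for _linear_phase (a hedged sketch, assuming numpy): multiplying the
# k-space samples by exp(1j * om . shift) is the Fourier shift theorem, which
# re-centers the field of view. The phase below mirrors the construction above.
import numpy as np

M, ndim = 4, 2
om = np.random.uniform(-np.pi, np.pi, (M, ndim))   # k-space coordinates
shift = np.array([3.0, -1.0])                      # hypothetical FOV shift
phase = np.exp(1.0j * np.sum(om * shift, axis=1))  # broadcasting replaces tile()
assert np.allclose(np.abs(phase), 1.0)             # a pure phase has unit magnitude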
def nufft_scale1(N, K, alpha, beta, Nmid):
'''
calculate image space scaling factor
'''
    # complex alpha may not be handled correctly; keep only the real part
    alpha = numpy.real(alpha)
L = len(alpha) - 1
if L > 0:
sn = numpy.zeros((N, 1))
n = numpy.arange(0, N).reshape((N, 1), order='F')
i_gam_n_n0 = 1j * (2 * numpy.pi / K) * (n - Nmid) * beta
for l1 in range(-L, L + 1):
alf = alpha[abs(l1)]
if l1 < 0:
alf = numpy.conj(alf)
sn = sn + alf * numpy.exp(i_gam_n_n0 * l1)
else:
sn = numpy.dot(alpha, numpy.ones((N, 1), dtype=numpy.float32))
return sn
def linear_phase(self, n_shift):
'''
Select the center of FOV
'''
om = self.st['om']
M = self.st['M']
final_shifts = tuple(
numpy.array(n_shift) +
numpy.array(self.st['Nd']) / 2)
    phase = numpy.exp(1.0j * numpy.sum(om * numpy.tile(final_shifts, (M, 1)), 1))
    # add up all the linear phases along every axis
    self.st['p'] = scipy.sparse.diags(phase, 0).dot(self.st['p0'])
    # multiply the diagonal linear-phase matrix into the gridding matrix
def nufft_scale1(N, K, alpha, beta, Nmid):
'''
Calculate image space scaling factor
'''
    # complex alpha may not be handled correctly; keep only the real part
    alpha = numpy.real(alpha)
L = len(alpha) - 1
if L > 0:
sn = numpy.zeros((N, 1))
n = numpy.arange(0, N).reshape((N, 1), order='F')
i_gam_n_n0 = 1j * (2 * numpy.pi / K) * (n - Nmid) * beta
for l1 in range(-L, L + 1):
alf = alpha[abs(l1)]
if l1 < 0:
alf = numpy.conj(alf)
sn = sn + alf * numpy.exp(i_gam_n_n0 * l1)
else:
sn = numpy.dot(alpha, numpy.ones((N, 1)))
return sn
def f(w, lamb):
"""
Eq. (2) in problem 2
Non-vectorized, slow
"""
total = 0
nrows = X.shape[0]
for i in range(nrows):
        current = 1 + np.exp(-y[i] * X[i, :].dot(w))
total += np.log(current)
total += (lamb / 2) * w.dot(w)
return total
def f2(w, lamb):
"""
Eq. (2) in problem 2
Vectorized (no explicit loops), fast
"""
yxTw = y * X.dot(w)
firstpart = np.log(1 + np.exp(-yxTw))
total = firstpart.sum()
total += (lamb / 2) * w.dot(w)
return total
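# Quick equivalence check between f and f2 (a hedged sketch: X, y, and w are
# redefined here as synthetic stand-ins for the module-level data the snippets
# assume, and both computations are inlined to keep the check self-contained).
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(20, 5))
y = rng.choice([-1.0, 1.0], size=20)
w = rng.normal(size=5)
slow = sum(np.log(1 + np.exp(-y[i] * X[i, :].dot(w))) for i in range(20))
fast = np.log(1 + np.exp(-(y * X.dot(w)))).sum()
assert np.isclose(slow, fast)   # vectorized form matches the loop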
def pac_metric(solution, prediction, task='binary.classification'):
''' Probabilistic Accuracy based on log_loss metric.
We assume the solution is in {0, 1} and prediction in [0, 1].
Otherwise, run normalize_array.'''
    debug_flag = False
    [sample_num, label_num] = solution.shape
    if label_num == 1:
        task = 'binary.classification'
eps = 1e-15
the_log_loss = log_loss(solution, prediction, task)
# Compute the base log loss (using the prior probabilities)
    pos_num = 1. * sum(solution)  # float conversion!
    frac_pos = pos_num / sample_num  # prior probability of the positive class
the_base_log_loss = prior_log_loss(frac_pos, task)
# Alternative computation of the same thing (slower)
# Should always return the same thing except in the multi-label case
# For which the analytic solution makes more sense
    if debug_flag:
        base_prediction = np.empty(prediction.shape)
        for k in range(sample_num):
            base_prediction[k, :] = frac_pos
        base_log_loss = log_loss(solution, base_prediction, task)
        diff = np.array(abs(the_base_log_loss - base_log_loss))
        if len(diff.shape) > 0:
            diff = max(diff)
        if diff > 1e-10:
            print('Arrggh {} != {}'.format(the_base_log_loss, base_log_loss))
    # Exponentiate to turn the log loss into an accuracy-like score.
    # In the multi-label case we must average AFTER taking the exp,
    # because exp is a nonlinear operation.
pac = mvmean(np.exp(-the_log_loss))
base_pac = mvmean(np.exp(-the_base_log_loss))
# Normalize: 0 for random, 1 for perfect
score = (pac - base_pac) / sp.maximum(eps, (1 - base_pac))
return score
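# Sketch of the PAC normalization above (hedged; numpy assumed, and the prior
# log loss is a hypothetical value): a perfect predictor has log loss 0, so
# pac = exp(0) = 1 and the score is 1; a predictor matching the prior has
# pac == base_pac and scores 0.
import numpy as np

base_log_loss = 0.693                 # hypothetical prior log loss (ln 2)
base_pac = np.exp(-base_log_loss)
perfect = (np.exp(-0.0) - base_pac) / max(1e-15, 1 - base_pac)
random_score = (np.exp(-base_log_loss) - base_pac) / max(1e-15, 1 - base_pac)
assert np.isclose(perfect, 1.0) and np.isclose(random_score, 0.0)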
def softmax(x):
e_x = np.exp(x - np.max(x))
out = e_x / e_x.sum()
return out
def f(r, theta):
    # f(r, theta) = sin(theta) * r * exp(-r/2); force zero at the outer boundary
    out = np.sin(theta) * r * np.exp(-r / 2.)
    out[-1] = 0
    return out
def dfdr(r, theta):
    # partial derivative of f with respect to r
    out = np.sin(theta) * (np.exp(-r / 2.) - (1. / 2.) * r * np.exp(-r / 2.))
    out[-1] = 0
    return out
def dfdrdtheta(r, theta):
    # mixed partial derivative of f with respect to r and theta
    out = np.cos(theta) * (np.exp(-r / 2.) - (1. / 2.) * r * np.exp(-r / 2.))
    out[-1] = 0
    return out
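# Finite-difference sanity check for dfdr (a hedged sketch, assuming numpy):
# away from the boundary point, the analytic derivative should match a
# central difference to high accuracy. The formulas are inlined here so the
# check does not depend on the boundary zeroing above.
import numpy as np

r = np.linspace(0.1, 10.0, 200)
theta = np.full_like(r, 0.7)
eps = 1e-6
numeric = (np.sin(theta) * (r + eps) * np.exp(-(r + eps) / 2.)
           - np.sin(theta) * (r - eps) * np.exp(-(r - eps) / 2.)) / (2 * eps)
analytic = np.sin(theta) * (np.exp(-r / 2.) - 0.5 * r * np.exp(-r / 2.))
assert np.allclose(numeric, analytic, atol=1e-6)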
def sample(self, probs, temperature):
if temperature == 0:
return np.argmax(probs)
    probs = probs.astype(np.float64)  # convert to float64 for higher precision
    probs = np.log(probs) / temperature  # temperature-scale the log-probabilities
    probs = np.exp(probs) / math.fsum(np.exp(probs))  # renormalize
    return np.argmax(np.random.multinomial(1, probs, 1))  # index of the sampled one-hot draw
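# How temperature reshapes the distribution above (a hedged sketch, assuming
# numpy): T < 1 sharpens probs toward the argmax, T > 1 flattens them, and the
# multinomial draw then samples an index from the reweighted distribution.
import math
import numpy as np

probs = np.array([0.1, 0.2, 0.7])
for T in (0.5, 1.0, 2.0):
    p = np.exp(np.log(probs) / T)
    p = p / math.fsum(p)
    assert np.isclose(p.sum(), 1.0)
# at T = 0.5 the largest probability grows: 0.7 -> ~0.91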
# generate a sentence given conv_hidden
def softmax(x):
act = np.exp(x - np.max(x))
return act / act.sum()