def logistic_grad_bin(w, X, Y, alpha):
    """
    Implementation of the logistic loss gradient when Y is a binary probability
    distribution.
    """
    grad = np.empty_like(w)
    n_features = X.shape[1]
    fit_intercept = w.size == (n_features + 1)
    if fit_intercept:
        intercept = w[-1]
        w = w[:-1]
    else:
        intercept = 0
    z = safe_sparse_dot(X, w.T) + intercept
    # expit(-z) - 1 == -expit(z), so z0 = expit(z) - Y[:, 1] (predicted probability minus target)
    z0 = -(Y[:, 1] + (expit(-z) - 1))
    grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
    if fit_intercept:
        grad[-1] = z0.sum()
    return grad.flatten()
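A minimal usage sketch for the gradient above, assuming the function sits in a module where numpy, expit, and sklearn's safe_sparse_dot are importable; the toy data and the finite-difference check against a hand-written regularized log-loss are illustrative only, not part of the original snippet.

import numpy as np
from scipy.special import expit
from sklearn.utils.extmath import safe_sparse_dot  # used inside logistic_grad_bin

rng = np.random.RandomState(0)
X = rng.randn(20, 3)
y = rng.randint(0, 2, size=20)
Y = np.column_stack([1 - y, y])      # binary probability distribution, one row per sample
w = np.append(rng.randn(3), 0.0)     # feature weights plus intercept
alpha = 0.1

def reg_log_loss(w):
    # hand-written regularized logistic loss, used only to sanity-check the gradient
    z = X.dot(w[:-1]) + w[-1]
    data_term = -np.sum(Y[:, 1] * np.log(expit(z)) + Y[:, 0] * np.log(expit(-z)))
    return data_term + 0.5 * alpha * np.dot(w[:-1], w[:-1])

g = logistic_grad_bin(w, X, Y, alpha)
eps = 1e-6
g_num = np.array([(reg_log_loss(w + eps * e) - reg_log_loss(w - eps * e)) / (2 * eps)
                  for e in np.eye(w.size)])
print(np.allclose(g, g_num, atol=1e-5))  # expected: True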
def predict(self, x):
    # append a bias column of ones to the input
    _x = np.ones((x.shape[0], x.shape[1] + 1))
    _x[:, :-1] = x
    score = expit(np.inner(self.w, _x))
    signs = np.sign(score - .5)
    return [0 if s == -1 else 1 for s in signs]
def activation(x):
    # tanh activation (alternatives tried: sigmoid via expit, 1.7159 * tanh(2/3 * x), ReLU)
    return np.tanh(x)

def dactivation(x):
    # derivative of tanh; the matching sigmoid and ReLU derivatives were the alternatives
    return 1 - np.tanh(x)**2
def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True):
    neu1e = zeros(l1.shape)

    if model.hs:
        l2a = model.syn1[word.point]  # 2d matrix, codelen x layer1_size
        fa = expit(dot(l1, l2a.T))  # propagate hidden -> output
        ga = (1. - word.code - fa) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1[word.point] += outer(ga, l1)  # learn hidden -> output
        neu1e += dot(ga, l2a)  # save error

    if model.negative:
        # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
        word_indices = [word.index]
        while len(word_indices) < model.negative + 1:
            w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
            if w != word.index:
                word_indices.append(w)
        l2b = model.syn1neg[word_indices]  # 2d matrix, k+1 x layer1_size
        fb = expit(dot(l1, l2b.T))  # propagate hidden -> output
        gb = (model.neg_labels - fb) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1neg[word_indices] += outer(gb, l1)  # learn hidden -> output
        neu1e += dot(gb, l2b)  # save error

    if learn_vectors:
        # learn input -> hidden, here for all words in the window separately
        if not model.cbow_mean and input_word_indices:
            neu1e /= len(input_word_indices)
        for i in input_word_indices:
            model.wv.syn0[i] += neu1e * model.syn0_lockf[i]

    return neu1e
def predict_x_mean(self, noisy_data_x, noise_prob=0):
    """
    Calculate the predicted mean given input noisy_data_x.
    :param noisy_data_x: binary input with dimension (dim_input, 1)
    """
    # hidden layer
    h = expit(self.bias_hidden + self.W.dot(noisy_data_x))
    # predicted x
    x_mean = expit(self.bias_input + self.W.transpose().dot(h))
    return (h, x_mean)
def activate(aValue):
    """
    activation function: sigmoid
    g(a) = 1/(1+exp(-a)); same dimension as aValue
    """
    return special.expit(aValue)
def energy_gradient(self, x):
    """
    Calculate the (estimated) gradient of energy E(h, x) at given x,
    with respect to W, bias_input, bias_hidden at current values.
    :param x: input vector with shape (dim_input, 1)
    """
    h_mean = expit(self.bias_hidden + self.W.dot(x))
    grad_W = -h_mean.dot(x.transpose())
    grad_bias_input = -x
    grad_bias_hidden = -h_mean
    return (grad_W, grad_bias_input, grad_bias_hidden)
def gibbs_sample_h(self, x):
    """
    Sample a new h from p(h|x) using current parameters.
    :param x: shape (dim_input, 1)
    :return: shape (dim_hidden, 1)
    """
    h_mean = expit(self.bias_hidden + self.W.dot(x))
    return self.bernoulli(h_mean)
def gibbs_sample_x(self, h):
    """
    Sample a new x from p(x|h) using current parameters.
    :param h: shape (dim_hidden, 1)
    :return: shape (dim_input, 1)
    """
    x_mean = expit(self.bias_input + self.W.transpose().dot(h))
    return self.bernoulli(x_mean)
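The three methods above (energy_gradient, gibbs_sample_h, gibbs_sample_x) are the building blocks of a contrastive-divergence update for an RBM-style model. Below is a minimal, assumed sketch of one CD-1 step; the function name cd1_step, the rbm argument, and the learning rate lr are illustrative and not part of the original class.

def cd1_step(rbm, x, lr=0.01):
    # positive phase: energy gradient evaluated at the data vector x
    pos_W, pos_bi, pos_bh = rbm.energy_gradient(x)
    # negative phase: one Gibbs step away from the data
    h = rbm.gibbs_sample_h(x)
    x_neg = rbm.gibbs_sample_x(h)
    neg_W, neg_bi, neg_bh = rbm.energy_gradient(x_neg)
    # lower the energy of the data relative to the model sample
    rbm.W -= lr * (pos_W - neg_W)
    rbm.bias_input -= lr * (pos_bi - neg_bi)
    rbm.bias_hidden -= lr * (pos_bh - neg_bh)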
def computeWeight(rawData, numEvents):
    # to avoid the case where a term never occurs in a document, add 1 to the count
    numLines, numEvents = rawData.shape   # the numEvents argument is overridden by the data shape
    weightedData = np.zeros((numLines, numEvents), float)
    for j in range(numEvents):
        cnt = np.count_nonzero(rawData[:, j]) + 1
        weight = 0.5 * expit(math.log(numLines / float(cnt)))   # IDF-like weight, constant per column
        for i in range(numLines):
            weightedData[i, j] = rawData[i, j] * weight
    print('weighted data size is', weightedData.shape)
    return weightedData
def computeWeight(rawData):
    # to avoid the case where a term never occurs in a document, add 1 to the count
    numLines, numEvents = rawData.shape
    weightedData = np.zeros((numLines, numEvents), float)
    for j in range(numEvents):
        cnt = np.count_nonzero(rawData[:, j]) + 1
        weight = 0.5 * expit(math.log(numLines / float(cnt)))   # IDF-like weight, constant per column
        for i in range(numLines):
            weightedData[i, j] = rawData[i, j] * weight
    print('weighted data size is', weightedData.shape)
    return weightedData
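A small, assumed usage example of the single-argument computeWeight just above on a toy event-count matrix; the data values are made up, and the imports are listed because the snippet relies on math, numpy, and expit being in scope.

import math
import numpy as np
from scipy.special import expit

rawData = np.array([[2., 0., 1.],
                    [0., 1., 3.],
                    [1., 0., 0.],
                    [4., 0., 2.]])   # rows = log sequences, columns = event types
weightedData = computeWeight(rawData)
# each column is scaled by 0.5 * expit(log(numLines / cnt)), an IDF-like factor,
# so event types that appear in many sequences are down-weighted
print(weightedData)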
def stationary_nonlinearity(self, stim):
    """Stationary nonlinearity

    Nonlinearly rescale a temporal signal `stim` across space and time,
    based on a sigmoidal function dependent on the maximum value of `stim`.
    This is Box 4 in Nanduri et al. (2012).
    The parameter values of the asymptote, slope, and shift of the logistic
    function are given by self.asymptote, self.slope, and self.shift,
    respectively.

    Parameters
    ----------
    stim : array
        Temporal signal to process, stim(r, t) in Nanduri et al. (2012).

    Returns
    -------
    Rescaled signal, b4(r, t) in Nanduri et al. (2012).

    Notes
    -----
    Conversion to TimeSeries is avoided for the sake of speedup.
    """
    # use the expit (logistic) function for speedup
    sigmoid = ss.expit((stim.max() - self.shift) / self.slope)
    return stim * sigmoid
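An assumed, minimal illustration of the rescaling above: the whole signal gets a single multiplicative gain, expit((max(stim) - shift) / slope). The _ToyModel class and its slope/shift values are placeholders, not parameters from Nanduri et al. (2012).

import numpy as np
import scipy.special as ss

class _ToyModel:
    slope = 3.0    # placeholder value
    shift = 15.0   # placeholder value

stim = np.array([0.0, 5.0, 10.0, 20.0])
# gain = expit((20 - 15) / 3); every sample of stim is multiplied by it
print(stationary_nonlinearity(_ToyModel(), stim))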
def sigmoid(a):
    # return the sigmoid value together with its derivative h * (1 - h)
    h = expit(a)
    return h, h * (1 - h)
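A quick, assumed sanity check that the derivative returned by sigmoid(a) above matches a central finite-difference estimate of expit.

import numpy as np
from scipy.special import expit

a = np.linspace(-4, 4, 9)
h, dh = sigmoid(a)
dh_num = (expit(a + 1e-6) - expit(a - 1e-6)) / 2e-6
print(np.allclose(dh, dh_num, atol=1e-8))  # expected: True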
embedding.py (project: nodeembedding-to-communityembedding, author: andompesta)
def gradient_update(positive_node_embedding, negative_nodes_embedding, neg_labels, _alpha):
    '''
    Perform stochastic gradient descent of the first and second order embedding.
    NOTE: the cython implementation (fast_community_sdg_X) is much faster
    '''
    fb = sigmoid(np.dot(positive_node_embedding, negative_nodes_embedding.T))  # propagate hidden -> output
    gb = (neg_labels - fb) * _alpha  # vector of error gradients multiplied by the learning rate
    return gb
node_embeddings.py (project: nodeembedding-to-communityembedding, author: andompesta)
def loss(self, model, edges):
    ret_loss = 0
    for edge in prepare_sentences(model, edges):
        assert len(edge) == 2, "edges have to be formed by 2 nodes: {}".format(edge)
        ret_loss -= np.log(sigmoid(np.dot(model.node_embedding[edge[1].index],
                                          model.node_embedding[edge[0].index].T)))
    return ret_loss
def grad(self, actual, predicted):
    return actual * expit(-actual * predicted)
def hess(self, actual, predicted):
    expits = expit(predicted)
    return expits * (1 - expits)
def transform(self, output):
    # Apply logistic (sigmoid) function to the output
    return expit(output)
def sigmoid(x, out):
    # compute expit(x) = 1 / (1 + exp(-x)) in place, writing the result into `out`
    if out is not x:
        out[:] = x
    np.negative(out, out)
    np.exp(out, out)
    out += 1
    np.reciprocal(out, out)
    return out
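A short, assumed usage note: the in-place version above writes into a preallocated buffer and agrees with scipy.special.expit; passing the same array for x and out overwrites x.

import numpy as np
from scipy.special import expit

x = np.linspace(-6, 6, 13)
out = np.empty_like(x)
sigmoid(x, out)
print(np.allclose(out, expit(x)))  # expected: True

sigmoid(x, x)  # reuses x as the output buffer, overwriting it in place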