def gen_samples(self, num_samples):
"""Generate sample for ML near the snake."""
points = [] # the sample points
labels = [] # the labels
whichs = [] # index of the corresponding node for each sample
deri_g = [] # partial derivative w.r.t. g
deri_T = [] # partial derivative w.r.t. T
counter = 0
assert num_samples % self.length == 0
for i, (v, n) in enumerate(zip(self.vertices, self.normals())):
for d in np.linspace(-1, 1, num_samples // self.length):
# geometry
r = 2 * self.widths[i] * d
s = v + r * n
l = array([0.5 * (1. - np.tanh(d)),
0.5 * (1. + np.tanh(d))])
points.append(s)
labels.append(l)
whichs.append(i)
# compute derivatives
cosh_d = np.cosh(d)
deri_g.append(1 / (4 * self.widths[i] * cosh_d * cosh_d))
deri_T.append(d / (2 * self.widths[i] * cosh_d * cosh_d))
counter += 1
if counter == num_samples:
return array(points), array(labels), array(whichs), array(deri_g), array(deri_T)
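# Sanity sketch (not from the original class; a standalone check of the formulas above):
# with d = r / (2 * w), the second label component 0.5 * (1 + tanh(d)) has derivative
# 1 / (4 * w * cosh(d)**2) with respect to r, matching the deri_g entries, while the
# deri_T entries match the magnitude of the derivative with respect to the width w.
import numpy as np
w, r, eps = 1.5, 0.4, 1e-6
label = lambda rr, ww: 0.5 * (1.0 + np.tanh(rr / (2.0 * ww)))
d = r / (2.0 * w)
numeric_dr = (label(r + eps, w) - label(r - eps, w)) / (2.0 * eps)
assert np.isclose(numeric_dr, 1.0 / (4.0 * w * np.cosh(d) ** 2))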
def get(activation):
if isinstance(activation, str):
if activation in ['sigmoid', 'Sigmoid']:
return Sigmoid()
if activation in ['tan', 'tanh', 'Tanh']:
return Tanh()
if activation in ['relu', 'ReLU', 'RELU']:
return ReLU()
if activation in ['linear', 'Linear']:
return Linear()
if activation in ['softmax', 'Softmax']:
return Softmax()
if activation in ['elliot', 'Elliot']:
return Elliot()
if activation in ['symmetric_elliot', 'SymmetricElliot']:
return SymmetricElliot()
if activation in ['SoftPlus', 'soft_plus', 'softplus']:
return SoftPlus()
if activation in ['SoftSign', 'softsign', 'soft_sign']:
return SoftSign()
raise ValueError('Unknown activation name: {}.'.format(activation))
elif isinstance(activation, Activation):
return copy.deepcopy(activation)
else:
raise ValueError("Unknown type: {}.".format(activation.__class__.__name__))
def sigmoid(x):
return (1 + numpy.tanh(x / 2.0)) / 2
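# Quick check (illustrative only, not part of the original snippet): the tanh form
# above is numerically equivalent to the standard logistic 1 / (1 + exp(-x)).
import numpy
xs = numpy.linspace(-6.0, 6.0, 25)
assert numpy.allclose(sigmoid(xs), 1.0 / (1.0 + numpy.exp(-xs)))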
def inversetransformparameterndarray(parameterndarray, includejumps):
parameterndarray = npu.tondim1(parameterndarray)
res = [
parameterndarray[0], # meanlogvar
np.tanh(.5 * parameterndarray[1]), # persistence
np.sqrt(np.exp(parameterndarray[2])), # voloflogvar
np.tanh(.5 * parameterndarray[3]) # cor
]
if includejumps:
res.append(.5 * (np.tanh(parameterndarray[4]) + 1)) # jumpintensity
res.append(np.sqrt(np.exp(parameterndarray[5]))) # jumpvol
else:
res.append(0.)
res.append(1.)
return np.array(res)
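# Note (a reading of the formulas above, not stated in the source): tanh(0.5 * y)
# maps an unconstrained real y into (-1, 1) for the persistence and correlation
# parameters, sqrt(exp(y)) keeps the volatility parameters strictly positive, and
# 0.5 * (tanh(y) + 1) keeps the jump intensity in (0, 1). A matching forward
# transform would therefore use 2 * arctanh and log(v ** 2) to undo these mappings.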
def act(self):
return np.tanh(np.random.randn(self.dim_action)) # random action
def __init__(self, layers, activation = 'tanh'):
if activation == 'logistic':
self.activation = self.logistic
self.activation_deriv = self.logistic_derivative
elif activation == 'tanh':
self.activation = self.tanh
self.activation_deriv = self.tanh_deriv
'''
generate weight matrix with random float
'''
self.layers = layers
self.weights = []
for i in range(1, len(layers) - 1):
self.weights.append((2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25)
self.weights.append((2 * np.random.random((layers[i] + 1, layers[i + 1])) - 1) * 0.25)
def tanh(x):
return np.tanh(x)
def tanh_deriv(x):
return 1.0 - np.tanh(x) * np.tanh(x)
def fcn_ComputeExcitation_FEM(f,sig,mur,a):
"""Compute Excitation Factor (FEM)"""
w = 2*np.pi*f
mu = 4*np.pi*1e-7*mur
alpha = a*np.sqrt(1j*w*mu*sig)
tanh_alpha = np.tanh(alpha)
numerator = 2*mur*(tanh_alpha - alpha) + (alpha**2*tanh_alpha - alpha + tanh_alpha)
denominator = mur*(tanh_alpha - alpha) - (alpha**2*tanh_alpha - alpha + tanh_alpha)
chi = 1.5*numerator/denominator
return chi
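# Usage sketch (parameter meanings inferred from the formula, so treat them as an
# assumption): f is a frequency in Hz, sig a conductivity, mur a relative permeability,
# and a a length scale; alpha = a * sqrt(1j * w * mu * sig) then plays the role of an
# induction number.
#   chi = fcn_ComputeExcitation_FEM(f=1e3, sig=1e6, mur=1.0, a=0.05)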
def g(x, t=4):
"""A transformation that suppresses outliers for a standard normal."""
xp = np.clip(x, -t, t)
diff = np.tanh(x - xp)
return xp + diff
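# Behavior note (derived from the code above): for |x| <= t the input is returned
# unchanged, since xp == x and tanh(0) == 0; beyond the clip point the output grows
# only by tanh(|x| - t) < 1 in magnitude, so outliers are softly capped near
# +/-(t + 1) rather than hard-clipped at t.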
def lossFun(inputs, targets, hprev):
"""
inputs, targets are both lists of integers.
"""
xs, hs, ys, ps = {}, {}, {}, {}
hs[-1] = np.copy(hprev)
loss = 0
#forward pass
for t in range(len(inputs)):
xs[t] = np.zeros((vocab_size,1)) #encode in 1-of-k representation
xs[t][inputs[t]] = 1
hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state update
ys[t] = np.dot(Why, hs[t]) + by
ps[t] = np.exp(ys[t])/np.sum(np.exp(ys[t])) #probabilities for next char
loss += -np.log(ps[t][targets[t],0]) #softmax cross-entropy loss
#backward pass
dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
dbh, dby = np.zeros_like(bh), np.zeros_like(by)
dhnext = np.zeros_like(hs[0])
for t in reversed(range(len(inputs))):
dy = np.copy(ps[t])
dy[targets[t]] -= 1 # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here
dWhy += np.dot(dy, hs[t].T)
dby += dy
dh = np.dot(Why.T, dy) + dhnext # backprop into h
dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
dbh += dhraw
dWxh += np.dot(dhraw, xs[t].T)
dWhh += np.dot(dhraw, hs[t-1].T)
dhnext = np.dot(Whh.T, dhraw)
for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
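# Minimal setup sketch (hypothetical sizes, only to exercise lossFun standalone;
# a full script would define these globals from its training data):
import numpy as np
vocab_size, hidden_size = 4, 8
Wxh = np.random.randn(hidden_size, vocab_size) * 0.01
Whh = np.random.randn(hidden_size, hidden_size) * 0.01
Why = np.random.randn(vocab_size, hidden_size) * 0.01
bh, by = np.zeros((hidden_size, 1)), np.zeros((vocab_size, 1))
loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun([0, 1, 2], [1, 2, 3], np.zeros((hidden_size, 1)))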
def update_output(self, x):
self.output = np.tanh(x)
return self.output
def test_basic_ufuncs(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.cos(x), cos(xm))
assert_equal(np.cosh(x), cosh(xm))
assert_equal(np.sin(x), sin(xm))
assert_equal(np.sinh(x), sinh(xm))
assert_equal(np.tan(x), tan(xm))
assert_equal(np.tanh(x), tanh(xm))
assert_equal(np.sqrt(abs(x)), sqrt(xm))
assert_equal(np.log(abs(x)), log(xm))
assert_equal(np.log10(abs(x)), log10(xm))
assert_equal(np.exp(x), exp(xm))
assert_equal(np.arcsin(z), arcsin(zm))
assert_equal(np.arccos(z), arccos(zm))
assert_equal(np.arctan(z), arctan(zm))
assert_equal(np.arctan2(x, y), arctan2(xm, ym))
assert_equal(np.absolute(x), absolute(xm))
assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
assert_equal(np.equal(x, y), equal(xm, ym))
assert_equal(np.not_equal(x, y), not_equal(xm, ym))
assert_equal(np.less(x, y), less(xm, ym))
assert_equal(np.greater(x, y), greater(xm, ym))
assert_equal(np.less_equal(x, y), less_equal(xm, ym))
assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
assert_equal(np.conjugate(x), conjugate(xm))
def test_testUfuncRegression(self):
# Tests new ufuncs on MaskedArrays.
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
'floor', 'ceil',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor',
]:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(numpy.ma.core, f)
args = self.d[:uf.nin]
ur = uf(*args)
mr = mf(*args)
assert_equal(ur.filled(0), mr.filled(0), f)
assert_mask_equal(ur.mask, mr.mask, err_msg=f)
def test_testUfuncs1(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.assertTrue(eq(np.cos(x), cos(xm)))
self.assertTrue(eq(np.cosh(x), cosh(xm)))
self.assertTrue(eq(np.sin(x), sin(xm)))
self.assertTrue(eq(np.sinh(x), sinh(xm)))
self.assertTrue(eq(np.tan(x), tan(xm)))
self.assertTrue(eq(np.tanh(x), tanh(xm)))
with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(np.sqrt(abs(x)), sqrt(xm)))
self.assertTrue(eq(np.log(abs(x)), log(xm)))
self.assertTrue(eq(np.log10(abs(x)), log10(xm)))
self.assertTrue(eq(np.exp(x), exp(xm)))
self.assertTrue(eq(np.arcsin(z), arcsin(zm)))
self.assertTrue(eq(np.arccos(z), arccos(zm)))
self.assertTrue(eq(np.arctan(z), arctan(zm)))
self.assertTrue(eq(np.arctan2(x, y), arctan2(xm, ym)))
self.assertTrue(eq(np.absolute(x), absolute(xm)))
self.assertTrue(eq(np.equal(x, y), equal(xm, ym)))
self.assertTrue(eq(np.not_equal(x, y), not_equal(xm, ym)))
self.assertTrue(eq(np.less(x, y), less(xm, ym)))
self.assertTrue(eq(np.greater(x, y), greater(xm, ym)))
self.assertTrue(eq(np.less_equal(x, y), less_equal(xm, ym)))
self.assertTrue(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
self.assertTrue(eq(np.conjugate(x), conjugate(xm)))
self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, ym))))
self.assertTrue(eq(np.concatenate((x, y)), concatenate((x, y))))
self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, y))))
self.assertTrue(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
def test_testUfuncRegression(self):
f_invalid_ignore = [
'sqrt', 'arctanh', 'arcsin', 'arccos',
'arccosh', 'arctanh', 'log', 'log10', 'divide',
'true_divide', 'floor_divide', 'remainder', 'fmod']
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
'floor', 'ceil',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor']:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(np.ma, f)
args = self.d[:uf.nin]
with np.errstate():
if f in f_invalid_ignore:
np.seterr(invalid='ignore')
if f in ['arctanh', 'log', 'log10']:
np.seterr(divide='ignore')
ur = uf(*args)
mr = mf(*args)
self.assertTrue(eq(ur.filled(0), mr.filled(0), f))
self.assertTrue(eqmask(ur.mask, mr.mask))
def tanh(x: Number = 0.0) -> Number:
return np.tanh(x)
# Constants times input n
def test_make_gru(dim_in=31, dim_h=11, dim_out=None,
i_net=None, a_net=None, o_net=None, c_net=None):
print 'Testing GRU formation'
if i_net is None:
i_net = dict(
dim_h=17,
n_layers=2,
h_act='T.tanh',
weight_scale=0.1,
)
if a_net is None:
a_net = dict(
dim_h=19,
n_layers=2,
h_act='T.tanh',
weight_scale=0.1
)
if o_net is None:
o_net = dict(
dim_h=23,
n_layers=2,
weight_scale=0.1,
distribution='binomial'
)
nets = dict(i_net=i_net, a_net=a_net, o_net=o_net, c_net=c_net)
trng = RandomStreams(101)
rnn = GRU.factory(dim_in=dim_in, dim_hs=[dim_h], dim_out=dim_out, **nets)
rnn.set_tparams()
print 'GRU formed correctly'
return rnn
def test_darn(dim_in=5, dim_h=3, dim_out=7, n_samples=13):
darn = DARN(dim_in, dim_h, dim_out, 2, h_act='T.tanh', out_act='T.nnet.sigmoid')
tparams = darn.set_tparams()
X = T.matrix('X', dtype=floatX)
H = T.matrix('H', dtype=floatX)
C = darn(H)
NLP = darn.neg_log_prob(X, C)
f = theano.function([X, H], [C, NLP])
x = np.random.randint(0, 2, size=(n_samples, dim_out)).astype(floatX)
h = np.random.randint(0, 2, size=(n_samples, dim_in)).astype(floatX)
c_t, nlp_t = f(x, h)
print c_t.shape
d_np = np.tanh(np.dot(h, darn.params['W0']) + darn.params['b0'])
c_np = np.dot(d_np, darn.params['W1']) + darn.params['b1']
assert np.allclose(c_t, c_np), (c_t, c_np)
z_np = np.zeros((n_samples, dim_out)).astype(floatX) + darn.params['bar'][None, :] + c_np
for i in xrange(dim_out):
for j in xrange(i + 1, dim_out):
z_np[:, i] += darn.params['War'][j, i] * x[:, j]
p_np = sigmoid(z_np)
p_np = np.clip(p_np, 1e-7, 1 - 1e-7)
nlp_np = (- x * np.log(p_np) - (1 - x) * np.log(1 - p_np)).sum(axis=1)
assert np.allclose(nlp_t, nlp_np), (nlp_t, nlp_np)
samples, updates_s = darn.sample(C, n_samples=n_samples-1)
f = theano.function([H], samples, updates=updates_s)
print f(h)