def iaf(self, z, h, lin1, lin2):
    # One inverse autoregressive flow (IAF) step: z' = s * z + (1 - s) * m.
    ms = F.crelu(lin1(F.concat((z, h), axis=1)))
    ms = lin2(ms)
    m, s = F.split_axis(ms, 2, axis=1)
    s = F.sigmoid(s)
    z = s * z + (1 - s) * m
    # The Jacobian of the transform is diagonal with entries s, so the
    # log-determinant is sum(log s); its negation is returned.
    return z, -F.sum(F.log(s), axis=1)
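
A minimal shape-checking sketch of the flow step above (batch size, latent width, and hidden width are illustrative assumptions; note that crelu doubles the channel count, which fixes the sizes of the two linear links):

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

latent, hidden = 4, 8
lin1 = L.Linear(2 * latent, hidden)       # input: concat of z and h (both width 4 here)
lin2 = L.Linear(2 * hidden, 2 * latent)   # crelu doubled channels; output splits into m, s
z = chainer.Variable(np.random.randn(5, latent).astype(np.float32))
h = chainer.Variable(np.random.randn(5, latent).astype(np.float32))

ms = F.crelu(lin1(F.concat((z, h), axis=1)))
m, s = F.split_axis(lin2(ms), 2, axis=1)
s = F.sigmoid(s)
z_new = s * z + (1 - s) * m               # same update as iaf() above
neg_logdet = -F.sum(F.log(s), axis=1)     # per-sample -log|det J|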
def __call__(self, X):
    # Causal convolution: trim the right-hand padding so no future time
    # steps leak into the output, then apply a GLU-style sigmoid gate.
    pad = self._kernel_size[1] - 1
    WX = self.W(X)
    if pad > 0:
        WX = WX[..., :-pad]
    A, B = functions.split_axis(WX, 2, axis=1)
    H = A * functions.sigmoid(B)
    return H
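
The split-and-multiply above is a gated linear unit (GLU): the convolution emits twice the output channels and one half gates the other. A self-contained sketch with causal padding (channel counts and the 1-by-k kernel layout are illustrative assumptions):

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

out_ch, ksize = 16, 3
conv = L.Convolution2D(8, 2 * out_ch, ksize=(1, ksize), pad=(0, ksize - 1))
x = chainer.Variable(np.random.randn(2, 8, 1, 10).astype(np.float32))

wx = conv(x)
pad = ksize - 1
if pad > 0:
    wx = wx[..., :-pad]                 # keep only causally valid positions
a, b = F.split_axis(wx, 2, axis=1)      # split channels into signal and gate
h = a * F.sigmoid(b)                    # GLU output: out_ch channels, same width as x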
# Connections
def zoneout(self, U):
    if self._using_zoneout and chainer.config.train:
        return 1 - zoneout(functions.sigmoid(-U), self._zoneout)
    return functions.sigmoid(U)
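
The expression above exploits the identity sigmoid(U) = 1 - sigmoid(-U). A tiny numpy illustration (zoneout_ is a hypothetical stand-in that zeroes each element with probability p, as in common QRNN implementations):

import numpy as np

def zoneout_(x, p):
    # Hypothetical stand-in: zero each element with probability p.
    return x * (np.random.rand(*x.shape) >= p)

U = np.random.randn(4).astype(np.float32)
f = 1 - zoneout_(1.0 / (1.0 + np.exp(U)), 0.5)   # 1/(1+exp(U)) == sigmoid(-U)
# Entries that were zeroed come out as exactly 1, so those gates fully
# carry the previous state; untouched entries equal sigmoid(U) as usual.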
def __call__(self, x, sigmoid=True):
    """AutoEncoder"""
    return self.decode(self.encode(x)[0], sigmoid)
def decode(self, z, sigmoid=True):
    h1 = F.tanh(self.ld1(z))
    h2 = self.ld2(h1)
    if sigmoid:
        return F.sigmoid(h2)
    else:
        return h2
def get_loss_func(self, C=1.0, k=1, train=True):
    """Get loss function of VAE.

    The loss value is equal to ELBO (Evidence Lower Bound)
    multiplied by -1.

    Args:
        C (float): Usually this is 1.0. Can be changed to control the
            second term of the ELBO, which works as regularization.
        k (int): Number of Monte Carlo samples used in the encoded vector.
        train (bool): If true, the loss function is used for training.
    """
    def lf(x):
        mu, ln_var = self.encode(x)
        batchsize = len(mu.data)
        # reconstruction loss
        rec_loss = 0
        for l in six.moves.range(k):
            z = F.gaussian(mu, ln_var)
            rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \
                / (k * batchsize)
        self.rec_loss = rec_loss
        self.loss = self.rec_loss + \
            C * gaussian_kl_divergence(mu, ln_var) / batchsize
        return self.loss
    return lf
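
The decoder is called with sigmoid=False because F.bernoulli_nll takes pre-sigmoid logits and applies the sigmoid internally, which is more numerically stable. A sketch of wiring the returned closure into training (model is an assumed VAE-style chain; the pattern follows Chainer's VAE example):

import numpy as np
from chainer import optimizers

# model is assumed to be a VAE-style chain exposing encode()/decode().
optimizer = optimizers.Adam()
optimizer.setup(model)

x = np.random.rand(16, 784).astype(np.float32)   # illustrative batch in [0, 1]
loss_func = model.get_loss_func(C=1.0, k=1)
optimizer.update(loss_func, x)                    # one step on the negative ELBO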
def setUp(self):
    self.mlp = links.MLPConvolution2D(
        3, (96, 96, 96), 11,
        activation=functions.sigmoid,
        use_cudnn=self.use_cudnn)
    self.x = numpy.zeros((10, 3, 20, 20), dtype=numpy.float32)
def test_init(self):
    self.assertIs(self.mlp.activation, functions.sigmoid)
    self.assertEqual(len(self.mlp), 3)
    for i, conv in enumerate(self.mlp):
        self.assertIsInstance(conv, links.Convolution2D)
        self.assertEqual(conv.use_cudnn, self.use_cudnn)
        if i == 0:
            self.assertEqual(conv.W.data.shape, (96, 3, 11, 11))
        else:
            self.assertEqual(conv.W.data.shape, (96, 96, 1, 1))
def setUp(self):
    self.mlp = links.MLPConvolution2D(
        3, (96, 96, 96), 11,
        activation=functions.sigmoid,
        use_cudnn=self.use_cudnn)
    self.mlp.to_gpu()
    self.x = cuda.cupy.zeros((10, 3, 20, 20), dtype=numpy.float32)
    self.gy = cuda.cupy.zeros((10, 96, 10, 10), dtype=numpy.float32)
def check_forward(self, x_data, use_cudnn=True):
    x = chainer.Variable(x_data)
    y = functions.sigmoid(x, use_cudnn=use_cudnn)
    self.assertEqual(y.data.dtype, self.dtype)
    y_expect = functions.sigmoid(chainer.Variable(self.x))
    gradient_check.assert_allclose(
        y_expect.data, y.data, **self.check_forward_options)
def __call__(self, x, mask=None):
    x = F.dropout(x, ratio=self.dropout)
    out, pregate = F.split_axis(self.conv(x), 2, axis=1)
    out = out * F.sigmoid(pregate)
    if mask is not None:
        out *= mask
    return out

# TODO: For layers whose output is not directly fed to a gated linear
# unit, initialize weights from N(0, sqrt(1/n_l)), where n_l is the
# number of input connections for each neuron.
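
A sketch of the initialization the TODO describes (layer sizes are illustrative; Chainer's Normal initializer takes the standard deviation as scale):

import numpy as np
import chainer.links as L
from chainer.initializers import Normal

n_in, n_out = 512, 512                       # illustrative layer widths
std = np.sqrt(1.0 / n_in)                    # N(0, sqrt(1/n_l)) with n_l = fan-in
layer = L.Linear(n_in, n_out, initialW=Normal(scale=std))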
def __init__(self, in_size, hidden_size, activation=F.sigmoid):
    super(Perceptrons, self).__init__(
        fc1=L.Linear(in_size, hidden_size),
    )
    self.activation = activation
def _propagate(self, Y, dropout=0.):
    blstm = self.blstm_layer(Y, dropout=dropout)
    relu_1 = F.clipped_relu(self.relu_1(blstm, dropout=dropout))
    relu_2 = F.clipped_relu(self.relu_2(relu_1, dropout=dropout))
    N_mask = F.sigmoid(self.noise_mask_estimate(relu_2))
    X_mask = F.sigmoid(self.speech_mask_estimate(relu_2))
    return N_mask, X_mask
def _propagate(self, Y, dropout=0.):
    relu_1 = F.clipped_relu(self.relu_1(Y, dropout=dropout))
    relu_2 = F.clipped_relu(self.relu_2(relu_1, dropout=dropout))
    relu_3 = F.clipped_relu(self.relu_3(relu_2, dropout=dropout))
    N_mask = F.sigmoid(self.noise_mask_estimate(relu_3))
    X_mask = F.sigmoid(self.speech_mask_estimate(relu_3))
    return N_mask, X_mask
def __init__(self, use_cudnn=True):
    self._function = "sigmoid"
    self.use_cudnn = use_cudnn

def __call__(self, x):
    return F.sigmoid(x, self.use_cudnn)
def _propagate(self, Y, dropout=0.):
    relu_1 = F.clipped_relu(self.relu_1(Y, dropout=dropout))
    N_mask = F.sigmoid(self.noise_mask_estimate(relu_1))
    X_mask = F.sigmoid(self.speech_mask_estimate(relu_1))
    return N_mask, X_mask
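
Since the sigmoid keeps both estimated masks in (0, 1), they are typically applied multiplicatively to the mixture's magnitude spectrogram. A numpy sketch of that common pattern (shapes are illustrative assumptions, not this repository's exact post-processing):

import numpy as np

Y = np.abs(np.random.randn(100, 257)).astype(np.float32)   # |STFT| of the noisy mixture
X_mask = np.random.rand(100, 257).astype(np.float32)       # stands in for F.sigmoid output
N_mask = np.random.rand(100, 257).astype(np.float32)

X_est = X_mask * Y    # estimated speech magnitude
N_est = N_mask * Y    # estimated noise magnitude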
def __call__(self, x, sigmoid=True):
    """Compute the VAE loss: reconstruction term plus weighted KL divergence."""
    mu, ln_var = self.encode(x)
    batchsize = len(mu.data)
    # reconstruction loss
    rec_loss = 0
    for l in six.moves.range(self.k):
        z = F.gaussian(mu, ln_var)
        rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \
            / (self.k * batchsize)
    loss = rec_loss + \
        self.C * gaussian_kl_divergence(mu, ln_var) / batchsize
    chainer.report({'loss': loss}, self)
    return loss