def compute_mean_and_var(self, x):
    # mean = self.mean_layer(x)
    # Bound the mean to [-2, 2] with tanh; softplus keeps the variance positive.
    mean = F.tanh(self.mean_layer(x)) * 2.0
    var = F.softplus(self.var_layer(x))
    return mean, var

def compute_mean_and_var(self, x):
    # mean = self.mean_layer(x)
    mean = F.tanh(self.mean_layer(x)) * 2.0
    # Broadcast the variance head's output to the mean's shape, then keep it positive.
    var = F.softplus(F.broadcast_to(self.var_layer(x), mean.data.shape))
    return mean, var

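# The mean/variance pair above typically parameterizes a Gaussian (e.g. a stochastic
# policy head). A minimal sketch of drawing a sample from it, assuming an object
# `policy` exposing compute_mean_and_var and an input `x` (both names are
# illustrative, not taken from the snippet above):
import numpy as np

mean, var = policy.compute_mean_and_var(x)
sample = np.random.normal(mean.data, np.sqrt(var.data))
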
def __init__(self):
    super(Generator_ResBlock_6, self).__init__(
        c1 = CBR(3, 32, bn=True, sample='none-7'),
        c2 = CBR(32, 64, bn=True, sample='down'),
        c3 = CBR(64, 128, bn=True, sample='down'),
        c4 = ResBlock(128, bn=True),
        c5 = ResBlock(128, bn=True),
        c6 = ResBlock(128, bn=True),
        c7 = ResBlock(128, bn=True),
        c8 = ResBlock(128, bn=True),
        c9 = ResBlock(128, bn=True),
        c10 = CBR(128, 64, bn=True, sample='up'),
        c11 = CBR(64, 32, bn=True, sample='up'),
        c12 = CBR(32, 3, bn=True, sample='none-7', activation=F.tanh)
    )

def __call__(self, x, test=False):
    h = self.b1(F.elu(self.c1(x)), test=test)
    h = self.b2(F.elu(self.c2(h)), test=test)
    h = self.b3(F.elu(self.c3(h)), test=test)
    h = self.r1(h, test=test)
    h = self.r2(h, test=test)
    h = self.r3(h, test=test)
    h = self.r4(h, test=test)
    h = self.r5(h, test=test)
    h = self.b4(F.elu(self.d1(h)), test=test)
    h = self.b5(F.elu(self.d2(h)), test=test)
    y = self.d3(h)
    # Map the tanh output from [-1, 1] to pixel values in [0, 255].
    return (F.tanh(y) + 1) * 127.5

def encode(self, x):
    h1 = F.tanh(self.le1(x))
    mu = self.le2_mu(h1)
    ln_var = self.le2_ln_var(h1)  # log(sigma**2)
    return mu, ln_var

def decode(self, z, sigmoid=True):
    h1 = F.tanh(self.ld1(z))
    h2 = self.ld2(h1)
    if sigmoid:
        return F.sigmoid(h2)
    else:
        return h2

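# The encode/decode pair above is a standard Chainer VAE encoder/decoder. A minimal
# sketch of how mu and ln_var usually feed into the reparameterized sample and the
# variational loss, using only chainer.functions; the helper name `vae_loss` and the
# arguments `model`, `x`, `beta` are illustrative assumptions:
import chainer.functions as F

def vae_loss(model, x, beta=1.0):
    mu, ln_var = model.encode(x)
    z = F.gaussian(mu, ln_var)               # reparameterization trick
    logits = model.decode(z, sigmoid=False)   # pre-sigmoid output for bernoulli_nll
    rec = F.bernoulli_nll(x, logits) / x.shape[0]
    kl = F.gaussian_kl_divergence(mu, ln_var) / x.shape[0]
    return rec + beta * kl
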
def _attend(self, p):
    # Project the query and broadcast it over the source positions.
    p = self.xh(p)
    p = F.expand_dims(p, 1)
    p = F.broadcast_to(p, self.shape2)
    # Additive attention: score each source position, mask, then softmax.
    h = F.tanh(self.h + p)
    shape3 = (self.batchsize * self.src_len, self.dim_hid)
    h_reshaped = F.reshape(h, shape3)
    weight_reshaped = self.hw(h_reshaped)
    weight = F.reshape(weight_reshaped, (self.batchsize, self.src_len, 1))
    weight = F.where(self.mask, weight, self.minf)
    attention = F.softmax(weight)
    return attention

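# The attention weights returned by _attend are typically collapsed into a context
# vector by a weighted sum over the source positions. A minimal sketch assuming
# encoder states `enc` of shape (batchsize, src_len, dim_hid); the name `enc` is an
# assumption, not taken from the snippet above:
import chainer.functions as F

def context_vector(attention, enc):
    # attention: (batchsize, src_len, 1); enc: (batchsize, src_len, dim_hid)
    weighted = F.broadcast_to(attention, enc.shape) * enc
    return F.sum(weighted, axis=1)  # (batchsize, dim_hid)
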
def planar_flows(self, z):
    self.z_trans = []
    self.z_trans.append(z)
    self.phi = []
    for i in range(self.num_trans):
        flow_w_name = 'flow_w_' + str(i)
        flow_b_name = 'flow_b_' + str(i)
        flow_u_name = 'flow_u_' + str(i)
        h = self[flow_w_name](z)
        h = F.sum(h, axis=1)
        h = self[flow_b_name](h)
        h = F.tanh(h)
        h_tanh = h
        dim_latent = z.shape[1]
        h = F.transpose(F.tile(h, (dim_latent, 1)))
        h = self[flow_u_name](h)
        z += h
        self.z_trans.append(z)
        # Calculate and store the phi term
        h_tanh_derivative = 1 - (h_tanh * h_tanh)
        h_tanh_derivative = F.transpose(F.tile(h_tanh_derivative, (dim_latent, 1)))
        phi = self[flow_w_name](h_tanh_derivative)  # Equation (11)
        self.phi.append(phi)
    return z

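# The loop above implements planar flows (Rezende & Mohamed, 2015):
# z' = z + u * tanh(w^T z + b), where the stored `phi` term enters the
# log-determinant of the Jacobian, log|1 + u^T psi(z)| with
# psi(z) = (1 - tanh^2(w^T z + b)) * w.  A self-contained NumPy sketch of one flow
# step with explicit parameter vectors (not the learned links used above; the names
# `w`, `u`, `b` are illustrative):
import numpy as np

def planar_flow_step(z, w, u, b):
    # z: (batch, dim); w, u: (dim,); b: scalar.
    pre = z @ w + b                          # (batch,)    w^T z + b
    h = np.tanh(pre)                         # (batch,)
    z_new = z + h[:, None] * u               # (batch, dim)
    psi = (1.0 - h ** 2)[:, None] * w        # (batch, dim) tanh'(pre) * w
    log_det = np.log(np.abs(1.0 + psi @ u))  # (batch,)    log|1 + u^T psi(z)|
    return z_new, log_det
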
def Tanh():
    return functions.tanh

def node(self, left, right):
    # Combine two child representations into a parent node representation.
    return F.tanh(self.l(F.concat((left, right))))

def forward(self):
    x = chainer.Variable(self.x)
    return functions.tanh(x, use_cudnn=self.use_cudnn)

def check_forward(self, x_data, use_cudnn=True):
    x = chainer.Variable(x_data)
    y = functions.tanh(x, use_cudnn=use_cudnn)
    self.assertEqual(y.data.dtype, self.dtype)
    y_expect = functions.tanh(chainer.Variable(self.x))
    gradient_check.assert_allclose(y_expect.data, y.data)

def Q_func(self, state, train=True):
    test = not train
    s = Variable(state)
    h = F.tanh(self.bn1(self.fc1(s), test=test))
    h = F.tanh(self.bn2(self.fc2(h), test=test))
    h = F.tanh(self.bn3(self.fc3(h), test=test))
    h = F.tanh(self.bn4(self.fc4(h), test=test))
    h = F.tanh(self.bn5(self.fc5(h), test=test))
    Q = self.q_value(h)
    return Q

def Q_func(self, state):
    s = Variable(state)
    h = F.tanh(self.fc1(s))
    h = F.tanh(self.fc2(h))
    h = F.tanh(self.fc3(h))
    h = F.tanh(self.fc4(h))
    h = F.tanh(self.fc5(h))
    Q = self.q_value(h)
    return Q

def _initialize_decoder(self, pc, p):
    return F.tanh(self.pc_qc(pc)), F.tanh(self.p_q(p))

def _initialize_decoder(self, fc, bc, f, b):
    return (
        F.tanh(self.fc_pc(fc) + self.bc_pc(bc)),
        F.tanh(self.f_p(f) + self.b_p(b)))

def __call__(self, x):
    return functions.tanh(self.w_xy(x))
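
# Finally, a minimal standalone check of F.tanh itself, in the same spirit as the
# test snippets above (the input values are arbitrary):
import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.array([[-2.0, 0.0, 2.0]], dtype=np.float32))
y = F.tanh(x)                   # elementwise tanh, same shape as x
y.grad = np.ones_like(y.data)   # seed the gradient
y.backward()                    # dy/dx = 1 - tanh(x)**2
print(y.data, x.grad)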