def __init__(self, in_ch=3, out_len=128, base_size=128, down_layers=4, use_bn=True, w_init=None):
    layers = {}
    self.down_layers = down_layers
    if use_bn:
        norm = 'bn'
    else:
        norm = None
    act = F.relu
    # if w_init is None:
    #     w_init = chainer.initializers.Normal(0.02)
    layers['c_first'] = NNBlock(in_ch, base_size, nn='down_conv', norm=None, activation=act, w_init=w_init)
    base = base_size
    for i in range(down_layers - 1):
        layers['c' + str(i)] = NNBlock(base, base * 2, nn='down_conv', norm=norm, activation=act, w_init=w_init)
        base *= 2
    layers['c_last'] = NNBlock(None, out_len, nn='linear', norm=None, activation=None, w_init=w_init)
    super(DCGANEncoder, self).__init__(**layers)
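This uses the pre-`init_scope` Chainer idiom of collecting child links in a dict and registering them all at once via `Chain.__init__(**layers)`. A minimal self-contained illustration of the pattern, with plain `L.Linear` links standing in for the project's `NNBlock`:

import numpy as np
import chainer
import chainer.links as L

class TinyChain(chainer.Chain):
    def __init__(self):
        layers = {}
        layers['l0'] = L.Linear(4, 8)
        layers['l1'] = L.Linear(8, 2)
        # Registers l0 and l1 as child links (deprecated in newer Chainer,
        # but this is the style the snippet above uses).
        super(TinyChain, self).__init__(**layers)

net = TinyChain()
y = net.l1(net.l0(np.zeros((1, 4), dtype=np.float32)))
print(y.shape)  # (1, 2)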
Python relu() usage examples (source code)
def feature_map_activations(self, x):
    """Forward pass through the convolutional layers of the VGG, returning
    all of its intermediate feature map activations."""
    hs = []
    pre_pooling_sizes = []
    h = x
    for conv_block, mp in zip(self.conv_blocks, self.mps):
        for conv in conv_block:
            h = F.relu(conv(h))
        pre_pooling_sizes.append(h.data.shape[2:])
        # Disable cuDNN, else pooling indices will not be stored
        with chainer.using_config('use_cudnn', 'never'):
            h = mp.apply((h,))[0]
        hs.append(h)
    return hs, pre_pooling_sizes
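cuDNN is bypassed because only Chainer's own pooling kernel records the argmax `indexes`, which a decoder can later reuse for unpooling. A minimal sketch of that round trip, assuming only NumPy and Chainer:

import numpy as np
import chainer
import chainer.functions as F

x = np.random.rand(1, 3, 8, 8).astype(np.float32)
mp = F.MaxPooling2D(ksize=2, stride=2)
with chainer.using_config('use_cudnn', 'never'):
    y = mp.apply((chainer.Variable(x),))[0]  # argmax indices stored on `mp`
# Route each pooled value back to the position it came from.
up = F.upsampling_2d(y, mp.indexes, ksize=2, stride=2, outsize=(8, 8))
print(y.shape, up.shape)  # (1, 3, 4, 4) (1, 3, 8, 8)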
def __init__(self, in_size, out_size, hidden_sizes, nonlinearity=F.relu,
             last_wscale=1):
    self.in_size = in_size
    self.out_size = out_size
    self.hidden_sizes = hidden_sizes
    self.nonlinearity = nonlinearity
    super().__init__()
    with self.init_scope():
        if hidden_sizes:
            hidden_layers = []
            hidden_layers.append(L.Linear(in_size, hidden_sizes[0]))
            for hin, hout in zip(hidden_sizes, hidden_sizes[1:]):
                hidden_layers.append(L.Linear(hin, hout))
            self.hidden_layers = chainer.ChainList(*hidden_layers)
            self.output = L.Linear(hidden_sizes[-1], out_size,
                                   initialW=LeCunNormal(last_wscale))
        else:
            self.output = L.Linear(in_size, out_size,
                                   initialW=LeCunNormal(last_wscale))
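The matching forward pass is not shown above. This appears to be ChainerRL's MLP; a hedged sketch of the `__call__` this constructor implies, applying `nonlinearity` between hidden layers and the linear `output` last:

def __call__(self, x):
    h = x
    if self.hidden_sizes:
        for layer in self.hidden_layers:
            h = self.nonlinearity(layer(h))
    return self.output(h)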
def __call__(self, state):
    h = state
    for layer in self.hidden_layers:
        h = F.relu(layer(h))
    v = self.v(h)
    mu = self.mu(h)
    if self.scale_mu:
        mu = scale_by_tanh(mu, high=self.action_space.high,
                           low=self.action_space.low)
    mat_diag = F.exp(self.mat_diag(h))
    if hasattr(self, 'mat_non_diag'):
        mat_non_diag = self.mat_non_diag(h)
        tril = lower_triangular_matrix(mat_diag, mat_non_diag)
        mat = matmul_v3(tril, tril, transb=True)
    else:
        mat = F.expand_dims(mat_diag ** 2, axis=2)
    return QuadraticActionValue(
        mu, mat, v, min_action=self.action_space.low,
        max_action=self.action_space.high)
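Exponentiating the diagonal and multiplying `tril` by its own transpose guarantees that the matrix in the quadratic term is symmetric positive semidefinite, which keeps the NAF advantage well-behaved. A standalone NumPy check of that construction (illustrative only):

import numpy as np

rng = np.random.default_rng(0)
diag = np.exp(rng.normal(size=3))           # exponentiated, hence positive, diagonal
tril = np.diag(diag)
tril[np.tril_indices(3, k=-1)] = rng.normal(size=3)
mat = tril @ tril.T                         # symmetric PSD by construction
print(np.linalg.eigvalsh(mat))              # all eigenvalues non-negative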
def __init__(self, n_actions, n_input_channels=4,
             activation=F.relu, bias=0.1):
    self.n_actions = n_actions
    self.n_input_channels = n_input_channels
    self.activation = activation
    super().__init__()
    with self.init_scope():
        self.conv_layers = chainer.ChainList(
            L.Convolution2D(n_input_channels, 32, 8, stride=4,
                            initial_bias=bias),
            L.Convolution2D(32, 64, 4, stride=2, initial_bias=bias),
            L.Convolution2D(64, 64, 3, stride=1, initial_bias=bias))
        self.a_stream = MLP(3136, n_actions, [512])
        self.v_stream = MLP(3136, 1, [512])
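The hard-coded 3136 is 64 x 7 x 7, the flattened output of the three convolutions for the standard 84 x 84 Atari input. The advantage and value streams are then typically combined with the mean-subtracted dueling rule; a hedged sketch of that aggregation (not the exact ChainerRL code):

import chainer.functions as F

def dueling_q(a, v):
    # a: (batch, n_actions) advantage stream, v: (batch, 1) value stream
    mean_a = F.sum(a, axis=1, keepdims=True) / a.shape[1]
    a_centered = a - F.broadcast_to(mean_a, a.shape)
    return F.broadcast_to(v, a.shape) + a_centered  # Q = V + (A - mean A)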
def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels,
             n_hidden_layers, nonlinearity=F.relu,
             last_wscale=1.):
    self.n_input_channels = n_dim_obs + n_dim_action
    self.n_hidden_layers = n_hidden_layers
    self.n_hidden_channels = n_hidden_channels
    self.nonlinearity = nonlinearity
    super().__init__(
        in_size=self.n_input_channels,
        out_size=1,
        hidden_sizes=[self.n_hidden_channels] * self.n_hidden_layers,
        nonlinearity=nonlinearity,
        last_wscale=last_wscale,
    )
def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels,
             n_hidden_layers, nonlinearity=F.relu, last_wscale=1.):
    assert n_hidden_layers >= 1
    self.n_input_channels = n_dim_obs + n_dim_action
    self.n_hidden_layers = n_hidden_layers
    self.n_hidden_channels = n_hidden_channels
    self.nonlinearity = nonlinearity
    super().__init__()
    with self.init_scope():
        # No need to pass nonlinearity to obs_mlp because it has no
        # hidden layers
        self.obs_mlp = MLP(in_size=n_dim_obs, out_size=n_hidden_channels,
                           hidden_sizes=[])
        self.mlp = MLP(in_size=n_hidden_channels + n_dim_action,
                       out_size=1,
                       hidden_sizes=([self.n_hidden_channels] *
                                     (self.n_hidden_layers - 1)),
                       nonlinearity=nonlinearity,
                       last_wscale=last_wscale,
                       )
    self.output = self.mlp.output
def __init__(self, n_input_channels, n_actions,
             n_hidden_layers=0, n_hidden_channels=None,
             beta=1.0, nonlinearity=F.relu,
             last_wscale=1.0,
             min_prob=0.0):
    self.n_input_channels = n_input_channels
    self.n_actions = n_actions
    self.n_hidden_layers = n_hidden_layers
    self.n_hidden_channels = n_hidden_channels
    self.beta = beta
    super().__init__(
        model=MLP(n_input_channels,
                  n_actions,
                  (n_hidden_channels,) * n_hidden_layers,
                  nonlinearity=nonlinearity,
                  last_wscale=last_wscale),
        beta=self.beta,
        min_prob=min_prob)
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False, noise=False):
    self.bn = bn
    self.activation = activation
    self.dropout = dropout
    self.sample = sample
    self.noise = noise
    layers = {}
    w = chainer.initializers.Normal(0.02)
    if sample == 'down':
        layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
    elif sample == 'none-9':
        layers['c'] = L.Convolution2D(ch0, ch1, 9, 1, 4, initialW=w)
    elif sample == 'none-7':
        layers['c'] = L.Convolution2D(ch0, ch1, 7, 1, 3, initialW=w)
    elif sample == 'none-5':
        layers['c'] = L.Convolution2D(ch0, ch1, 5, 1, 2, initialW=w)
    else:
        layers['c'] = L.Convolution2D(ch0, ch1, 3, 1, 1, initialW=w)
    if bn:
        if self.noise:
            layers['batchnorm'] = L.BatchNormalization(ch1, use_gamma=False)
        else:
            layers['batchnorm'] = L.BatchNormalization(ch1)
    super(CBR, self).__init__(**layers)
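The `__call__` for this block is not included above. A hedged sketch of the conv -> (batchnorm) -> (noise) -> (dropout) -> activation order the stored flags suggest, where `add_noise` is assumed to be a helper defined elsewhere in the project:

def __call__(self, x):
    h = self.c(x)
    if self.bn:
        h = self.batchnorm(h)
    if self.noise:
        h = add_noise(h)        # assumed project helper injecting Gaussian noise
    if self.dropout:
        h = F.dropout(h)
    if self.activation is not None:
        h = self.activation(h)
    return h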
def __call__(self, x, t, train=True, finetune=False):
    # First conv layer
    h = self[0](x)
    # Residual blocks
    for i in range(1, len(self) - 2):
        h = self[i](h, train, finetune)
    # BN, relu, pool, final layer
    h = self[-2](h)
    h = F.relu(h)
    # Fold the extra feature axis (e.g. group/orientation channels) into
    # the channel dimension before global pooling.
    n, nc, ns, nx, ny = h.data.shape
    h = F.reshape(h, (n, nc * ns, nx, ny))
    h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
    h = self[-1](h)
    h = F.reshape(h, h.data.shape[:2])
    return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
def __call__(self, x, t, train=True, finetune=False):
    h = x
    # First conv layer
    h = self[0](h)
    # Residual blocks
    for i in range(1, len(self) - 2):
        h = self[i](h, train, finetune)
    # BN, relu, pool, final layer
    h = self[-2](h)
    h = F.relu(h)
    h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
    h = self[-1](h)
    h = F.reshape(h, h.data.shape[:2])
    return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
def __call__(self, ht, xs, d_bar_s_1):
    # ht: encoder hidden states, (batch_size, n_words, in_size)
    # xs: input sequence
    if d_bar_s_1 is None:
        d_bar_s_1 = np.zeros(self.in_size, dtype=np.float32)
    ht_T = list(map(F.transpose, ht))
    phi_ht = list(map(self.W1, ht_T))
    # NOTE: y_s_1 (the previous output) is not a parameter of this method
    # in the original source; it presumably comes from the caller's scope.
    d_s = self.rnn(d_bar_s_1, y_s_1)
    phi_d = F.transpose_sequence(self.W2(F.transpose_sequence(d_s)))
    u_st = list(map(lambda x: phi_d * x, phi_ht))            # Eq. (4)
    # NOTE: F.sum / F.argmax expect arrays; with Python lists as here,
    # F.stack would be needed first in current Chainer.
    sum_u = F.sum(u_st)
    alpha_st = list(map(lambda x: x / sum_u, u_st))          # Eq. (3)
    z_s = F.argmax(alpha_st, axis=0)
    c_s = F.sum(list(map(lambda x, y: x * y, alpha_st, ht)))  # Eq. (2)
    d_bar_s = F.relu(self.W3(F.concat([c_s, d_s])))
    return d_bar_s, d_s, c_s, z_s
def __init__(self, obs_size, n_actions, n_hidden_channels=[1024, 256]):
    super(QFunction, self).__init__()
    net = []
    inpdim = obs_size
    for i, n_hid in enumerate(n_hidden_channels):
        net += [('l{}'.format(i), L.Linear(inpdim, n_hid))]
        net += [('norm{}'.format(i), L.BatchNormalization(n_hid))]
        net += [('_act{}'.format(i), F.relu)]
        inpdim = n_hid
    net += [('output', L.Linear(inpdim, n_actions))]
    with self.init_scope():
        for n in net:
            if not n[0].startswith('_'):
                setattr(self, n[0], n[1])
    self.forward = net
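Entries whose name starts with `_` are bare functions (here `F.relu`) and are skipped during link registration, while the full `net` list, saved as `self.forward`, doubles as the forward-pass program. A hedged sketch of the companion `__call__` this implies; wrapping the result in ChainerRL's `DiscreteActionValue` is an assumption based on the class name:

def __call__(self, x):
    h = x
    for name, f in self.forward:
        h = f(h)  # registered links and F.relu are both callables
    return chainerrl.action_value.DiscreteActionValue(h)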
def forward(self, ws, ss, ps):
    batchsize = len(ws)
    xp = chainer.cuda.get_array_module(ws[0])
    ws = map(self.emb_word, ws)
    ss = [F.reshape(self.emb_suf(s), (s.shape[0], 4 * self.afix_dim)) for s in ss]
    ps = [F.reshape(self.emb_prf(s), (s.shape[0], 4 * self.afix_dim)) for s in ps]
    # [(sentence length, (word_dim + suf_dim + prf_dim))]
    xs_f = [F.dropout(F.concat([w, s, p]),
                      self.dropout_ratio, train=self.train)
            for w, s, p in zip(ws, ss, ps)]
    xs_b = [x[::-1] for x in xs_f]
    cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
    _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
    _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
    hs_b = [x[::-1] for x in hs_b]
    # ys: [(sentence length, number of categories)]
    ys = [self.linear2(F.relu(
            self.linear1(F.concat([h_f, h_b]))))
          for h_f, h_b in zip(hs_f, hs_b)]
    return ys
def predict(self, xs):
    """
    xs: list of tokenized sentences
    """
    xs = [self.extractor.process(x) for x in xs]
    batchsize = len(xs)
    ws, cs, ls = zip(*xs)
    ws = map(self.emb_word, ws)
    cs = [F.squeeze(
            F.max_pooling_2d(
                self.conv_char(
                    F.expand_dims(
                        self.emb_char(c), 1)), (l, 1)))
          for c, l in zip(cs, ls)]
    xs_f = [F.dropout(F.concat([w, c]),
                      self.dropout_ratio, train=self.train)
            for w, c in zip(ws, cs)]
    xs_b = [x[::-1] for x in xs_f]
    cx_f, hx_f, cx_b, hx_b = self._init_state(batchsize)
    _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
    _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
    hs_b = [x[::-1] for x in hs_b]
    ys = [self.linear2(F.relu(self.linear1(F.concat([h_f, h_b]))))
          for h_f, h_b in zip(hs_f, hs_b)]
    return [y.data[1:-1] for y in ys]
def __call__(self, ws, cs, ls, ts):
    h_w = self.emb_word(ws)  # (batchsize, windowsize, word_dim)
    h_c = self.emb_char(cs)  # (batchsize, windowsize, max_char_len, char_dim)
    batchsize, windowsize, _, _ = h_c.data.shape
    # (batchsize, windowsize, char_dim)
    h_c = F.sum(h_c, 2)
    h_c, ls = F.broadcast(h_c, F.reshape(ls, (batchsize, windowsize, 1)))
    h_c = h_c / ls
    h = F.concat([h_w, h_c], 2)
    h = F.reshape(h, (batchsize, -1))
    # ys = self.linear1(h)
    h = F.relu(self.linear1(h))
    h = F.dropout(h, ratio=.5, train=self.train)
    ys = self.linear2(h)
    loss = F.softmax_cross_entropy(ys, ts)
    acc = F.accuracy(ys, ts)
    chainer.report({
        "loss": loss,
        "accuracy": acc,
    }, self)
    return loss
def forward(self, ws, ss, ps):
    batchsize, length = ws.shape
    xp = chainer.cuda.get_array_module(ws[0])
    ws = self.emb_word(ws)  # (batch, length, word_dim)
    ss = F.reshape(self.emb_suf(ss), (batchsize, length, -1))
    ps = F.reshape(self.emb_prf(ps), (batchsize, length, -1))
    hs = F.transpose(F.concat([ws, ss, ps], 2), (1, 0, 2))
    hs = F.dropout(hs, self.dropout_ratio, train=self.train)
    hs = F.split_axis(hs, length, 0)
    hs_f = []
    hs_b = []
    self._init_state()
    for h_in_f, h_in_b in zip(hs, reversed(hs)):
        h_f = self.lstm_f2(self.lstm_f1(F.squeeze(h_in_f, 0)))
        hs_f.append(h_f)
        h_b = self.lstm_b2(self.lstm_b1(F.squeeze(h_in_b, 0)))
        hs_b.append(h_b)
    ys = [self.linear2(F.relu(self.linear1(F.concat([h_f, h_b]))))
          for h_f, h_b in zip(hs_f, reversed(hs_b))]
    return ys
def __call__(self, x, train=True):
    hlist = []
    h_0 = self['embed'](x)
    if not self.non_static:
        h_0 = Variable(h_0.data)
    h_1 = F.reshape(h_0, (h_0.shape[0], 1, h_0.shape[1], h_0.shape[2]))
    for filter_h in self.filter_sizes:
        pool_size = (self.doc_length - filter_h + 1, 1)
        h = F.max_pooling_2d(F.relu(self['conv' + str(filter_h)](h_1)), pool_size)
        hlist.append(h)
    h = F.concat(hlist)
    pos = 0
    while pos < len(self.hidden_units) - 1:
        h = F.dropout(F.relu(self['l' + str(pos)](h)))
        pos += 1
    y = F.relu(self['l' + str(pos)](h))
    return y
def __call__(self, x, t):
    self.clear()
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)), train=self.train)
    h = F.dropout(F.relu(self.fc7(h)), train=self.train)
    h = self.fc8(h)
    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    return self.loss
def __call__(self, x, t):
    self.clear()
    h = self.bn1(self.conv1(x), test=not self.train)
    h = F.max_pooling_2d(F.relu(h), 3, stride=2)
    h = self.bn2(self.conv2(h), test=not self.train)
    h = F.max_pooling_2d(F.relu(h), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)), train=self.train)
    h = F.dropout(F.relu(self.fc7(h)), train=self.train)
    h = self.fc8(h)
    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    return self.loss
def __call__(self, x):
    # assume x is on GPU 0
    x1 = F.copy(x, 1)
    z0 = self.first0(x)
    z1 = self.first1(x1)
    # sync
    h0 = z0 + F.copy(z1, 0)
    h1 = z1 + F.copy(z0, 1)
    y0 = self.second0(F.relu(h0))
    y1 = self.second1(F.relu(h1))
    # sync
    y = y0 + F.copy(y1, 0)
    return y
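`F.copy` is a differentiable device-to-device transfer, so the two GPU branches can be summed and gradients still flow back through the copies. A tiny CPU-only demonstration (device `-1` means CPU; with GPUs you would pass the device id as above):

import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.ones((2, 3), dtype=np.float32))
y = F.copy(x, -1)       # differentiable copy; -1 targets the CPU
loss = F.sum(y * 2)
loss.backward()
print(x.grad)           # all twos: gradients flowed back through the copy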
def predict(self, x):
    """Predict 2D pose from image."""
    # layer1
    h = F.relu(self.conv1(x))
    h = F.max_pooling_2d(h, 3, stride=2)
    # layer2
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(h, 3, stride=2)
    # layer3-5
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.relu(self.conv5(h))
    h = F.max_pooling_2d(h, 3, stride=2)
    # layer6-8
    h = F.dropout(F.relu(self.fc6(h)), train=self.train)
    h = F.dropout(F.relu(self.fc7(h)), train=self.train)
    h = self.fc8(h)
    return F.reshape(h, (-1, self.Nj, 2))
Source: convolutional_pose_machine.py (project: convolutional-pose-machines-chainer, author: tomoyukun)
def __call__(self, x):
    h = self.conv1(x)
    h = F.relu(h)
    h = F.max_pooling_2d(h, ksize=3, stride=2)
    h = self.conv2(h)
    h = F.relu(h)
    h = F.max_pooling_2d(h, ksize=3, stride=2)
    h = self.conv3(h)
    h = F.relu(h)
    h = F.max_pooling_2d(h, ksize=3, stride=2)
    h = self.conv4(h)
    h = F.relu(h)
    h = self.conv5(h)
    h = F.relu(h)
    h = self.conv6(h)
    h = F.relu(h)
    h = self.conv7(h)
    return h
Source: convolutional_pose_machine.py (project: convolutional-pose-machines-chainer, author: tomoyukun)
def __call__(self, pmap, fmap, cmap):
    fmap = self.conv0(fmap)
    fmap = F.relu(fmap)
    cmap = F.average_pooling_2d(cmap, ksize=8, stride=8)
    h = F.concat((fmap, pmap, cmap), 1)
    h = self.conv1(h)
    h = F.relu(h)
    h = self.conv2(h)
    h = F.relu(h)
    h = self.conv3(h)
    h = F.relu(h)
    h = self.conv4(h)
    h = F.relu(h)
    h = self.conv5(h)
    return h
Source: __init__.py (project: convolutional-pose-machines-chainer, author: tomoyukun)
def __call__(self, x):
    h = self.conv1(x)
    h = F.relu(h)
    h = F.max_pooling_2d(h, ksize=3, stride=2)
    h = self.conv2(h)
    h = F.relu(h)
    h = F.max_pooling_2d(h, ksize=3, stride=2)
    h = self.conv3(h)
    h = F.relu(h)
    h = F.max_pooling_2d(h, ksize=3, stride=2)
    h = self.conv4(h)
    h = F.relu(h)
    h = self.conv5(h)
    h = F.relu(h)
    h = self.conv6(h)
    h = F.relu(h)
    h = self.conv7(h)
    return h
def __call__(self, x):
    h = F.relu(self.conv1_1(x))
    h = F.relu(self.conv1_2(h))
    h = F.max_pooling_2d(h, 2, 2)
    h = F.relu(self.conv2_1(h))
    h = F.relu(self.conv2_2(h))
    h = F.max_pooling_2d(h, 2, 2)
    h = F.relu(self.conv3_1(h))
    h = F.relu(self.conv3_2(h))
    h = F.relu(self.conv3_3(h))
    h = F.relu(self.conv3_4(h))
    h = F.max_pooling_2d(h, 2, 2)
    h = F.relu(self.fc4(h))
    h = F.relu(self.fc5(h))
    h = self.fc6(h)
    L_out = h
    return L_out
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def __call__(self, x):
    h = F.relu(self.conv1_1(x))
    h = F.relu(self.conv1_2(h))
    h = F.average_pooling_2d(h, 2, 2)
    h = F.relu(self.conv2_1(h))
    h = F.relu(self.conv2_2(h))
    h = F.average_pooling_2d(h, 2, 2)
    h = F.relu(self.conv3_1(h))
    h = F.relu(self.conv3_2(h))
    h = F.relu(self.conv3_3(h))
    h = F.relu(self.conv3_4(h))
    h = F.average_pooling_2d(h, 2, 2)
    h = F.relu(self.fc4(h))
    h = F.relu(self.fc5(h))
    h = self.fc6(h)
    L_out = h
    return L_out
def to_function(self):
    name = self.nonlinearity.lower()
    if name == "clipped_relu":
        return clipped_relu()
    if name == "crelu":
        return crelu()
    if name == "elu":
        return elu()
    if name == "hard_sigmoid":
        return hard_sigmoid()
    if name == "leaky_relu":
        return leaky_relu()
    if name == "relu":
        return relu()
    if name == "sigmoid":
        return sigmoid()
    if name == "softmax":
        return softmax()
    if name == "softplus":
        return softplus()
    if name == "tanh":
        return tanh()
    if name == "bst":
        return bst()
    raise NotImplementedError()
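The chain of lowercase comparisons can also be written as a table lookup; a hedged, behavior-equivalent sketch assuming the same factory callables (`relu`, `sigmoid`, ...) are in scope:

_NONLINEARITIES = {
    "clipped_relu": clipped_relu, "crelu": crelu, "elu": elu,
    "hard_sigmoid": hard_sigmoid, "leaky_relu": leaky_relu,
    "relu": relu, "sigmoid": sigmoid, "softmax": softmax,
    "softplus": softplus, "tanh": tanh, "bst": bst,
}

def to_function(self):
    try:
        return _NONLINEARITIES[self.nonlinearity.lower()]()
    except KeyError:
        raise NotImplementedError(self.nonlinearity)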
def __call__(self, x):
    h = self.bconv1_1(x)
    h = self.bconv1_2(h)
    h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
    h = self.bconv2_1(h)
    h = self.bconv2_2(h)
    h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
    h = self.bconv3_1(h)
    h = self.bconv3_2(h)
    h = self.bconv3_3(h)
    h = self.bconv3_4(h)
    h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
    h = F.relu(self.fc4(F.dropout(h)))
    h = F.relu(self.fc5(F.dropout(h)))
    h = self.fc6(h)
    return h