def forward(self, x):
    x = self.embed(x)  # (N,W,D)
    if self.args.static:
        x = Variable(x.data)  # detach so the embedding stays fixed (as in the model.py snippet below)
    x = x.unsqueeze(1)  # (N,Ci,W,D)
    x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
    x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
    x = torch.cat(x, 1)
    '''
    x1 = self.conv_and_pool(x, self.conv13)  # (N,Co)
    x2 = self.conv_and_pool(x, self.conv14)  # (N,Co)
    x3 = self.conv_and_pool(x, self.conv15)  # (N,Co)
    x = torch.cat((x1, x2, x3), 1)  # (N,len(Ks)*Co)
    '''
    x = self.dropout(x)  # (N,len(Ks)*Co)
    logit = self.fc1(x)  # (N,C)
    return logit
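Throughout these snippets, F.max_pool1d with a kernel spanning the full last dimension implements max-over-time pooling. A minimal standalone sketch, with hypothetical sizes (N=2, Co=4, W=7):

import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 7)                        # (N, Co, W), as produced by conv + squeeze(3)
pooled = F.max_pool1d(x, x.size(2)).squeeze(2)  # kernel spans the whole W axis: (N, Co, 1) -> (N, Co)
print(pooled.shape)                             # torch.Size([2, 4])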
Python F.max_pool1d() example source code
Source: model_CNN.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    x = self.embed(x)  # (N,W,D)
    x = self.dropout_embed(x)
    x = x.unsqueeze(1)  # (N,Ci,W,D)
    if self.args.batch_normalizations is True:
        x = [self.convs1_bn(F.tanh(conv(x))).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
    else:
        # x = [self.dropout(F.relu(conv(x)).squeeze(3)) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        # x = [self.dropout(F.tanh(conv(x)).squeeze(3)) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        # x = [F.tanh(conv(x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
    x = torch.cat(x, 1)
    x = self.dropout(x)  # (N,len(Ks)*Co)
    if self.args.batch_normalizations is True:
        x = self.fc1_bn(self.fc1(x))
        logit = self.fc2_bn(self.fc2(F.tanh(x)))
    else:
        logit = self.fc(x)
    return logit
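Pooling over the entire width is the same as taking the maximum along the time dimension; a small sketch (sizes are hypothetical) demonstrating the equivalence:

import torch
import torch.nn.functional as F

t = torch.randn(8, 100, 36)                       # (N, Co, W)
via_pool = F.max_pool1d(t, t.size(2)).squeeze(2)  # (N, Co)
via_max = t.max(dim=2).values                     # same values, no pooling op needed
assert torch.equal(via_pool, via_max)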
Source: model_BiLSTM_1.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    x = self.embed(x)
    x = self.dropout_embed(x)
    # x = x.view(len(x), x.size(1), -1)
    # x = embed.view(len(x), embed.size(1), -1)
    bilstm_out, self.hidden = self.bilstm(x, self.hidden)
    # print(self.hidden)
    bilstm_out = torch.transpose(bilstm_out, 0, 1)
    bilstm_out = torch.transpose(bilstm_out, 1, 2)
    bilstm_out = F.tanh(bilstm_out)
    bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
    bilstm_out = F.tanh(bilstm_out)
    # bilstm_out = self.dropout(bilstm_out)
    # bilstm_out = self.hidden2label1(bilstm_out)
    # logit = self.hidden2label2(F.tanh(bilstm_out))
    logit = self.hidden2label(bilstm_out)
    return logit
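The transpose-then-pool idiom above converts the time-major LSTM output to (N, 2H, T) before pooling. A self-contained sketch, with hypothetical sizes:

import torch
import torch.nn as nn
import torch.nn.functional as F

lstm = nn.LSTM(input_size=10, hidden_size=6, bidirectional=True)
x = torch.randn(5, 3, 10)                           # (T, N, D): time-major, as self.bilstm expects
out, _ = lstm(x)                                    # (T, N, 2H)
out = out.transpose(0, 1).transpose(1, 2)           # -> (N, 2H, T)
pooled = F.max_pool1d(out, out.size(2)).squeeze(2)  # (N, 2H)
print(pooled.shape)                                 # torch.Size([3, 12])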
Source: model_BiLSTM_lexicon.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    embed = self.embed(x)
    embed = self.dropout(embed)
    x = embed.view(len(x), embed.size(1), -1)
    bilstm_out, self.hidden = self.bilstm(x, self.hidden)
    # print("bbb {}".format(self.hidden[0]))
    hidden = torch.cat(self.hidden, 0)  # (h_n, c_n) -> (2 * num_layers * num_directions, N, H)
    # print("ccc {}".format(hidden.size()))
    hidden = torch.cat(tuple(hidden), 1)  # torch.cat needs a sequence of tensors; concat the slices -> (N, ...)
    # print("ddd {}".format(hidden.size()))
    # bilstm_out = torch.transpose(bilstm_out, 0, 1)
    # bilstm_out = torch.transpose(bilstm_out, 1, 2)
    # print("aaa {}".format(bilstm_out.size()))
    # bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
    # bilstm_out = F.avg_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
    # print("sss {}".format(bilstm_out.size()))
    # print("Hidden {} ".format(hidden))
    logit = self.hidden2label(F.tanh(hidden))
    # print("Logit {} ".format(logit))
    return logit
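The two torch.cat calls above flatten the BiLSTM's final (h_n, c_n) states into one vector per example. A sketch with hypothetical sizes (1 layer, 2 directions, N=3, H=6):

import torch

h_n, c_n = torch.randn(2, 3, 6), torch.randn(2, 3, 6)  # final states of a 1-layer BiLSTM
hidden = torch.cat((h_n, c_n), 0)                       # (4, N, H)
hidden = torch.cat(tuple(hidden), 1)                    # concat the four (N, H) slices -> (N, 4H)
print(hidden.shape)                                     # torch.Size([3, 24])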
Source: model.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    # print("aa", x)
    x = self.embed(x)  # (N,W,D)
    # print("embed", x)
    if self.args.static:
        x = Variable(x.data)
    # print("var", x)
    x = x.unsqueeze(1)  # (N,Ci,W,D)
    x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
    x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
    x = torch.cat(x, 1)
    '''
    x1 = self.conv_and_pool(x, self.conv13)  # (N,Co)
    x2 = self.conv_and_pool(x, self.conv14)  # (N,Co)
    x3 = self.conv_and_pool(x, self.conv15)  # (N,Co)
    x = torch.cat((x1, x2, x3), 1)  # (N,len(Ks)*Co)
    '''
    x = self.dropout(x)  # (N,len(Ks)*Co)
    logit = self.fc1(x)  # (N,C)
    return logit
Source: model_CBiLSTM.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    embed = self.embed(x)
    # CNN
    embed = self.dropout(embed)
    cnn_x = embed
    cnn_x = cnn_x.unsqueeze(1)
    cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
    cnn_x = torch.cat(cnn_x, 0)
    cnn_x = torch.transpose(cnn_x, 1, 2)
    # BiLSTM
    bilstm_out, self.hidden = self.bilstm(cnn_x, self.hidden)
    bilstm_out = torch.transpose(bilstm_out, 0, 1)
    bilstm_out = torch.transpose(bilstm_out, 1, 2)
    bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
    # linear
    cnn_bilstm_out = self.hidden2label1(F.tanh(bilstm_out))
    cnn_bilstm_out = self.hidden2label2(F.tanh(cnn_bilstm_out))
    # dropout
    logit = self.dropout(cnn_bilstm_out)
    return logit
Source: model_BiGRU.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, input):
    embed = self.embed(input)
    embed = self.dropout(embed)  # adding this reduces the accuracy
    input = embed.view(len(input), embed.size(1), -1)
    # gru
    gru_out, hidden = self.bigru(input, self.hidden)
    gru_out = torch.transpose(gru_out, 0, 1)
    gru_out = torch.transpose(gru_out, 1, 2)
    # pooling
    # gru_out = F.tanh(gru_out)
    gru_out = F.max_pool1d(gru_out, gru_out.size(2)).squeeze(2)
    gru_out = F.tanh(gru_out)
    # linear
    y = self.hidden2label(gru_out)
    logit = y
    return logit
Source: model_DeepCNN.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    one_layer = self.embed(x)  # (N,W,D) # torch.Size([64, 43, 300])
    # one_layer = self.dropout(one_layer)
    one_layer = one_layer.unsqueeze(1)  # (N,Ci,W,D) # torch.Size([64, 1, 43, 300])
    # one layer
    one_layer = [torch.transpose(F.relu(conv(one_layer)).squeeze(3), 1, 2) for conv in self.convs1]  # torch.Size([64, 100, 36])
    # two layer
    two_layer = [F.relu(conv(one_layer.unsqueeze(1))).squeeze(3) for (conv, one_layer) in zip(self.convs2, one_layer)]
    # print("two_layer {}".format(two_layer[0].size()))
    # pooling
    output = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in two_layer]  # torch.Size([64, 100]) each
    output = torch.cat(output, 1)  # torch.Size([64, 300])
    # dropout
    output = self.dropout(output)
    # linear
    output = self.fc1(F.relu(output))
    logit = self.fc2(F.relu(output))
    return logit
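A sketch of the two-layer branch above with hypothetical sizes; the second conv runs on the transposed output of the first:

import torch
import torch.nn as nn
import torch.nn.functional as F

conv1 = nn.Conv2d(1, 100, (3, 300))  # first layer over (N, 1, W, D)
conv2 = nn.Conv2d(1, 100, (3, 100))  # second layer over (N, 1, W', Co)
x = torch.randn(64, 1, 43, 300)
h = F.relu(conv1(x)).squeeze(3).transpose(1, 2)  # (N, W-2, Co) = (64, 41, 100)
h = F.relu(conv2(h.unsqueeze(1))).squeeze(3)     # (N, Co, W-4) = (64, 100, 39)
out = F.max_pool1d(h, h.size(2)).squeeze(2)      # (N, Co) = (64, 100)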
Source: model_CGRU.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    embed = self.embed(x)
    # CNN
    cnn_x = embed
    cnn_x = self.dropout(cnn_x)
    cnn_x = cnn_x.unsqueeze(1)
    cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
    cnn_x = torch.cat(cnn_x, 0)
    cnn_x = torch.transpose(cnn_x, 1, 2)
    # GRU
    lstm_out, self.hidden = self.gru(cnn_x, self.hidden)
    lstm_out = torch.transpose(lstm_out, 0, 1)
    lstm_out = torch.transpose(lstm_out, 1, 2)
    lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)
    # linear
    cnn_lstm_out = self.hidden2label1(F.tanh(lstm_out))
    cnn_lstm_out = self.hidden2label2(F.tanh(cnn_lstm_out))
    # output
    logit = cnn_lstm_out
    return logit
def forward(self, x):
    x = self.embeding(x)
    x = x.unsqueeze(1)
    x = [F.relu(conv(x).squeeze(3)) for conv in self.convs]
    x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]
    x = torch.cat(x, 1)
    '''
    x1 = self.conv_and_pool(x, self.conv13)  # (N,Co)
    x2 = self.conv_and_pool(x, self.conv14)  # (N,Co)
    x3 = self.conv_and_pool(x, self.conv15)  # (N,Co)
    x = torch.cat((x1, x2, x3), 1)  # (N,len(Ks)*Co)
    '''
    x = self.dropout(x)
    out = self.fc(x)
    return out
def forward(self, input):
    x = F.elu(self.conv1(input))
    x = F.elu(self.conv2(x))
    x = F.elu(self.conv3(x))
    # Next, flatten the output to be batched into LSTM layers.
    # The shape of x is (batch_size, channels, height, width).
    x = self.pre_lstm_bn(x)
    x = torch.transpose(x, 1, 3)
    x = torch.transpose(x, 1, 2)
    x = x.contiguous()
    x = x.view(x.size(0), self.batch, self.hidden_dim)
    x, hidden = self.lstm(x, (self.hidden_state, self.cell_state))
    self.hidden_state, self.cell_state = hidden
    x = torch.transpose(x, 2, 1)
    x = x.contiguous()
    x = x.view(x.size(0), self.hidden_dim, self.height, self.width)
    x = self.lstm_batch_norm(x)
    x = F.elu(self.conv4(x))
    x = F.elu(self.conv5(x))
    logit = self.move_conv(x)
    logit = logit.view(logit.size(0), -1)
    x = self.value_conv(x)
    x = x.view(x.size(0), self.hidden_dim, self.batch)
    x = F.max_pool1d(x, self.batch)
    x = x.squeeze(2)  # squeeze(2) keeps the batch dim even when batch size is 1
    val = self.value_linear(x)
    return val, logit
def forward(self, x):
    # x is (batch, len, d)
    x = x.unsqueeze(1)  # (batch, Ci, len, d)
    x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(batch, Co, len), ...]
    x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]
    x = torch.cat(x, 1)
    return x
def _get_blocks_for_sentence(self, sent):
    block_a = {}
    block_b = {}
    for ws in self.filter_widths:
        if np.isinf(ws):
            sent_flattened, sent_flattened_size = sent.contiguous().view(sent.size(0), 1, -1), sent.size(1) * sent.size(2)
            block_a[ws] = {
                'max': F.max_pool1d(sent_flattened, sent_flattened_size).view(sent.size(0), -1),
                'min': F.max_pool1d(-1 * sent_flattened, sent_flattened_size).view(sent.size(0), -1),
                'mean': F.avg_pool1d(sent_flattened, sent_flattened_size).view(sent.size(0), -1)
            }
            continue
        holistic_conv_out = self.holistic_conv_layers[ws - 1](sent)
        block_a[ws] = {
            'max': F.max_pool1d(holistic_conv_out, holistic_conv_out.size(2)).contiguous().view(-1, self.n_holistic_filters),
            'min': F.max_pool1d(-1 * holistic_conv_out, holistic_conv_out.size(2)).contiguous().view(-1, self.n_holistic_filters),
            'mean': F.avg_pool1d(holistic_conv_out, holistic_conv_out.size(2)).contiguous().view(-1, self.n_holistic_filters)
        }
        per_dim_conv_out = self.per_dim_conv_layers[ws - 1](sent)
        block_b[ws] = {
            'max': F.max_pool1d(per_dim_conv_out, per_dim_conv_out.size(2)).contiguous().view(-1, self.n_word_dim, self.n_per_dim_filters),
            'min': F.max_pool1d(-1 * per_dim_conv_out, per_dim_conv_out.size(2)).contiguous().view(-1, self.n_word_dim, self.n_per_dim_filters)
        }
    return block_a, block_b
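Note that the 'min' entries above keep F.max_pool1d(-x) without negating the result back, so they are the negated minima; the sign flip is harmless ahead of a linear layer, but a literal min-pool would re-negate, as in this sketch:

import torch
import torch.nn.functional as F

conv_out = torch.randn(3, 8, 20)                            # (N, filters, L)
mn = -F.max_pool1d(-conv_out, conv_out.size(2)).squeeze(2)  # true minimum over positions
assert torch.equal(mn, conv_out.min(dim=2).values)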
def conv_and_pool(self, x, conv):
    x = F.relu(conv(x)).squeeze(3)  # (N,Co,W)
    x = F.max_pool1d(x, x.size(2)).squeeze(2)
    return x
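A hypothetical standalone run of the same two steps, with Kim-CNN-style sizes (N=64, Ci=1, W=43, D=300, Co=100, K=3):

import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv2d(1, 100, (3, 300))         # Ci=1, Co=100, kernel (K, D)
x = torch.randn(64, 1, 43, 300)            # (N, Ci, W, D)
h = F.relu(conv(x)).squeeze(3)             # (N, Co, W-K+1) = (64, 100, 41)
h = F.max_pool1d(h, h.size(2)).squeeze(2)  # (N, Co) = (64, 100)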
def fwdUttEnc(module, rnnOut):  # forward the utterance encoder to get the utterance representation
    uttEncOut = None
    if SharedModel.args.utt_enc_type == 0:  # encoding by summation
        uttEncOut = rnnOut.sum(0).squeeze(0)
    elif SharedModel.args.utt_enc_type == 1:  # encoding by mean
        uttEncOut = rnnOut.mean(0).squeeze(0)
    else:  # encoding by CNN
        uttEncOut = []
        for i, curConv in enumerate(module.uttEncoder):
            curConvInput = rnnOut.permute(1, 2, 0)
            curConvOut = curConv(curConvInput)
            curPoolOut = None
            if SharedModel.args.utt_enc_type == 2:  # using average pooling
                curPoolOut = F.avg_pool1d(curConvOut, curConvOut.data.size(2))
            else:  # using max pooling
                curPoolOut = F.max_pool1d(curConvOut, curConvOut.data.size(2))
            uttEncOut.append(curPoolOut)
        uttEncOut = torch.cat(uttEncOut, 1)
        uttEncOut = uttEncOut.squeeze(2)
    uttEncOut = F.tanh(uttEncOut)
    if SharedModel.args.utt_enc_noise == True:
        module.uttEncNoise.data.resize_(uttEncOut.size()).normal_(0, 0.1)  # add white noise to the utterance encoding
        uttEncOut.add_(module.uttEncNoise)
    if SharedModel.args.utt_enc_bn == True:
        uttEncOut = module.uttBn(uttEncOut)
    return uttEncOut
def forward(self, input, lengths):
    N, T = input.size(0), input.size(1)
    conv_bank_out = []
    input_t = input.transpose(1, 2)  # NxTxH -> NxHxT
    for i in range(self.num_filters):
        tmp_input = input_t
        if i % 2 == 0:
            tmp_input = tmp_input.unsqueeze(-1)
            tmp_input = F.pad(tmp_input, (0, 0, 0, 1)).squeeze(-1)  # NxHxT
        conv_bank_out.append(self.conv_bank[i](tmp_input))
    residual = torch.cat(conv_bank_out, dim=1)  # NxHFxT
    residual = F.relu(self.bn_list[0](residual))
    residual = F.max_pool1d(residual, 2, stride=1)
    residual = self.conv1(residual)  # NxHxT
    residual = F.relu(self.bn_list[1](residual))
    residual = self.conv2(residual)  # NxHxT
    residual = self.bn_list[2](residual).transpose(1, 2)  # NxHxT -> NxTxH
    rnn_input = input
    if rnn_input.size() != residual.size():
        rnn_input = self.residual_proj(rnn_input)
    rnn_input = rnn_input + residual
    rnn_input = self.highway(rnn_input).view(N, T, -1)
    output = rnn.pack_padded_sequence(rnn_input, lengths, True)
    output, _ = self.BGRU(output)  # zero h_0 is used by default
    output, _ = rnn.pad_packed_sequence(output, True)  # NxTx2H
    return output
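F.max_pool1d(residual, 2, stride=1) shortens the time axis by one unless the input was padded first; a sketch of the length-preserving variant (assumed intent, as in CBHG-style encoders):

import torch
import torch.nn.functional as F

x = torch.randn(2, 16, 10)                    # (N, H, T)
x = F.pad(x, (0, 1))                          # pad one frame on the right: T -> T + 1
y = F.max_pool1d(x, kernel_size=2, stride=1)  # width-2 windows, stride 1: back to T
print(y.shape)                                # torch.Size([2, 16, 10])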
Source: model_CNN_LSTM.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    # print("fffff", x)
    embed = self.embed(x)
    # CNN
    cnn_x = embed
    cnn_x = torch.transpose(cnn_x, 0, 1)
    cnn_x = cnn_x.unsqueeze(1)
    cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
    cnn_x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in cnn_x]  # [(N,Co), ...]*len(Ks)
    cnn_x = torch.cat(cnn_x, 1)
    cnn_x = self.dropout(cnn_x)
    # LSTM
    lstm_x = embed.view(len(x), embed.size(1), -1)
    lstm_out, self.hidden = self.lstm(lstm_x, self.hidden)
    lstm_out = torch.transpose(lstm_out, 0, 1)
    lstm_out = torch.transpose(lstm_out, 1, 2)
    # lstm_out = F.tanh(lstm_out)
    lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)
    # CNN and LSTM cat
    cnn_x = torch.transpose(cnn_x, 0, 1)
    lstm_out = torch.transpose(lstm_out, 0, 1)
    cnn_lstm_out = torch.cat((cnn_x, lstm_out), 0)
    cnn_lstm_out = torch.transpose(cnn_lstm_out, 0, 1)
    # linear
    cnn_lstm_out = self.hidden2label1(F.tanh(cnn_lstm_out))
    cnn_lstm_out = self.hidden2label2(F.tanh(cnn_lstm_out))
    # output
    logit = cnn_lstm_out
    return logit
Source: model_DeepCNN_MUI.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    x_no_static = self.embed_no_static(x)
    # x_no_static = self.dropout(x_no_static)
    x_static = self.embed_static(x)
    x_static = Variable(x_static.data)  # fix the embedding
    # x_static = self.dropout(x_static)
    x = torch.stack([x_static, x_no_static], 1)
    one_layer = x  # (N,W,D) # torch.Size([64, 43, 300])
    # print("one_layer {}".format(one_layer.size()))
    # one_layer = self.dropout(one_layer)
    # one_layer = one_layer.unsqueeze(1)  # (N,Ci,W,D) # torch.Size([64, 1, 43, 300])
    # one layer
    one_layer = [torch.transpose(F.relu(conv(one_layer)).squeeze(3), 1, 2).unsqueeze(1) for conv in self.convs1]  # torch.Size([64, 100, 36])
    # one_layer = [F.relu(conv(one_layer)).squeeze(3).unsqueeze(1) for conv in self.convs1]  # torch.Size([64, 100, 36])
    # print(one_layer[0].size())
    # print(one_layer[1].size())
    # two layer
    two_layer = [F.relu(conv(one_layer)).squeeze(3) for (conv, one_layer) in zip(self.convs2, one_layer)]
    # print("two_layer {}".format(two_layer[0].size()))
    # print("two_layer {}".format(two_layer[1].size()))
    # pooling
    output = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in two_layer]  # torch.Size([64, 100]) each
    output = torch.cat(output, 1)  # torch.Size([64, 300])
    # dropout
    output = self.dropout(output)
    # linear
    output = self.fc1(output)
    logit = self.fc2(F.relu(output))
    return logit
Source: model_CNN_MUI.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def conv_and_pool(self, x, conv):
    x = F.relu(conv(x)).squeeze(3)  # (N,Co,W)
    x = F.max_pool1d(x, x.size(2)).squeeze(2)
    return x
Source: model_CNN_MUI.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    # print("aaaaa")
    x_no_static = self.embed_no_static(x)
    # x_no_static = self.dropout(x_no_static)
    x_static = self.embed_static(x)
    # fix the embedding
    # x_static = Variable(x_static.data)
    # x_static = self.dropout(x_static)
    x = torch.stack([x_static, x_no_static], 1)
    # x = x.unsqueeze(1)  # (N,Ci,W,D)
    x = self.dropout(x)
    if self.args.batch_normalizations is True:
        x = [F.relu(self.convs1_bn(conv(x))).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
    else:
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
    x = torch.cat(x, 1)
    '''
    x1 = self.conv_and_pool(x, self.conv13)  # (N,Co)
    x2 = self.conv_and_pool(x, self.conv14)  # (N,Co)
    x3 = self.conv_and_pool(x, self.conv15)  # (N,Co)
    x = torch.cat((x1, x2, x3), 1)  # (N,len(Ks)*Co)
    '''
    x = self.dropout(x)  # (N,len(Ks)*Co)
    if self.args.batch_normalizations is True:
        x = self.fc1(x)
        logit = self.fc2(F.relu(x))
    else:
        x = self.fc1(x)
        logit = self.fc2(F.relu(x))
    return logit
Source: model_CNN_BiLSTM.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    embed = self.embed(x)
    # CNN
    cnn_x = embed
    cnn_x = torch.transpose(cnn_x, 0, 1)
    cnn_x = cnn_x.unsqueeze(1)
    # cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
    cnn_x = [conv(cnn_x).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
    # cnn_x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in cnn_x]  # [(N,Co), ...]*len(Ks)
    cnn_x = [F.tanh(F.max_pool1d(i, i.size(2)).squeeze(2)) for i in cnn_x]  # [(N,Co), ...]*len(Ks)
    cnn_x = torch.cat(cnn_x, 1)
    cnn_x = self.dropout(cnn_x)
    # BiLSTM
    bilstm_x = embed.view(len(x), embed.size(1), -1)
    bilstm_out, self.hidden = self.bilstm(bilstm_x, self.hidden)
    bilstm_out = torch.transpose(bilstm_out, 0, 1)
    bilstm_out = torch.transpose(bilstm_out, 1, 2)
    # bilstm_out = F.tanh(bilstm_out)
    bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
    bilstm_out = F.tanh(bilstm_out)
    # CNN and BiLSTM cat
    cnn_x = torch.transpose(cnn_x, 0, 1)
    bilstm_out = torch.transpose(bilstm_out, 0, 1)
    cnn_bilstm_out = torch.cat((cnn_x, bilstm_out), 0)
    cnn_bilstm_out = torch.transpose(cnn_bilstm_out, 0, 1)
    # linear
    cnn_bilstm_out = self.hidden2label1(F.tanh(cnn_bilstm_out))
    # cnn_bilstm_out = F.tanh(self.hidden2label1(cnn_bilstm_out))
    cnn_bilstm_out = self.hidden2label2(F.tanh(cnn_bilstm_out))
    # cnn_bilstm_out = self.hidden2label2(cnn_bilstm_out)
    # output
    logit = cnn_bilstm_out
    return logit
Source: model.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def conv_and_pool(self, x, conv):
    x = F.relu(conv(x)).squeeze(3)  # (N,Co,W)
    x = F.max_pool1d(x, x.size(2)).squeeze(2)
    return x
Source: model_HighWay_CNN.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    # print("source x {} ".format(x.size()))
    x = self.embed(x)  # (N,W,D)
    x = self.dropout(x)
    x = x.unsqueeze(1)  # (N,Ci,W,D)
    if self.args.batch_normalizations is True:
        x = [self.convs1_bn(F.tanh(conv(x))).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
    else:
        # x = [self.dropout(F.relu(conv(x)).squeeze(3)) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        # x = [F.tanh(conv(x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        # x = [conv(x).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
    x = torch.cat(x, 1)
    # x = self.dropout(x)  # (N,len(Ks)*Co)
    if self.args.batch_normalizations is True:
        x = self.fc1_bn(self.fc1(x))
        fc = self.fc2_bn(self.fc2(F.tanh(x)))
    else:
        fc = self.fc1(x)
        # fc = self.fc2(F.relu(x))
    # print("xxx {} ".format(x.size()))
    gate_layer = F.sigmoid(self.gate_layer(x))
    # calculate highway layer values
    # print(" fc_size {} gate_layer_size {}".format(fc.size(), gate_layer.size()))
    gate_fc_layer = torch.mul(fc, gate_layer)
    # print("gate_layer {} ".format(gate_layer))
    # print("1 - gate_layer size {} ".format((1 - gate_layer).size()))
    # written as below it runs, but it does not match the Highway Networks formula
    # gate_input = torch.mul((1 - gate_layer), fc)
    gate_input = torch.mul((1 - gate_layer), x)
    highway_output = torch.add(gate_fc_layer, gate_input)
    logit = self.logit_layer(highway_output)
    return logit
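For reference, a minimal module matching the Highway Networks formula the comments refer to, y = g * H(x) + (1 - g) * x with g = sigmoid(W_g x); this is a sketch, not the project's code:

import torch
import torch.nn as nn

class Highway(nn.Module):
    # dims of H(x), g, and x must all match for the formula to apply
    def __init__(self, dim):
        super().__init__()
        self.h = nn.Linear(dim, dim)  # transform H
        self.g = nn.Linear(dim, dim)  # gate

    def forward(self, x):
        g = torch.sigmoid(self.g(x))
        return g * torch.tanh(self.h(x)) + (1 - g) * x

y = Highway(16)(torch.randn(4, 16))  # (N, dim) in, (N, dim) out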
Source: model_BiLSTM.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    embed = self.embed(x)
    x = embed.view(len(x), embed.size(1), -1)
    bilstm_out, self.hidden = self.bilstm(x, self.hidden)
    bilstm_out = torch.transpose(bilstm_out, 0, 1)
    bilstm_out = torch.transpose(bilstm_out, 1, 2)
    bilstm_out = F.tanh(bilstm_out)
    bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
    y = self.hidden2label1(bilstm_out)
    y = self.hidden2label2(y)
    logit = y
    return logit
Source: model_CNN_BiGRU.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    embed = self.embed(x)
    embed = self.dropout(embed)
    # CNN
    cnn_x = embed
    cnn_x = torch.transpose(cnn_x, 0, 1)
    cnn_x = cnn_x.unsqueeze(1)
    # cnn_x = [F.relu(conv(cnn_x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
    cnn_x = [conv(cnn_x).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
    # cnn_x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in cnn_x]  # [(N,Co), ...]*len(Ks)
    cnn_x = [F.tanh(F.max_pool1d(i, i.size(2)).squeeze(2)) for i in cnn_x]  # [(N,Co), ...]*len(Ks)
    cnn_x = torch.cat(cnn_x, 1)
    cnn_x = self.dropout(cnn_x)
    # BiGRU
    bigru_x = embed.view(len(x), embed.size(1), -1)
    bigru_x, self.hidden = self.bigru(bigru_x, self.hidden)
    bigru_x = torch.transpose(bigru_x, 0, 1)
    bigru_x = torch.transpose(bigru_x, 1, 2)
    # bilstm_out = F.tanh(bilstm_out)
    bigru_x = F.max_pool1d(bigru_x, bigru_x.size(2)).squeeze(2)
    bigru_x = F.tanh(bigru_x)
    # CNN and BiGRU cat
    cnn_x = torch.transpose(cnn_x, 0, 1)
    bigru_x = torch.transpose(bigru_x, 0, 1)
    cnn_bigru_out = torch.cat((cnn_x, bigru_x), 0)
    cnn_bigru_out = torch.transpose(cnn_bigru_out, 0, 1)
    # linear
    cnn_bigru_out = self.hidden2label1(F.tanh(cnn_bigru_out))
    logit = self.hidden2label2(F.tanh(cnn_bigru_out))
    return logit
Source: model_HighWay_BiLSTM_1.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
    x = self.embed(x)
    x = self.dropout(x)
    # x = x.view(len(x), x.size(1), -1)
    # x = embed.view(len(x), embed.size(1), -1)
    bilstm_out, self.hidden = self.bilstm(x, self.hidden)
    bilstm_out = torch.transpose(bilstm_out, 0, 1)
    bilstm_out = torch.transpose(bilstm_out, 1, 2)
    # bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
    bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2))
    bilstm_out = bilstm_out.squeeze(2)
    hidden2label = self.hidden2label1(F.tanh(bilstm_out))
    gate_layer = F.sigmoid(self.gate_layer(bilstm_out))
    # calculate highway layer values
    gate_hidden_layer = torch.mul(hidden2label, gate_layer)
    # written as below it runs, but it does not match the Highway Networks formula
    # gate_input = torch.mul((1 - gate_layer), hidden2label)
    gate_input = torch.mul((1 - gate_layer), bilstm_out)
    highway_output = torch.add(gate_hidden_layer, gate_input)
    logit = self.logit_layer(highway_output)
    return logit
def conv_and_pool(self, x, conv):
    x = F.relu(conv(x)).squeeze(3)  # (N,Co,W)
    x = F.max_pool1d(x, x.size(2)).squeeze(2)
    return x
def forward(self, x):
    # transpose to the shape (sentence batch size, word dim, sequence length)
    x = x.transpose(0, 1).transpose(1, 2)
    feature_maps = []
    for layer in self.layers:
        x = layer(x)
        feature_maps.append(F.max_pool1d(x, kernel_size=x.size(2)).squeeze(2))  # squeeze(2) keeps the batch dim even when N == 1
    features = torch.cat(feature_maps, dim=1)
    return features
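A hypothetical end-to-end run of this feature extractor with a two-layer stack; names and sizes are illustrative, not from the source:

import torch
import torch.nn as nn
import torch.nn.functional as F

layers = nn.ModuleList([nn.Conv1d(10, 8, 3), nn.Conv1d(8, 8, 3)])  # word dim 10, two conv layers
x = torch.randn(5, 3, 10)                  # (T, N, D), the layout the forward above expects
x = x.transpose(0, 1).transpose(1, 2)      # -> (N, D, T)
feature_maps = []
for layer in layers:
    x = layer(x)
    feature_maps.append(F.max_pool1d(x, kernel_size=x.size(2)).squeeze(2))
features = torch.cat(feature_maps, dim=1)  # one pooled vector per layer, concatenated
print(features.shape)                      # torch.Size([3, 16])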