def __init__(self, args):
    """Build a deep CNN text classifier with two embedding channels.

    One embedding is intended to stay trainable ("no_static") and one to be
    fixed ("static"); the two are stacked as a 2-channel input to the first
    convolution stage, followed by a second convolution stage, dropout, and
    two fully-connected layers producing `args.class_num` logits.

    Args:
        args: hyperparameter namespace. Attributes read here:
            embed_num, embed_num_mui, embed_dim, class_num, kernel_num,
            kernel_sizes, max_norm, word_Embedding, pretrained_weight,
            pretrained_weight_static, init_weight, init_weight_value,
            dropout.
    """
    super(DEEP_CNN_MUI, self).__init__()
    self.args = args
    V = args.embed_num            # vocab size for the non-static channel
    V_mui = args.embed_num_mui    # vocab size for the static channel
    D = args.embed_dim            # embedding dimension
    C = args.class_num            # number of output classes
    Ci = 2                        # two input channels: static + non-static embeddings
    Co = args.kernel_num          # feature maps per kernel size (second stage)
    Ks = args.kernel_sizes        # iterable of convolution window heights

    # Both branches printed the same message; hoisted above the if.
    print("max_norm = {} ".format(args.max_norm))
    if args.max_norm is not None:
        self.embed_no_static = nn.Embedding(V, D, max_norm=args.max_norm, scale_grad_by_freq=True)
        self.embed_static = nn.Embedding(V_mui, D, max_norm=args.max_norm, scale_grad_by_freq=True)
    else:
        self.embed_no_static = nn.Embedding(V, D, scale_grad_by_freq=True)
        self.embed_static = nn.Embedding(V_mui, D, scale_grad_by_freq=True)

    if args.word_Embedding:
        # Load pretrained vectors into both embedding tables.
        pretrained_weight = np.array(args.pretrained_weight)
        self.embed_no_static.weight.data.copy_(torch.from_numpy(pretrained_weight))
        pretrained_weight_static = np.array(args.pretrained_weight_static)
        self.embed_static.weight.data.copy_(torch.from_numpy(pretrained_weight_static))
        # Only the non-static embedding is explicitly marked trainable
        # (requires_grad is True by default anyway). NOTE(review): the
        # "static" embedding presumably was meant to be frozen with
        # requires_grad = False — confirm against the training code.
        self.embed_no_static.weight.requires_grad = True

    # Convolution layers. BUGFIX: the original stored these in plain Python
    # lists, so their parameters were never registered with the module —
    # invisible to .parameters(), .to(device), and state_dict().
    # nn.ModuleList registers them properly without changing iteration.
    self.convs1 = nn.ModuleList(
        nn.Conv2d(Ci, D, (K, D), stride=1, padding=(K // 2, 0), bias=True) for K in Ks)
    self.convs2 = nn.ModuleList(
        nn.Conv2d(1, Co, (K, D), stride=1, padding=(K // 2, 0), bias=True) for K in Ks)
    print(self.convs1)
    print(self.convs2)

    if args.init_weight:
        print("Initing W .......")
        for conv1, conv2 in zip(self.convs1, self.convs2):
            # xavier_normal_/constant_ replace the deprecated (and since
            # removed) xavier_normal/uniform; uniform(bias, 0, 0) was just
            # a roundabout constant-zero initialization.
            init.xavier_normal_(conv1.weight.data, gain=np.sqrt(args.init_weight_value))
            init.constant_(conv1.bias, 0)
            init.xavier_normal_(conv2.weight.data, gain=np.sqrt(args.init_weight_value))
            init.constant_(conv2.bias, 0)

    # dropout
    self.dropout = nn.Dropout(args.dropout)
    # classifier head: concatenated pooled features -> hidden -> logits
    in_fea = len(Ks) * Co
    self.fc1 = nn.Linear(in_features=in_fea, out_features=in_fea // 2, bias=True)
    self.fc2 = nn.Linear(in_features=in_fea // 2, out_features=C, bias=True)
# Source: model_DeepCNN_MUI.py (Python) — webpage scrape metadata removed
# (original page listed: reads 27, favorites 0, likes 0, comments 0).