Python code examples for leaky_relu()
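
F.leaky_relu(x, negative_slope) keeps positive values unchanged and multiplies negative values by negative_slope, i.e. max(0, x) + negative_slope * min(0, x). The snippets collected below differ mainly in the slope they pass (0.05 or 0.2) and in whether they apply the activation in place. A minimal standalone sketch for orientation (the tensor shape is illustrative and not taken from any of the projects below):

import torch
import torch.nn.functional as F

x = torch.randn(4, 8)                        # any float tensor
y = F.leaky_relu(x, negative_slope=0.2)      # positives kept, negatives scaled by 0.2
y_ip = F.leaky_relu(x.clone(), 0.2, True)    # third positional argument is inplace=True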

base_model.py (project: PaintsPytorch, author: orashi)
def forward(self, input, VGG):
        x1 = F.leaky_relu(self.down1(input), 0.2, True)
        x2 = F.leaky_relu(self.down2(x1), 0.2, True)
        x3 = F.leaky_relu(self.down3(x2), 0.2, True)
        x4 = F.leaky_relu(self.down4(x3), 0.2, True)
        x5 = F.leaky_relu(self.down5(x4), 0.2, True)
        x6 = F.leaky_relu(self.down6(x5), 0.2, True)
        x7 = F.leaky_relu(self.down7(x6), 0.2, True)
        x8 = F.relu(self.down8(x7), True)

        VGG = F.relu(self.linear(VGG), True)
        x = F.relu(self.up8(torch.cat([x8, VGG.view(-1, 2048, 1, 1)], 1)), True)
        x = F.relu(self.up7(torch.cat([x, x7], 1)), True)
        x = F.relu(self.up6(torch.cat([x, x6], 1)), True)
        x = F.relu(self.up5(torch.cat([x, x5], 1)), True)
        x = F.relu(self.up4(torch.cat([x, x4], 1)), True)
        x = F.relu(self.up3(torch.cat([x, x3], 1)), True)
        x = F.relu(self.up2(torch.cat([x, x2], 1)), True)
        x = F.tanh(self.up1(torch.cat([x, x1], 1)))
        return x


############################
# D network
###########################
models.py (project: simple-pix2pix-pytorch, author: Eiji-Kb)
def forward(self, x):           
        en0 = self.c0(x)
        en1 = self.bnc1(self.c1(F.leaky_relu(en0, negative_slope=0.2)))
        en2 = self.bnc2(self.c2(F.leaky_relu(en1, negative_slope=0.2)))
        en3 = self.bnc3(self.c3(F.leaky_relu(en2, negative_slope=0.2)))
        en4 = self.bnc4(self.c4(F.leaky_relu(en3, negative_slope=0.2)))
        en5 = self.bnc5(self.c5(F.leaky_relu(en4, negative_slope=0.2)))
        en6 = self.bnc6(self.c6(F.leaky_relu(en5, negative_slope=0.2)))
        en7 = self.c7(F.leaky_relu(en6, negative_slope=0.2))

        de7 = self.bnd7(self.d7(F.relu(en7)))
        de6 = F.dropout(self.bnd6(self.d6(F.relu(torch.cat((en6, de7),1)))))
        de5 = F.dropout(self.bnd5(self.d5(F.relu(torch.cat((en5, de6),1)))))

        de4 = F.dropout(self.bnd4(self.d4(F.relu(torch.cat((en4, de5),1)))))
        de3 = self.bnd3(self.d3(F.relu(torch.cat((en3, de4),1))))
        de2 = self.bnd2(self.d2(F.relu(torch.cat((en2, de3),1))))
        de1 = self.bnd1(self.d1(F.relu(torch.cat((en1, de2),1))))

        de0 = F.tanh(self.d0(F.relu(torch.cat((en0, de1),1))))       

        return de0
models.py (project: self-driving-truck, author: aleju)
def forward(self, embeddings_supervised, speeds, is_reverse, steering_wheel, steering_wheel_raw, multiactions_vecs):
        def act(x):
            return F.leaky_relu(x, negative_slope=0.2, inplace=True)

        x_emb_sup = embeddings_supervised # 512x3x5
        x_emb_sup = act(self.emb_sup_c1_sd(self.emb_sup_c1_bn(self.emb_sup_c1(x_emb_sup)))) # 1024x1x3
        x_emb_sup = x_emb_sup.view(-1, 1024*1*3)
        x_emb_sup = add_white_noise(x_emb_sup, 0.005, self.training)

        x_emb_add = torch.cat([speeds, is_reverse, steering_wheel, steering_wheel_raw, multiactions_vecs], 1)
        x_emb_add = act(self.emb_add_fc1_bn(self.emb_add_fc1(x_emb_add)))
        x_emb_add = add_white_noise(x_emb_add, 0.005, self.training)

        x_emb = torch.cat([x_emb_sup, x_emb_add], 1)
        x_emb = F.dropout(x_emb, p=0.05, training=self.training)

        embs = F.relu(self.emb_fc1_bn(self.emb_fc1(x_emb)))
        # this is currently always on, to decrease the likelihood of systematic
        # errors that are repeated over many frames
        embs = add_white_noise(embs, 0.005, True)

        return embs
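
The snippet above relies on an add_white_noise helper defined elsewhere in the project and not reproduced on this page. A minimal sketch of what such a helper could look like, judging only from how it is called here (hypothetical, not the project's actual implementation):

import torch

def add_white_noise(x, std, active):
    # Add zero-mean Gaussian noise with the given standard deviation,
    # but only when active is truthy (e.g. self.training).
    if not active:
        return x
    return x + torch.randn_like(x) * std
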
models.py (project: self-driving-truck, author: aleju)
def forward(self, embeddings, return_v_adv=False):
        def act(x):
            return F.leaky_relu(x, negative_slope=0.2, inplace=True)

        B, _ = embeddings.size()

        x = act(self.fc1_bn(self.fc1(embeddings)))
        x = add_white_noise(x, 0.005, self.training)
        x = F.dropout(x, p=0.1, training=self.training)

        x_v = self.fc_v(x)
        x_v_expanded = x_v.expand(B, 9)

        x_adv = self.fc_advantage(x)
        x_adv_mean = x_adv.mean(dim=1)
        x_adv_mean = x_adv_mean.expand(B, 9)
        x_adv = x_adv - x_adv_mean

        x = x_v_expanded + x_adv

        if return_v_adv:
            return x, (x_v, x_adv)
        else:
            return x
dcgan_w.py (project: deeplearning, author: zxjzxj9)
def forward(self, prior):
        prior = prior.cuda()
        fc_layer = leaky_relu(self.linear1(prior).view(-1, 512, 4, 4), negative_slope = 0.2)
        deconv_layer1 = self.bn1(leaky_relu(self.deconv1(fc_layer), negative_slope = 0.2))
        deconv_layer2 = self.bn2(leaky_relu(self.deconv2(deconv_layer1), negative_slope = 0.2))
        deconv_layer3 = tanh(self.deconv3(deconv_layer2))
        return deconv_layer3

#    Infer without batch normalization cannot improve image quality
#    def infer(self, prior):
#        prior = prior.cuda()
#        fc_layer = leaky_relu(self.linear1(prior).view(-1, 512, 4, 4), negative_slope = 0.2)
#        deconv_layer1 = leaky_relu(self.deconv1(fc_layer), negative_slope = 0.2)
#        deconv_layer2 = leaky_relu(self.deconv2(deconv_layer1), negative_slope = 0.2)
#        deconv_layer3 = tanh(self.deconv3(deconv_layer2))
#        return deconv_layer3
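
The generator above calls leaky_relu and tanh as bare names, so the file presumably imports them at module scope. A plausible import line this snippet assumes (an assumption; torch.nn.functional exposes both functions):

from torch.nn.functional import leaky_relu, tanh
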
cp_model.py (project: PaintsPytorch, author: orashi)
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        x = self.shortcut.forward(x)
        return x + bottleneck
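
This forward pass, repeated with minor variations in several PaintsPytorch model files below, is a residual bottleneck: a 1x1 convolution reduces channels, a 3x3 convolution processes them, a 1x1 convolution expands them back, and a shortcut of the input is added. The layers it calls are defined in the module's constructor, which this page does not show; a hedged sketch of such a constructor (the class name, channel counts and kernel sizes are assumptions, not taken from the project):

import torch.nn as nn

class Bottleneck(nn.Module):
    def __init__(self, in_ch, out_ch, mid_ch):
        super(Bottleneck, self).__init__()
        self.conv_reduce = nn.Conv2d(in_ch, mid_ch, kernel_size=1, bias=False)
        self.conv_conv = nn.Conv2d(mid_ch, mid_ch, kernel_size=3, padding=1, bias=False)
        self.conv_expand = nn.Conv2d(mid_ch, out_ch, kernel_size=1, bias=False)
        # identity when shapes already match, otherwise a 1x1 projection
        self.shortcut = (nn.Sequential() if in_ch == out_ch
                         else nn.Conv2d(in_ch, out_ch, kernel_size=1, bias=False))
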
dev_model.py (project: PaintsPytorch, author: orashi)
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        residual = self.shortcut.forward(x)
        return residual + bottleneck
feat_bn_model.py (project: PaintsPytorch, author: orashi)
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        x = self.shortcut.forward(x)
        return x + bottleneck
naive_model.py (project: PaintsPytorch, author: orashi)
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        residual = self.shortcut.forward(x)
        return residual + bottleneck
pack_model.py (project: PaintsPytorch, author: orashi)
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        x = self.shortcut.forward(x)
        return x + bottleneck
pro_model.py (project: PaintsPytorch, author: orashi)
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        x = self.shortcut.forward(x)
        return x + bottleneck
base_model.py (project: PaintsPytorch, author: orashi)
def forward(self, input, VGG):
        x = self.model(input)
        VGG = F.leaky_relu(self.linear(VGG), 0.2, True)
        return self.final(x + VGG.view(-1, self.ndf * 8, 1, 1))


############################
# VGG feature
###########################
ins_mode.py (project: PaintsPytorch, author: orashi)
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        x = self.shortcut.forward(x)
        return x + bottleneck
simplify.py (project: PaintsPytorch, author: orashi)
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.leaky_relu(bottleneck, 0.2, True)
        bottleneck = self.conv_expand.forward(bottleneck)
        x = self.shortcut.forward(x)
        return x + bottleneck
model.py (project: DistanceGAN, author: sagiebenaim)
def forward(self, x):
        out_1 = F.leaky_relu(self.conv1(x), 0.05)      # (?, 64, 16, 16)
        out_2 = F.leaky_relu(self.conv2(out_1), 0.05)    # (?, 128, 8, 8)

        out_3 = F.leaky_relu(self.conv3(out_2), 0.05)    # ( " )
        out_4 = F.leaky_relu(self.conv4(out_3), 0.05)    # ( " )

        out_5 = F.leaky_relu(self.deconv1(out_4), 0.05)  # (?, 64, 16, 16)
        out = F.tanh(self.deconv2(out_5))              # (?, 3, 32, 32)

        return out
model.py (project: DistanceGAN, author: sagiebenaim)
def forward(self, x):
        out_1 = F.leaky_relu(self.conv1(x), 0.05)      # (?, 64, 16, 16)
        out_2 = F.leaky_relu(self.conv2(out_1), 0.05)    # (?, 128, 8, 8)

        out_3 = F.leaky_relu(self.conv3(out_2), 0.05)    # ( " )
        out_4 = F.leaky_relu(self.conv4(out_3), 0.05)    # ( " )

        out_5 = F.leaky_relu(self.deconv1(out_4), 0.05)  # (?, 64, 16, 16)
        out = F.tanh(self.deconv2(out_5))              # (?, 1, 32, 32)

        return out
model.py (project: DistanceGAN, author: sagiebenaim)
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)    # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), 0.05)  # (?, 128, 8, 8)
        out = F.leaky_relu(self.conv3(out), 0.05)  # (?, 256, 4, 4)
        out = self.fc(out).squeeze()
        return out
model.py (project: DistanceGAN, author: sagiebenaim)
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)    # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), 0.05)  # (?, 128, 8, 8)
        out = F.leaky_relu(self.conv3(out), 0.05)  # (?, 256, 4, 4)
        out = self.fc(out).squeeze()
        return out
model.py (project: mnist-svhn-transfer, author: yunjey)
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)      # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), 0.05)    # (?, 128, 8, 8)

        out = F.leaky_relu(self.conv3(out), 0.05)    # ( " )
        out = F.leaky_relu(self.conv4(out), 0.05)    # ( " )

        out = F.leaky_relu(self.deconv1(out), 0.05)  # (?, 64, 16, 16)
        out = F.tanh(self.deconv2(out))              # (?, 3, 32, 32)
        return out
model.py (project: mnist-svhn-transfer, author: yunjey)
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)      # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), 0.05)    # (?, 128, 8, 8)

        out = F.leaky_relu(self.conv3(out), 0.05)    # ( " )
        out = F.leaky_relu(self.conv4(out), 0.05)    # ( " )

        out = F.leaky_relu(self.deconv1(out), 0.05)  # (?, 64, 16, 16)
        out = F.tanh(self.deconv2(out))              # (?, 1, 32, 32)
        return out
model.py (project: mnist-svhn-transfer, author: yunjey)
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)    # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), 0.05)  # (?, 128, 8, 8)
        out = F.leaky_relu(self.conv3(out), 0.05)  # (?, 256, 4, 4)
        out = self.fc(out).squeeze()
        return out
model.py (project: mnist-svhn-transfer, author: yunjey)
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)    # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), 0.05)  # (?, 128, 8, 8)
        out = F.leaky_relu(self.conv3(out), 0.05)  # (?, 256, 4, 4)
        out = self.fc(out).squeeze()
        return out
models.py (project: simple-pix2pix-pytorch, author: Eiji-Kb)
def forward(self, x1, x2):
        h = self.c0(torch.cat((x1, x2),1))
        h = self.bnc1(self.c1(F.leaky_relu(h, negative_slope=0.2)))
        h = self.bnc2(self.c2(F.leaky_relu(h, negative_slope=0.2)))
        h = self.bnc3(self.c3(F.leaky_relu(h, negative_slope=0.2)))
        h = self.c4(F.leaky_relu(h, negative_slope=0.2))
        h = F.sigmoid(h)

        return h
model.py (project: pytorch-tutorial, author: yunjey)
def forward(self, z):
        z = z.view(z.size(0), z.size(1), 1, 1)      # If image_size is 64, output shape is as below.
        out = self.fc(z)                            # (?, 512, 4, 4)
        out = F.leaky_relu(self.deconv1(out), 0.05)  # (?, 256, 8, 8)
        out = F.leaky_relu(self.deconv2(out), 0.05)  # (?, 128, 16, 16)
        out = F.leaky_relu(self.deconv3(out), 0.05)  # (?, 64, 32, 32)
        out = F.tanh(self.deconv4(out))             # (?, 3, 64, 64)
        return out
model.py (project: pytorch-tutorial, author: yunjey)
def forward(self, x):                         # If image_size is 64, output shape is as below.
        out = F.leaky_relu(self.conv1(x), 0.05)    # (?, 64, 32, 32)
        out = F.leaky_relu(self.conv2(out), 0.05)  # (?, 128, 16, 16)
        out = F.leaky_relu(self.conv3(out), 0.05)  # (?, 256, 8, 8)
        out = F.leaky_relu(self.conv4(out), 0.05)  # (?, 512, 4, 4)
        out = self.fc(out).squeeze()
        return out
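
The shape comments above (a 64x64 input halved at every layer while the channel count doubles) are consistent with 4x4 convolutions using stride 2 and padding 1. Hypothetical layer definitions that would produce exactly those shapes, shown for illustration and not copied from the pytorch-tutorial source:

import torch.nn as nn

conv1 = nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1)     # 64x64 -> 32x32
conv2 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1)   # 32x32 -> 16x16
conv3 = nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1)  # 16x16 -> 8x8
conv4 = nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1)  # 8x8   -> 4x4
fc = nn.Conv2d(512, 1, kernel_size=4)                            # 4x4   -> 1x1 score
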
mgru_rte_model.py (project: Recognizing-Textual-Entailment, author: codedecde)
def forward(self, premise, hypothesis, training=False):
        '''
        inputs:
            premise : batch x T
            hypothesis : batch x T
        outputs :
            pred : batch x num_classes
        '''
        self.train(training)
        batch_size = premise.size(0)

        mask_p = torch.ne(premise, 0).type(dtype)
        mask_h = torch.ne(hypothesis, 0).type(dtype)

        encoded_p = self.embedding(premise)  # batch x T x n_embed
        encoded_p = F.dropout(encoded_p, p=self.options['DROPOUT'], training=training)

        encoded_h = self.embedding(hypothesis)  # batch x T x n_embed
        encoded_h = F.dropout(encoded_h, p=self.options['DROPOUT'], training=training)

        encoded_p = encoded_p.transpose(1, 0)  # T x batch x n_embed
        encoded_h = encoded_h.transpose(1, 0)  # T x batch x n_embed

        mask_p = mask_p.transpose(1, 0)  # T x batch
        mask_h = mask_h.transpose(1, 0)  # T x batch

        h_p_0, h_n_0 = self.init_hidden(batch_size)  # 1 x batch x n_dim
        o_p, h_n = self._gru_forward(self.p_gru, encoded_p, mask_p, h_p_0)  # o_p : T x batch x n_dim
                                                                            # h_n : 1 x batch x n_dim

        o_h, h_n = self._gru_forward(self.h_gru, encoded_h, mask_h, h_n_0)  # o_h : T x batch x n_dim
                                                            # h_n : 1 x batch x n_dim

        r_0 = self.attn_gru_init_hidden(batch_size)
        h_star, alpha_vec = self._attn_gru_forward(o_h, mask_h, r_0, o_p, mask_p)

        h_star = self.out(h_star)  # batch x num_classes
        if self.options['LAST_NON_LINEAR']:
            h_star = F.leaky_relu(h_star)  # Non linear projection
        pred = F.log_softmax(h_star)
        return pred
model.py (project: dong_iccv_2017, author: woozzu)
def forward(self, img, txt_feat):
        img_feat = self.encoder(img)
        img_feat = F.leaky_relu(img_feat + self.residual_branch(img_feat), 0.2)
        txt_feat = self.compression(txt_feat)

        txt_feat = txt_feat.unsqueeze(-1).unsqueeze(-1)
        txt_feat = txt_feat.repeat(1, 1, img_feat.size(2), img_feat.size(3))
        fusion = torch.cat((img_feat, txt_feat), dim=1)
        output = self.classifier(fusion)
        return output.squeeze()
models.py (project: self-driving-truck, author: aleju)
def forward(self, embeddings, softmax):
        def act(x):
            return F.leaky_relu(x, negative_slope=0.2, inplace=True)

        x = act(self.fc1_bn(self.fc1(embeddings)))
        x = add_white_noise(x, 0.005, self.training)
        x = F.dropout(x, p=0.1, training=self.training)
        x = self.fc2(x)
        if softmax:
            return F.softmax(x)
        else:
            return x
dcgan_w.py (project: deeplearning, author: zxjzxj9)
def forward(self, image):
        image = image.cuda()
        conv_layer1 = self.bn1(leaky_relu(self.conv1(image), negative_slope = 0.2))
        conv_layer2 = self.bn2(leaky_relu(self.conv2(conv_layer1), negative_slope = 0.2))
        conv_layer3 = leaky_relu(self.conv3(conv_layer2), negative_slope = 0.2)
        fc_layer1 = self.linear1(conv_layer3.view(-1, 4*4*512))
        return fc_layer1
dcgan.py (project: deeplearning, author: zxjzxj9)
def forward(self, prior):
        prior = prior.cuda()
        fc_layer = leaky_relu(self.linear1(prior).view(-1, 512, 4, 4), negative_slope = 0.2)
        deconv_layer1 = self.bn1(leaky_relu(self.deconv1(fc_layer), negative_slope = 0.2))
        deconv_layer2 = self.bn2(leaky_relu(self.deconv2(deconv_layer1), negative_slope = 0.2))
        deconv_layer3 = tanh(self.deconv3(deconv_layer2))
        return deconv_layer3

