Python relu() usage examples (source code)
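The snippets below are collected from open-source PyTorch projects and show typical ways torch.nn.functional.relu (imported as F.relu) is called inside a module's forward() method. As a quick standalone reference, not taken from any of the projects below, F.relu simply zeroes out negative activations:

import torch
import torch.nn.functional as F

x = torch.tensor([-1.0, 0.0, 2.0])
print(F.relu(x))           # tensor([0., 0., 2.])
F.relu(x, inplace=True)    # the inplace variant overwrites x's storage instead of allocating a new tensor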

models.py (project: drl.pth, author: seba-1511)
def forward(self, x, *args, **kwargs):
        x = F.relu(self.affine1(x))
        return x
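For context, a minimal module that this forward() could belong to is sketched below; the layer name affine1 comes from the snippet, but the input and hidden sizes are illustrative assumptions rather than values from drl.pth.

import torch.nn as nn
import torch.nn.functional as F

class Policy(nn.Module):
    def __init__(self, num_inputs=4, hidden_size=128):  # sizes are assumptions
        super(Policy, self).__init__()
        self.affine1 = nn.Linear(num_inputs, hidden_size)

    def forward(self, x, *args, **kwargs):
        # ReLU keeps only the positive part of the affine projection
        return F.relu(self.affine1(x))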
resnext.py (project: YellowFin_Pytorch, author: JianGoForIt)
def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
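This is the common post-activation residual pattern: two conv-BN-ReLU stages, a third conv-BN stage, a shortcut addition, and a final ReLU applied to the sum. Layer definitions that would support it are sketched below; the 1x1/3x3/1x1 kernel layout and the channel sizes are assumptions in the spirit of bottleneck blocks, not code copied from YellowFin_Pytorch.

import torch.nn as nn

class Bottleneck(nn.Module):
    def __init__(self, in_planes=64, planes=64, stride=1, expansion=4):  # hypothetical sizes
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(expansion * planes)
        # the shortcut must match the main branch's shape before the addition
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(expansion * planes))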
resnext.py (project: YellowFin_Pytorch, author: JianGoForIt)
def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # out = self.layer4(out)
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
example1.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
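This forward() follows the well-known PyTorch MNIST example. For reference, a module definition consistent with it is sketched below: with 28x28 inputs, two 5x5 convolutions each followed by 2x2 max pooling leave a 20x4x4 feature map, which is why the code flattens to 320 features. The exact layer sizes are an assumption based on that tutorial, not verified against pytorch_tutorial.

import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)    # 1x28x28 -> 10x24x24, pooled to 10x12x12
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)   # 10x12x12 -> 20x8x8, pooled to 20x4x4
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)                   # 320 = 20 * 4 * 4
        self.fc2 = nn.Linear(50, 10)                    # 10 MNIST classes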
example2_gradient.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)

        # Register a backward hook
        x.register_hook(myGradientHook)

        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
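myGradientHook is referenced but not defined in this excerpt. A hook registered with register_hook is called during backward() with the gradient flowing into that tensor, and may optionally return a modified gradient. A minimal hypothetical implementation, purely for illustration, could look like:

def myGradientHook(grad):
    # receives the gradient w.r.t. the flattened activations during backward();
    # returning a tensor replaces the gradient, returning None leaves it unchanged
    print('gradient norm:', grad.norm().item())
    return grad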
example3.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
example2_adv_example.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
example5.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.elu(F.max_pool2d(self.conv1(x), 2))
        x = F.elu(F.max_pool2d(self.bn2(self.conv2(x)), 2))
        x = F.elu(F.max_pool2d(self.bn3(self.conv3(x)), 2))
        x = F.elu(F.max_pool2d(self.bn4(self.conv4(x)), 2))

        x = x.view(-1, 750)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
dev_model.py (project: PaintsPytorch, author: orashi)
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.relu(bottleneck, inplace=True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.relu(bottleneck, inplace=True)
        bottleneck = self.conv_expand.forward(bottleneck)
        return x + bottleneck
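Here the ReLU calls use inplace=True, which overwrites the convolution output in place instead of allocating a new tensor; that saves memory and is safe in this position because the pre-activation values are not needed again by autograd. A sketch of the 1x1 reduce / 3x3 conv / 1x1 expand layers implied by the names is given below; the channel widths are illustrative assumptions, not values from PaintsPytorch.

import torch.nn as nn

class ResidualBottleneck(nn.Module):
    def __init__(self, channels=256, bottleneck=64):  # hypothetical widths
        super(ResidualBottleneck, self).__init__()
        self.conv_reduce = nn.Conv2d(channels, bottleneck, kernel_size=1, bias=False)
        self.conv_conv = nn.Conv2d(bottleneck, bottleneck, kernel_size=3, padding=1, bias=False)
        self.conv_expand = nn.Conv2d(bottleneck, channels, kernel_size=1, bias=False)
        # no ReLU after conv_expand: the block returns x + bottleneck unactivated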
dev_model.py (project: PaintsPytorch, author: orashi)
def forward(self, input):
        return self.model(input)

        # TODO: fix relu bug
naive_model.py (project: PaintsPytorch, author: orashi)
def forward(self, x):
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = F.relu(bottleneck, inplace=True)
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = F.relu(bottleneck, inplace=True)
        bottleneck = self.conv_expand.forward(bottleneck)
        return x + bottleneck
naive_model.py (project: PaintsPytorch, author: orashi)
def forward(self, input):
        return self.model(input)

        # TODO: fix relu bug
OnepassModel.py (project: PaintsPytorch, author: orashi)
def forward(self, input, inputV):
        x1 = F.leaky_relu(self.down1(input), 0.2, True)
        x2 = F.leaky_relu(self.down2(x1), 0.2, True)
        x3 = F.leaky_relu(self.down3(x2), 0.2, True)
        x4 = F.leaky_relu(self.down4(x3), 0.2, True)
        x5 = F.leaky_relu(self.down5(x4), 0.2, True)
        x6 = F.leaky_relu(self.down6(x5), 0.2, True)
        x7 = F.leaky_relu(self.down7(x6), 0.2, True)
        x8 = F.relu(self.down8(x7), True)

        v1 = F.leaky_relu(self.downV1(inputV), 0.2, True)
        v2 = F.leaky_relu(self.downV2(v1), 0.2, True)
        v3 = F.leaky_relu(self.downV3(v2), 0.2, True)
        v4 = F.leaky_relu(self.downV4(v3), 0.2, True)
        v5 = F.leaky_relu(self.downV5(v4), 0.2, True)
        v6 = F.leaky_relu(self.downV6(v5), 0.2, True)
        v7 = F.leaky_relu(self.downV7(v6), 0.2, True)
        v8 = F.relu(self.downV8(v7), True)

        x = F.relu(self.up8(torch.cat([x8, v8], 1)), True)
        x = F.relu(self.up7(torch.cat([x, x7, v7], 1)), True)
        x = F.relu(self.up6(torch.cat([x, x6, v6], 1)), True)
        x = F.relu(self.up5(torch.cat([x, x5, v5], 1)), True)
        x = F.relu(self.up4(torch.cat([x, x4, v4], 1)), True)
        x = F.relu(self.up3(torch.cat([x, x3, v3], 1)), True)
        x = F.relu(self.up2(torch.cat([x, x2], 1)), True)
        x = F.tanh(self.up1(torch.cat([x, x1], 1)))
        return x
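In this encoder/decoder (U-Net style) generator, the bare True passed to F.leaky_relu and F.relu is the positional inplace flag (0.2 is the negative slope for leaky_relu), and torch.cat(..., 1) concatenates skip connections along the channel dimension. The short check below shows the positional and keyword spellings are the same call; note also that newer PyTorch releases prefer torch.tanh over the deprecated F.tanh.

import torch
import torch.nn.functional as F

conv_out = torch.randn(1, 8, 4, 4)
a = F.leaky_relu(conv_out, 0.2)                                # positional negative_slope, as in the snippet
b = F.leaky_relu(conv_out, negative_slope=0.2, inplace=False)  # the same call spelled with keywords
assert torch.equal(a, b)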


densenet.py (project: ResNeXt-DenseNet, author: D-X-Y)
def forward(self, x):
    out = self.conv1(F.relu(self.bn1(x)))
    out = self.conv2(F.relu(self.bn2(out)))
    out = torch.cat((x, out), 1)
    return out
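Note the pre-activation ordering here: batch norm and ReLU are applied before each convolution, and the block's input is concatenated with its output along the channel dimension (dense connectivity). Layer definitions consistent with this pattern are sketched below; the growth rate and channel counts are assumptions, not values from ResNeXt-DenseNet.

import torch.nn as nn

class DenseBottleneck(nn.Module):
    def __init__(self, in_planes=64, growth_rate=12):  # hypothetical sizes
        super(DenseBottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, 4 * growth_rate, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(4 * growth_rate)
        self.conv2 = nn.Conv2d(4 * growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)
        # forward() concatenates x with the growth_rate new feature maps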
densenet.py (project: ResNeXt-DenseNet, author: D-X-Y)
def forward(self, x):
    out = self.conv1(F.relu(self.bn1(x)))
    out = torch.cat((x, out), 1)
    return out
densenet.py (project: ResNeXt-DenseNet, author: D-X-Y)
def forward(self, x):
    out = self.conv1(F.relu(self.bn1(x)))
    out = F.avg_pool2d(out, 2)
    return out
densenet.py (project: ResNeXt-DenseNet, author: D-X-Y)
def forward(self, x):
    out = self.conv1(x)
    out = self.trans1(self.dense1(out))
    out = self.trans2(self.dense2(out))
    out = self.dense3(out)
    out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
    out = F.log_softmax(self.fc(out))
    return out
resnet.py (project: ResNeXt-DenseNet, author: D-X-Y)
def forward(self, x):
    residual = x

    basicblock = self.conv_a(x)
    basicblock = self.bn_a(basicblock)
    basicblock = F.relu(basicblock, inplace=True)

    basicblock = self.conv_b(basicblock)
    basicblock = self.bn_b(basicblock)

    if self.downsample is not None:
      residual = self.downsample(x)

    return F.relu(residual + basicblock, inplace=True)
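In this basic block the second batch-norm output is left unactivated, the (possibly downsampled) residual is added, and ReLU is applied to the sum. When the spatial size or channel count changes, downsample is conventionally a strided 1x1 convolution followed by batch norm; the sketch below shows that convention with assumed sizes, not the exact code from ResNeXt-DenseNet.

import torch.nn as nn

# hypothetical example: halving resolution while doubling channels from 16 to 32
downsample = nn.Sequential(
    nn.Conv2d(16, 32, kernel_size=1, stride=2, bias=False),
    nn.BatchNorm2d(32))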
resnext.py (project: ResNeXt-DenseNet, author: D-X-Y)
def forward(self, x):
    x = self.conv_1_3x3(x)
    x = F.relu(self.bn_1(x), inplace=True)
    x = self.stage_1(x)
    x = self.stage_2(x)
    x = self.stage_3(x)
    x = self.avgpool(x)
    x = x.view(x.size(0), -1)
    return self.classifier(x)
retinanet.py (project: RetinaNet, author: c0nn3r)
def forward(self, x):

        # don't need resnet_feature_2 as it is too large
        _, resnet_feature_3, resnet_feature_4, resnet_feature_5 = self.resnet(x)

        pyramid_feature_6 = self.pyramid_transformation_6(resnet_feature_5)
        pyramid_feature_7 = self.pyramid_transformation_7(F.relu(pyramid_feature_6))

        pyramid_feature_5 = self.pyramid_transformation_5(resnet_feature_5)
        pyramid_feature_4 = self.pyramid_transformation_4(resnet_feature_4)
        upsampled_feature_5 = self._upsample(pyramid_feature_5, pyramid_feature_4)

        pyramid_feature_4 = self.upsample_transform_1(
            torch.add(upsampled_feature_5, pyramid_feature_4)
        )

        pyramid_feature_3 = self.pyramid_transformation_3(resnet_feature_3)
        upsampled_feature_4 = self._upsample(pyramid_feature_4, pyramid_feature_3)

        pyramid_feature_3 = self.upsample_transform_2(
            torch.add(upsampled_feature_4, pyramid_feature_3)
        )

        return (pyramid_feature_3,
                pyramid_feature_4,
                pyramid_feature_5,
                pyramid_feature_6,
                pyramid_feature_7)
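The _upsample helper is referenced but not shown in this excerpt. In feature pyramid networks it typically resizes the coarser feature map to the finer map's spatial size before the element-wise torch.add. The hypothetical function below sketches that behaviour (the name upsample_to and the nearest-neighbour mode are assumptions; in the original it is a method on the module):

import torch
import torch.nn.functional as F

def upsample_to(source, target):
    # resize `source` to match `target`'s height and width so the two maps can be added
    return F.interpolate(source, size=target.size()[2:], mode='nearest')

coarse = torch.randn(1, 256, 8, 8)
fine = torch.randn(1, 256, 16, 16)
print(upsample_to(coarse, fine).shape)   # torch.Size([1, 256, 16, 16])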

