Example source code for Python's dropout()

my_resnet.py (project: pytorch-planet-amazon, author: rwightman)
def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.global_pool(x)
        x = x.view(x.size(0), -1)

        if self.drop_rate > 0.:
            x = F.dropout(x, p=self.drop_rate, training=self.training)

        x = self.fc(x)
        return x
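
Here dropout is applied with the functional API, F.dropout(x, p=self.drop_rate, training=self.training), so it is only active while the module is in training mode. A minimal sketch of the equivalent module-based style (class and attribute names below are illustrative, not taken from this project):

import torch.nn as nn

class ClassifierHead(nn.Module):
    def __init__(self, in_features, num_classes, drop_rate=0.5):
        super(ClassifierHead, self).__init__()
        self.drop = nn.Dropout(p=drop_rate)   # follows .train()/.eval() automatically
        self.fc = nn.Linear(in_features, num_classes)

    def forward(self, x):
        return self.fc(self.drop(x))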
custom.py (project: seqmod, author: emanjavacas)
def forward(self, inp):
        """
        :param inp: torch.FloatTensor (batch_size x inp_size)

        :return: torch.FloatTensor (batch_size x nb_classes)
        """
        # hidden layers
        for layer in self.layers:
            out = layer(inp)
            if self.act is not None:
                out = getattr(F, self.act)(out)
            if self.dropout > 0:
                out = F.dropout(out, p=self.dropout, training=self.training)
            inp = out

        # output projection
        out = self.output(out)

        return out
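
Note that the activation is resolved by name: getattr(F, self.act) expects self.act to be the name of a function in torch.nn.functional. A quick illustration (assuming the activation name is 'relu'):

import torch
import torch.nn.functional as F

act = 'relu'
x = torch.randn(4, 8)
assert torch.equal(getattr(F, act)(x), F.relu(x))  # lookup by name gives the same function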
custom.py (project: seqmod, author: emanjavacas)
def __init__(self, cell, num_layers, in_dim, hid_dim,
                 dropout=0.0, **kwargs):
        """
        cell: str or custom cell class
        """
        super(BaseStackedRNN, self).__init__()
        self.in_dim = in_dim
        self.hid_dim = hid_dim
        self.has_dropout = False
        if dropout:
            self.has_dropout = True
            self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.layers = nn.ModuleList()

        if isinstance(cell, str):
            cell = getattr(nn, cell)
        for i in range(num_layers):
            self.layers.append(cell(in_dim, hid_dim, **kwargs))
            in_dim = hid_dim
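
The matching forward is not shown in this snippet. A minimal sketch of how such a stack is usually stepped, assuming the cells are step-wise (e.g. nn.GRUCell) and hidden holds one state per layer; this is an illustration, not the seqmod implementation:

def forward(self, inp, hidden):
        new_hidden = []
        for i, layer in enumerate(self.layers):
            h = layer(inp, hidden[i])                    # one step of layer i
            new_hidden.append(h)
            inp = h
            if self.has_dropout and i + 1 < self.num_layers:
                inp = self.dropout(inp)                  # dropout between layers, not after the last
        return inp, new_hidden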
model.py (project: cnn-graph-classification, author: giannisnik)
def forward(self, x_in):
        out = F.relu(F.max_pool3d(self.conv(x_in), (1, self.max_document_length, 1)))
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out = F.dropout(out, training=self.training)
        out = self.fc2(out)
        return F.log_softmax(out)
example1.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
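
Because training=self.training is passed, dropout follows the module's train/eval mode. A self-contained illustration of that switch (Tiny is a made-up minimal module, not part of the tutorial):

import torch
import torch.nn as nn
import torch.nn.functional as F

class Tiny(nn.Module):
    def forward(self, x):
        return F.dropout(x, training=self.training)

m = Tiny()
x = torch.ones(2, 4)
m.train(); print(m(x))   # dropout active: some elements zeroed, rest rescaled
m.eval();  print(m(x))   # dropout disabled: x returned unchanged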
example2_gradient.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)

        # Register a backward hook
        x.register_hook(myGradientHook)

        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
example3.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
example2_adv_example.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
example5.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.elu(F.max_pool2d(self.conv1(x), 2))
        x = F.elu(F.max_pool2d(self.bn2(self.conv2(x)), 2))
        x = F.elu(F.max_pool2d(self.bn3(self.conv3(x)), 2))
        x = F.elu(F.max_pool2d(self.bn4(self.conv4(x)), 2))

        x = x.view(-1, 750)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
example6_squeezenet.py (project: pytorch_tutorial, author: soravux)
def forward(self, x):
        x = F.dropout(x, training=self.training)
        x = self.conv(x)
        x = self.avgpool(x)
        x = F.log_softmax(x)
        x = x.squeeze(dim=3).squeeze(dim=2)
        return x
layers.py (project: DenseNet, author: kevinzakka)
def __init__(self, in_channels, out_channels, bottleneck, p):
        """
        Initialize the different parts of the SubBlock.

        Params
        ------
        - in_channels: number of input channels in the convolution.
        - out_channels: number of output channels in the convolution.
        - bottleneck: if true, applies the bottleneck variant of H(x).
        - p: if greater than 0, applies dropout after the convolution.
        """
        super(SubBlock, self).__init__()
        self.bottleneck = bottleneck
        self.p = p

        in_channels_2 = in_channels
        out_channels_2 = out_channels

        if bottleneck:
            in_channels_1 = in_channels
            out_channels_1 = out_channels * 4
            in_channels_2 = out_channels_1

            self.bn1 = nn.BatchNorm2d(in_channels_1)
            self.conv1 = nn.Conv2d(in_channels_1,
                                   out_channels_1,
                                   kernel_size=1)

        self.bn2 = nn.BatchNorm2d(in_channels_2)
        self.conv2 = nn.Conv2d(in_channels_2, 
                               out_channels_2, 
                               kernel_size=3, 
                               padding=1)
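
The SubBlock's forward is not included here. A sketch of the usual DenseNet pre-activation ordering (BN, ReLU, Conv, with the optional bottleneck and dropout) consistent with the layers defined above; this is an assumption, not the project's verified code:

def forward(self, x):
        out = x
        if self.bottleneck:
            out = self.conv1(F.relu(self.bn1(out)))      # 1x1 bottleneck convolution
            if self.p > 0:
                out = F.dropout(out, p=self.p, training=self.training)
        out = self.conv2(F.relu(self.bn2(out)))          # 3x3 convolution
        if self.p > 0:
            out = F.dropout(out, p=self.p, training=self.training)
        return out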
layers.py (project: DenseNet, author: kevinzakka)
def forward(self, x):
        out = self.pool(self.conv(F.relu(self.bn(x))))
        if self.p > 0:
            out = F.dropout(out, p=self.p, training=self.training)
        return out
model.py (project: torch_light, author: ne7ermore)
def __init__(self, args):
        super().__init__()

        for k, v in args.__dict__.items():
            self.__setattr__(k, v)

        self.num_directions = 2 if self.bidirectional else 1
        self.lookup_table = nn.Embedding(self.vocab_size, self.embed_dim)
        self.lstm = nn.LSTM(self.embed_dim,
                    self.hidden_size,
                    self.lstm_layers,
                    batch_first=True,
                    dropout=self.dropout,
                    bidirectional=self.bidirectional)
        self.lr = nn.Linear(self.hidden_size*self.num_directions,
                        self.vocab_size)

        self._init_weights()
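
Note that nn.LSTM's dropout argument only applies between stacked LSTM layers, so it has no effect (and recent PyTorch versions emit a warning) when lstm_layers == 1:

# dropout is used between the two layers here; with num_layers=1 it would be ignored
lstm = nn.LSTM(128, 256, num_layers=2, batch_first=True, dropout=0.3)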
model.py (project: torch_light, author: ne7ermore)
def forward(self, input, hidden):
        encode = self.lookup_table(input)
        lstm_out, hidden = self.lstm(encode, hidden)
        lstm_out = F.dropout(lstm_out, p=self.dropout, training=self.training)  # only drop during training
        out = self.lr(lstm_out.contiguous().view(-1, lstm_out.size(2)))
        return F.log_softmax(out), hidden
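
F.dropout defaults to training=True, which is why it is gated on self.training above; otherwise dropout would stay active at evaluation time. A quick check:

import torch
import torch.nn.functional as F

x = torch.ones(1, 10)
print(F.dropout(x, p=0.5))                   # training defaults to True: elements zeroed, rest scaled by 2
print(F.dropout(x, p=0.5, training=False))   # identity: x is returned unchanged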
layers.py (project: torch_light, author: ne7ermore)
def __init__(self, d_k, dropout):
        super().__init__()
        self.temper = np.power(d_k, 0.5)
        self.dropout = nn.Dropout(dropout)
        self.softmax = nn.Softmax(dim=-1)
layers.py (project: torch_light, author: ne7ermore)
def forward(self, q, k, v, attn_mask):
        attn = torch.bmm(q, k.transpose(1, 2)) / self.temper
        attn.data.masked_fill_(attn_mask, -float('inf'))

        attn = self.softmax(attn.view(-1, attn.size(2))).view(*attn.size())
        attn = self.dropout(attn)
        return torch.bmm(attn, v)
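
A minimal usage sketch with dummy tensors, assuming this __init__/forward pair belongs to the ScaledDotProductAttention class referenced further below (shapes: batch x seq_len x d_k for q, k and v; the mask marks positions to block):

import torch

batch, seq_len, d_k = 2, 5, 64
attention = ScaledDotProductAttention(d_k, dropout=0.1)
q = torch.randn(batch, seq_len, d_k)
k = torch.randn(batch, seq_len, d_k)
v = torch.randn(batch, seq_len, d_k)
mask = torch.zeros(batch, seq_len, seq_len, dtype=torch.bool)   # nothing masked out
out = attention(q, k, v, mask)                                  # (batch, seq_len, d_k)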
layers.py (project: torch_light, author: ne7ermore)
def __init__(self, n_head, d_model, dropout):
        super().__init__()
        self.n_head = n_head
        self.d_v = self.d_k = d_k = d_model // n_head

        for name in ["w_qs", "w_ks", "w_vs"]:
            self.__setattr__(name,
                nn.Parameter(torch.FloatTensor(n_head, d_model, d_k)))

        self.attention = ScaledDotProductAttention(d_k, dropout)
        self.lm = LayerNorm(d_model)
        self.w_o = nn.Linear(d_model, d_model, bias=False)
        self.dropout = dropout

        self._init_weight()
layers.py (project: torch_light, author: ne7ermore)
def __init__(self, d_model, d_ff, dropout):
        super().__init__()

        self.seq = nn.Sequential(
                nn.Conv1d(d_model, d_ff, 1),
                nn.ReLU(),
                nn.Conv1d(d_ff, d_model, 1),
                nn.Dropout(dropout)
            )
        self.lm = LayerNorm(d_model)
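
The forward pass is not shown. Since nn.Conv1d expects (batch, channels, length), the usual pattern transposes around the feed-forward block and adds a residual connection before layer normalization; a sketch under that assumption, not necessarily the project's exact code:

def forward(self, x):
        # x: (batch, seq_len, d_model)
        out = self.seq(x.transpose(1, 2)).transpose(1, 2)   # position-wise feed-forward
        return self.lm(out + x)                             # residual connection + layer norm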
layers.py (project: torch_light, author: ne7ermore)
def __init__(self, d_model, d_ff, n_head, dropout):
        super().__init__()
        self.mh = MultiHeadAtt(n_head, d_model, dropout)
        self.pw = PositionWise(d_model, d_ff, dropout)
layers.py (project: torch_light, author: ne7ermore)
def __init__(self, d_model, d_ff, n_head, dropout=0.1):
        super().__init__()
        self.slf_mh = MultiHeadAtt(n_head, d_model, dropout)
        self.dec_mh = MultiHeadAtt(n_head, d_model, dropout)
        self.pw = PositionWise(d_model, d_ff, dropout)
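
The decoder layer's forward is omitted here. Assuming MultiHeadAtt takes (q, k, v, attn_mask) like the attention module above, a typical composition is masked self-attention, encoder-decoder attention, then the position-wise block; this is an illustrative sketch, not the project's verified code:

def forward(self, dec_inp, enc_out, slf_attn_mask, dec_enc_attn_mask):
        out = self.slf_mh(dec_inp, dec_inp, dec_inp, slf_attn_mask)    # masked self-attention
        out = self.dec_mh(out, enc_out, enc_out, dec_enc_attn_mask)    # attend over encoder outputs
        return self.pw(out)                                            # position-wise feed-forward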

