Python conv2d() usage examples

metrics.py (project: DeblurGAN, author: KupynOrest)
def SSIM(img1, img2):
    (_, channel, _, _) = img1.size()
    window_size = 11
    window = create_window(window_size, channel)
    # Use integer division: on Python 3, window_size/2 would pass a float to conv2d
    mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
    mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1*mu2

    sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
    sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
    sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2

    C1 = 0.01**2
    C2 = 0.03**2

    ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean()
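
The snippet assumes a create_window helper that builds a per-channel Gaussian filter for the grouped convolutions above. A minimal sketch in the style of pytorch-ssim (the sigma value is an assumption):

from math import exp
import torch
from torch.autograd import Variable

def gaussian(window_size, sigma):
    # 1D Gaussian, normalized to sum to 1
    gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2))
                          for x in range(window_size)])
    return gauss / gauss.sum()

def create_window(window_size, channel, sigma=1.5):
    # Outer product of the 1D kernel gives a 2D Gaussian; expand it to one
    # filter per channel so it can be used with groups=channel.
    _1d = gaussian(window_size, sigma).unsqueeze(1)
    _2d = _1d.mm(_1d.t()).unsqueeze(0).unsqueeze(0)
    return Variable(_2d.expand(channel, 1, window_size, window_size).contiguous())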
__init__.py (project: pytorch-ssim, author: Po-Hsun-Su)
def _ssim(img1, img2, window, window_size, channel, size_average = True):
    mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
    mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1*mu2

    sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
    sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
    sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2

    C1 = 0.01**2
    C2 = 0.03**2

    ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))

    if size_average:
        return ssim_map.mean()
    else:
        return ssim_map.mean(1).mean(1).mean(1)
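
A minimal driver for _ssim, reusing the create_window helper sketched earlier and assuming NCHW inputs in [0, 1] (which is what C1 = 0.01**2 and C2 = 0.03**2 presuppose):

def ssim(img1, img2, window_size=11, size_average=True):
    (_, channel, _, _) = img1.size()
    window = create_window(window_size, channel).type_as(img1)  # match dtype/device
    return _ssim(img1, img2, window, window_size, channel, size_average)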
modules.py (project: braindecode, author: robintibor)
def forward(self, x):
        # Create weights for the convolution on demand:
        # size or type of x changed...
        in_channels = x.size()[1]
        weight_shape = (in_channels, 1,
                        self.kernel_size[0], self.kernel_size[1])
        if self.weights is None or (
                (tuple(self.weights.size()) != tuple(weight_shape)) or (
                  self.weights.is_cuda != x.is_cuda
                ) or (
                    self.weights.data.type() != x.data.type()
                )):
            n_pool = np.prod(self.kernel_size)
            weights = np_to_var(
                np.ones(weight_shape, dtype=np.float32) / float(n_pool))
            weights = weights.type_as(x)
            if x.is_cuda:
                weights = weights.cuda()
            self.weights = weights

        pooled = F.conv2d(x, self.weights, bias=None, stride=self.stride,
                          dilation=self.dilation,
                          groups=in_channels,)
        return pooled
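
Because the kernel is constant and normalized by the pool size, this grouped convolution is just average pooling. A quick standalone check (newer tensor API, no Variables):

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 8, 8)
k = (2, 2)
w = torch.ones(x.size(1), 1, *k) / float(k[0] * k[1])  # one constant filter per channel
y_conv = F.conv2d(x, w, stride=k, groups=x.size(1))
y_ref = F.avg_pool2d(x, k, stride=k)
print((y_conv - y_ref).abs().max())  # ~0 up to float rounding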
layers.py (project: SMASH, author: ajbrock)
def forward(self, input, n_out, dilation, ks = (3,3), groups=1):
        # Slice the central ks[0] x ks[1] patch out of the full weight bank,
        # keeping only the first n_out output channels and the input channels
        # actually present in this pass.
        h0 = (self.kernel_size[0] - ks[0]) // 2
        w0 = (self.kernel_size[1] - ks[1]) // 2
        weight = self.weight[:n_out,
                             :input.size(1) // groups,
                             h0:h0 + ks[0],
                             w0:w0 + ks[1]].contiguous()
        # 'Same' padding for the effective (dilated) kernel size.
        padding = ((ks[0] + (ks[0] - 1) * (dilation[0] - 1)) // 2,
                   (ks[1] + (ks[1] - 1) * (dilation[1] - 1)) // 2)
        return F.conv2d(input, weight=weight,
                        dilation=tuple(int(d) for d in dilation),
                        padding=tuple(int(p) for p in padding),
                        groups=int(groups),
                        bias=None)

# A convenience wrapper to prevent the forward() method of SMASH from
# being annoyingly verbose. This version of BatchNorm2D simply 
# slices its weights according to the size of the incoming tensor.
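
That class is not included in the excerpt; a hypothetical sketch of what the comment describes (the name SlicedBatchNorm2d is mine, not SMASH's):

import torch.nn as nn
import torch.nn.functional as F

class SlicedBatchNorm2d(nn.BatchNorm2d):
    def forward(self, x):
        c = x.size(1)  # use only the first c channels' statistics and affine params
        return F.batch_norm(x, self.running_mean[:c], self.running_var[:c],
                            self.weight[:c], self.bias[:c],
                            self.training, self.momentum, self.eps)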
efficient_conv_test.py (project: efficient_densenet_pytorch, author: gpleiss)
def test_forward_computes_forward_pass():
    weight = torch.randn(4, 8, 3, 3).cuda()
    input = torch.randn(4, 8, 4, 4).cuda()

    out = F.conv2d(
        input=Variable(input),
        weight=Parameter(weight),
        bias=None,
        stride=1,
        padding=1,
        dilation=1,
        groups=1,
    ).data

    func = _EfficientConv2d(
        stride=1,
        padding=1,
        dilation=1,
        groups=1,
    )
    out_efficient = func.forward(weight, None, input)

    assert(almost_equal(out, out_efficient))
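
The almost_equal helper is not shown; a plausible definition:

def almost_equal(a, b, eps=1e-5):
    return (a - b).abs().max() < eps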
layers.py (project: pytorch_resnet, author: taokong)
def forward(self, x):
        """Return the deformed featured map"""

        x_shape = x.size()

        offsets = F.conv2d(x, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)

        # offsets: (b*c, h, w, 2)
        offsets = self._to_bc_h_w_2(offsets, x_shape)

        # x: (b*c, h, w)
        x = self._to_bc_h_w(x, x_shape)

        # X_offset: (b*c, h, w)
        x_offset = th_batch_map_offsets(x, offsets, grid=self._get_grid(self,x))

        # x_offset: (b, h, w, c)
        x_offset = self._to_b_c_h_w(x_offset, x_shape)

        return x_offset
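
The reshape helpers are not shown in the excerpt; plausible sketches that match the shape comments above (modeled on pytorch-deform-conv, so treat them as assumptions):

@staticmethod
def _to_bc_h_w_2(x, x_shape):
    """(b, 2c, h, w) -> (b*c, h, w, 2)"""
    return x.contiguous().view(-1, int(x_shape[2]), int(x_shape[3]), 2)

@staticmethod
def _to_bc_h_w(x, x_shape):
    """(b, c, h, w) -> (b*c, h, w)"""
    return x.contiguous().view(-1, int(x_shape[2]), int(x_shape[3]))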
test.py (project: pyinn, author: szagoruyko)
def test_conv2d_depthwise(self):
        n = 6
        x = Variable(torch.randn(1,n,5,5).double().cuda(), requires_grad=True)
        w = Variable(torch.randn(n,1,3,3).double().cuda(), requires_grad=True)
        y_fast = P.conv2d_depthwise(x, w, padding=1)
        y_ref = F.conv2d(x, w, padding=1, groups=n)
        go = torch.randn(y_fast.size()).double().cuda()

        self.assertLess((y_fast - y_ref).data.abs().max(), 1e-9)

        x.requires_grad = True
        w.requires_grad = True
        y_fast.backward(go)
        gx_fast = x.grad.data.clone()
        gw_fast = w.grad.data.clone()

        x.grad.data.zero_()
        w.grad.data.zero_()
        y_ref.backward(go)
        gx_ref = x.grad.data.clone()
        gw_ref = w.grad.data.clone()

        self.assertTrue(gradcheck(partial(P.conv2d_depthwise, padding=1), (x, w,)))
conv2d_depthwise.py (project: pyinn, author: szagoruyko)
def conv2d_depthwise(input, weight, bias=None, stride=1, padding=0, dilation=1):
    """Depthwise 2D convolution.

    Implements depthwise convolution as in https://arxiv.org/pdf/1704.04861v1.pdf
    MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications

    CUDA kernels from https://github.com/BVLC/caffe/pull/5665
    CPU side is done by F.conv2d

    Equivalent to:
        `F.conv2d(input, weight, groups=input.size(1))`
    """
    assert input.size(1) == weight.size(0)
    if input.is_cuda:
        out = Conv2dDepthwise(stride, padding, dilation)(input, weight)
        if bias is not None:
            out += bias.view(1,-1,1,1)
    else:
        groups = input.size(1)
        out = F.conv2d(input, weight, bias, stride, padding, dilation, groups)
    return out
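
Usage sketch, assuming a 3x3 depthwise filter over an 8-channel input:

x = Variable(torch.randn(1, 8, 16, 16))
w = Variable(torch.randn(8, 1, 3, 3))   # one filter per input channel
y = conv2d_depthwise(x, w, padding=1)   # same as F.conv2d(x, w, padding=1, groups=8)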
network_pytorch.py (project: jaccardSegment, author: bermanmaxim)
def pad_if_needed(input, padding, kind, k_h, k_w, s_h=1, s_w=1, dilation=1):
    if padding == 'VALID':
        return input
    elif padding == 'SAME' and kind in ('conv2d', 'pool2d'):
        in_height, in_width = input.size(2), input.size(3)
        out_height = int(np.ceil(float(in_height) / float(s_h)))
        out_width  = int(np.ceil(float(in_width) / float(s_w)))

        pad_along_height = max((out_height - 1) * s_h + k_h - in_height, 0)
        pad_along_width = max((out_width - 1) * s_w + k_w - in_width, 0)
        pad_top = pad_along_height // 2
        pad_bottom = pad_along_height - pad_top
        pad_left = pad_along_width // 2
        pad_right = pad_along_width - pad_left
        input = F.pad(input, (pad_left, pad_right, pad_top, pad_bottom))
        return input
    elif kind in ('atrous_conv2d',):
        effective_height = k_h + (k_h - 1) * (dilation - 1)
        effective_width = k_w + (k_w - 1) * (dilation - 1)
        return pad_if_needed(input, padding, 'conv2d', effective_height, effective_width, s_h, s_w, dilation=1)
    else:
        raise NotImplementedError
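
For example, a 3x3 stride-2 convolution over a 7x7 input under 'SAME' padding should come out at ceil(7/2) = 4:

x = torch.randn(1, 1, 7, 7)
x = pad_if_needed(x, 'SAME', 'conv2d', k_h=3, k_w=3, s_h=2, s_w=2)  # pads 7x7 -> 9x9
y = F.conv2d(x, torch.randn(1, 1, 3, 3), stride=2)
print(y.size())  # (1, 1, 4, 4)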
network_pytorch.py (project: jaccardSegment, author: bermanmaxim)
def conv(self,
             input,
             k_h, 
             k_w, 
             c_o, 
             s_h,
             s_w,
             name,
             relu=True,
             padding=DEFAULT_PADDING,
             group=1,
             biased=True):
        input = pad_if_needed(input, padding, 'conv2d', k_h, k_w, s_h, s_w)

        result = F.conv2d(input, 
                          self.weights[name + '/weights'], 
                          bias=self.weights[name + '/biases'] if biased else None,
                          padding=0, 
                          groups=group,
                          stride=(s_h, s_w))
        if relu:
            result = F.relu(result)
        return result
network_pytorch.py (project: jaccardSegment, author: bermanmaxim)
def atrous_conv(self,
                    input,
                    k_h, 
                    k_w, 
                    c_o, 
                    dilation,
                    name,
                    relu=True,
                    padding=DEFAULT_PADDING,
                    group=1,
                    biased=True):
        if group != 1:
            raise NotImplementedError
        input = pad_if_needed(input, padding, 'atrous_conv2d', k_h, k_w, dilation=dilation)

        result = F.conv2d(input, 
                          self.weights[name + '/weights'], 
                          bias=self.weights[name + '/biases'] if biased else None,
                          padding=0, 
                          dilation=dilation,
                          groups=group,
                          stride=1)
        if relu:
            result = F.relu(result)
        return result
diracconv.py (project: diracnets, author: szagoruyko)
def forward(self, input):
        return F.conv2d(input, self.alpha * Variable(self.delta) + self.beta * normalize(self.weight),
                        self.bias, self.stride, self.padding, self.dilation)
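
Here self.delta is a fixed Dirac (identity) kernel, so alpha scales a skip connection baked into the filter itself. A sketch of its construction, equivalent to what init.dirac produces (out_c, in_c, k are illustrative names):

delta = torch.zeros(out_c, in_c, k, k)
for i in range(min(out_c, in_c)):
    delta[i, i, k // 2, k // 2] = 1  # pass channel i through unchanged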
diracnet.py (project: diracnets, author: szagoruyko)
def block(o, params, stats, base, mode, j):
    w = params[base + '.conv']
    alpha = params[base + '.alpha']
    beta = params[base + '.beta']
    delta = Variable(stats[size2name(w.size())])
    w = beta * F.normalize(w.view(w.size(0), -1)).view_as(w) + alpha * delta
    o = F.conv2d(ncrelu(o), w, stride=1, padding=1)
    o = batch_norm(o, params, stats, base + '.bn', mode)
    return o
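
ncrelu is DiracNets' negative-concatenated ReLU, which doubles the channel count; a sketch matching the paper's definition NCReLU(x) = concat(max(x, 0), min(x, 0)):

def ncrelu(x):
    return torch.cat([x.clamp(min=0), x.clamp(max=0)], dim=1)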
mnist_with_visdom.py (project: tnt, author: pytorch)
def f(params, inputs, mode):
    o = inputs.view(inputs.size(0), 1, 28, 28)
    o = F.conv2d(o, params['conv0.weight'], params['conv0.bias'], stride=2)
    o = F.relu(o)
    o = F.conv2d(o, params['conv1.weight'], params['conv1.bias'], stride=2)
    o = F.relu(o)
    o = o.view(o.size(0), -1)
    o = F.linear(o, params['linear2.weight'], params['linear2.bias'])
    o = F.relu(o)
    o = F.linear(o, params['linear3.weight'], params['linear3.bias'])
    return o
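
A hypothetical params dict with shapes consistent with this function (the kernel sizes and widths are illustrative, not taken from the project): 5x5 stride-2 convs map 28 -> 12 -> 4 spatially, so the flattened size is 32 * 4 * 4 = 512.

params = {
    'conv0.weight': torch.randn(16, 1, 5, 5) * 0.01,   'conv0.bias': torch.zeros(16),
    'conv1.weight': torch.randn(32, 16, 5, 5) * 0.01,  'conv1.bias': torch.zeros(32),
    'linear2.weight': torch.randn(256, 512) * 0.01,    'linear2.bias': torch.zeros(256),
    'linear3.weight': torch.randn(10, 256) * 0.01,     'linear3.bias': torch.zeros(10),
}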
mnist.py (project: tnt, author: pytorch)
def f(params, inputs, mode):
    o = inputs.view(inputs.size(0), 1, 28, 28)
    o = F.conv2d(o, params['conv0.weight'], params['conv0.bias'], stride=2)
    o = F.relu(o)
    o = F.conv2d(o, params['conv1.weight'], params['conv1.bias'], stride=2)
    o = F.relu(o)
    o = o.view(o.size(0), -1)
    o = F.linear(o, params['linear2.weight'], params['linear2.bias'])
    o = F.relu(o)
    o = F.linear(o, params['linear3.weight'], params['linear3.bias'])
    return o
test_nn.py (project: pytorch, author: tylergenter)
def test_Conv2d_inconsistent_types(self):
        inputs = Variable(torch.randn(4, 1, 7, 7).float())
        weights = Variable(torch.randn(1, 1, 3, 3).double())
        # inconsistent types should raise an exception
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
        # but it should work with the same type
        nn.functional.conv2d(inputs.float(), weights.float())
test_nn.py (project: pytorch, author: tylergenter)
def test_dirac_identity(self):
        batch, in_c, out_c, size, kernel_size = 8, 3, 4, 5, 3
        # Test 1D
        input_var = Variable(torch.randn(batch, in_c, size))
        filter_var = Variable(torch.zeros(out_c, in_c, kernel_size))
        init.dirac(filter_var)
        output_var = F.conv1d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data  # Variables do not support nonzero
        self.assertEqual(input_tensor[:, :, 1:-1], output_tensor[:, :in_c, :])  # Assert in_c outputs are preserved
        assert torch.nonzero(output_tensor[:, in_c:, :]).numel() == 0  # Assert extra outputs are 0

        # Test 2D
        input_var = Variable(torch.randn(batch, in_c, size, size))
        filter_var = Variable(torch.zeros(out_c, in_c, kernel_size, kernel_size))
        init.dirac(filter_var)
        output_var = F.conv2d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data
        self.assertEqual(input_tensor[:, :, 1:-1, 1:-1], output_tensor[:, :in_c, :, :])
        assert torch.nonzero(output_tensor[:, in_c:, :, :]).numel() == 0

        # Test 3D
        input_var = Variable(torch.randn(batch, in_c, size, size, size))
        filter_var = Variable(torch.zeros(out_c, in_c, kernel_size, kernel_size, kernel_size))
        init.dirac(filter_var)
        output_var = F.conv3d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data
        self.assertEqual(input_tensor[:, :, 1:-1, 1:-1, 1:-1], output_tensor[:, :in_c, :, :])
        assert torch.nonzero(output_tensor[:, in_c:, :, :, :]).numel() == 0
test_nn.py (project: pytorch-coriander, author: hughperkins)
def test_Conv2d_inconsistent_types(self):
        inputs = Variable(torch.randn(4, 1, 7, 7).float())
        weights = Variable(torch.randn(1, 1, 3, 3).double())
        # inconsistent types should raise an exception
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
        # but it should work with the same type
        nn.functional.conv2d(inputs.float(), weights.float())
test_nn.py (project: pytorch-coriander, author: hughperkins)
def test_dirac_identity(self):
        batch, in_c, out_c, size, kernel_size = 8, 3, 4, 5, 3
        # Test 1D
        input_var = Variable(torch.randn(batch, in_c, size))
        filter_var = Variable(torch.zeros(out_c, in_c, kernel_size))
        init.dirac(filter_var)
        output_var = F.conv1d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data  # Variables do not support nonzero
        self.assertEqual(input_tensor[:, :, 1:-1], output_tensor[:, :in_c, :])  # Assert in_c outputs are preserved
        assert torch.nonzero(output_tensor[:, in_c:, :]).numel() == 0  # Assert extra outputs are 0

        # Test 2D
        input_var = Variable(torch.randn(batch, in_c, size, size))
        filter_var = Variable(torch.zeros(out_c, in_c, kernel_size, kernel_size))
        init.dirac(filter_var)
        output_var = F.conv2d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data
        self.assertEqual(input_tensor[:, :, 1:-1, 1:-1], output_tensor[:, :in_c, :, :])
        assert torch.nonzero(output_tensor[:, in_c:, :, :]).numel() == 0

        # Test 3D
        input_var = Variable(torch.randn(batch, in_c, size, size, size))
        filter_var = Variable(torch.zeros(out_c, in_c, kernel_size, kernel_size, kernel_size))
        init.dirac(filter_var)
        output_var = F.conv3d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data
        self.assertEqual(input_tensor[:, :, 1:-1, 1:-1, 1:-1], output_tensor[:, :in_c, :, :])
        assert torch.nonzero(output_tensor[:, in_c:, :, :, :]).numel() == 0
test_nn.py (project: pytorch, author: ezyang)
def test_Conv2d_inconsistent_types(self):
        inputs = Variable(torch.randn(4, 1, 7, 7).float())
        weights = Variable(torch.randn(1, 1, 3, 3).double())
        # inconsistent types should raise an exception
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
        # but it should work with the same type
        nn.functional.conv2d(inputs.float(), weights.float())
test_nn.py (project: pytorch, author: ezyang)
def test_Conv2d_inconsistent_types_on_GPU_without_cudnn(self):
        inputs = Variable(torch.randn(4, 1, 7, 7).float().cuda())
        weights = Variable(torch.randn(1, 1, 3, 3).double().cuda())
        bias = Variable(torch.randn(1).double().cuda())

        torch.backends.cudnn.enabled = False
        # inconsistent types should raise an exception
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights.float(), bias))

        # but it should work with the same type
        nn.functional.conv2d(inputs.float(), weights.float(), bias.float())
test_nn.py (project: pytorch, author: ezyang)
def test_Conv2d_inconsistent_types_on_GPU_with_cudnn(self):
        inputs = Variable(torch.randn(4, 1, 7, 7).float().cuda())
        weights = Variable(torch.randn(1, 1, 3, 3).double().cuda())
        bias = Variable(torch.randn(1).double().cuda())

        torch.backends.cudnn.enabled = True
        # inconsistent types should raise an exception
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights.float(), bias))

        # but it should work with the same type
        nn.functional.conv2d(inputs.float(), weights.float(), bias.float())
test_nn.py (project: pytorch, author: ezyang)
def test_calculate_gain_linear(self):
        for fn in ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']:
            gain = init.calculate_gain(fn)
            self.assertEqual(gain, 1)
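
For contrast, the nonlinear activations return non-unit gains:

import math
assert init.calculate_gain('relu') == math.sqrt(2.0)
assert init.calculate_gain('leaky_relu', 0.2) == math.sqrt(2.0 / (1 + 0.2 ** 2))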
encoding.py (project: PyTorch-Encoding, author: zhanghang1989)
def forward(self, input):
        if isinstance(input, Variable):
            out = F.conv2d(input, self.weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
            return F.pixel_shuffle(out, self.scale_factor)
        elif isinstance(input, tuple) or isinstance(input, list):
            return my_data_parallel(self, input)
        else:
            raise RuntimeError('unknown input type')
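
The convolution is expected to emit C * scale_factor**2 channels, which F.pixel_shuffle then rearranges into a scale_factor-times-larger feature map; for example:

x = torch.randn(1, 16, 8, 8)   # 16 = 4 * 2 * 2 channels
y = F.pixel_shuffle(x, 2)
print(y.size())                # (1, 4, 16, 16)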
basic.py (project: PyTorch-Encoding, author: zhanghang1989)
def forward(self, input):
        if isinstance(input, Variable):
            return F.conv2d(input, self.weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        elif isinstance(input, tuple) or isinstance(input, list):
            return my_data_parallel(self, input)
        else:
            raise RuntimeError('unknown input type')
WeightNormalizedConv.py (project: gan-error-avoidance, author: aleju)
def forward(self, input):
        return self.norm_scale_bias(F.conv2d(input, self.weight, None, self.stride,
                        self.padding, self.dilation, 1))
layers.py (project: SMASH, author: ajbrock)
def forward(self, input):
        return F.conv2d(input,
                        wn2d(self.weight),
                        self.bias,
                        self.stride,
                        self.padding,
                        self.dilation,
                        self.groups)

# A convenience wrapper to prevent the forward() method of SMASH from
# being annoyingly verbose. This version of Conv2D simply takes a user-input
# dilation factor, and slices its input weight as requested.
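
The wn2d function used above is not shown in the excerpt; a plausible definition in the spirit of weight normalization (an assumption, mirroring the F.normalize pattern in the diracnet block earlier on this page):

def wn2d(w):
    # Rescale each output filter of a (out, in, kh, kw) weight to unit L2 norm.
    return F.normalize(w.view(w.size(0), -1)).view_as(w)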
layers.py (project: SMASH, author: ajbrock)
def forward(self, x):
        if self.dilation > 1:
            # Emulate dilation by masking a full-size kernel with a fixed binary mask.
            return F.conv2d(input=x, weight=self.conv.weight * V(self.m),
                            padding=self.dilation, bias=None)
        else:
            return self.conv(x)
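
Presumably self.m is a fixed binary mask that zeroes every weight entry off the dilated grid, so a full (2d+1)x(2d+1) kernel with padding=d acts like a dilated 3x3. A hypothetical construction:

d = self.dilation
m = np.zeros((1, 1, 2 * d + 1, 2 * d + 1), dtype=np.float32)
m[..., ::d, ::d] = 1   # keep only the 3x3 dilated taps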
mnist.py (project: pyscatwave, author: edouardoyallon)
def f(o, params, stats, mode):
    o = F.batch_norm(o, running_mean=stats['bn.running_mean'],
                     running_var=stats['bn.running_var'],
                     weight=params['bn.weight'],
                     bias=params['bn.bias'], training=mode)
    o = F.conv2d(o, params['conv1.weight'], params['conv1.bias'])
    o = F.relu(o)
    o = o.view(o.size(0), -1)
    o = F.linear(o, params['linear2.weight'], params['linear2.bias'])
    o = F.relu(o)
    o = F.linear(o, params['linear3.weight'], params['linear3.bias'])
    return o

