Python constant() usage examples
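
These snippets are collected from open-source PyTorch projects. For orientation: torch.nn.init.constant(tensor, val) fills a tensor with a single value in place. The calls below use the pre-0.4 spelling; recent PyTorch names its in-place initializers with a trailing underscore. A minimal sketch of the call itself, written with the current spelling so it runs on recent versions:

import torch
from torch import nn
from torch.nn import init

layer = nn.Linear(4, 3)
# The snippets on this page write init.constant(tensor, val); recent PyTorch
# spells the same in-place initializer init.constant_.
init.constant_(layer.bias, 0.0)
print(layer.bias)  # a length-3 parameter filled with zeros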

bnlstm.py (project: FewShotLearning, author: gitabcworld)
def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        # The input-to-hidden weight matrix is initialized orthogonally.
        init.orthogonal(self.weight_ih.data)
        # The hidden-to-hidden weight matrix is initialized as an identity
        # matrix.
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is simply set to a zero vector.
        init.constant(self.bias.data, val=0)
        # Initialization of BN parameters.
        self.bn_ih.reset_parameters()
        self.bn_hh.reset_parameters()
        self.bn_c.reset_parameters()
        self.bn_ih.bias.data.fill_(0)
        self.bn_hh.bias.data.fill_(0)
        self.bn_ih.weight.data.fill_(0.1)
        self.bn_hh.weight.data.fill_(0.1)
        self.bn_c.weight.data.fill_(0.1)
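
A note on the hidden-to-hidden initialization above: torch.eye(hidden_size).repeat(1, 4) tiles one identity block per LSTM gate (input, forget, cell, output), so each gate's recurrent weights start out as the identity. A quick shape check, with hidden_size=3 as an illustrative value:

import torch

hidden_size = 3  # illustrative value
weight_hh = torch.eye(hidden_size).repeat(1, 4)
print(weight_hh.shape)  # torch.Size([3, 12]), one 3x3 identity block per gate
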
model.py (project: samplernn-pytorch, author: deepsound-project)
def __init__(self, frame_size, dim, q_levels, weight_norm):
        super().__init__()

        self.q_levels = q_levels

        self.embedding = torch.nn.Embedding(
            self.q_levels,
            self.q_levels
        )

        self.input = torch.nn.Conv1d(
            in_channels=q_levels,
            out_channels=dim,
            kernel_size=frame_size,
            bias=False
        )
        init.kaiming_uniform(self.input.weight)
        if weight_norm:
            self.input = torch.nn.utils.weight_norm(self.input)

        self.hidden = torch.nn.Conv1d(
            in_channels=dim,
            out_channels=dim,
            kernel_size=1
        )
        init.kaiming_uniform(self.hidden.weight)
        init.constant(self.hidden.bias, 0)
        if weight_norm:
            self.hidden = torch.nn.utils.weight_norm(self.hidden)

        self.output = torch.nn.Conv1d(
            in_channels=dim,
            out_channels=q_levels,
            kernel_size=1
        )
        # lecun_uniform is a module-level helper in model.py; torch.nn has no such function.
        lecun_uniform(self.output.weight)
        init.constant(self.output.bias, 0)
        if weight_norm:
            self.output = torch.nn.utils.weight_norm(self.output)
bnlstm.py (project: benchmark, author: pytorch)
def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        # The input-to-hidden weight matrix is initialized orthogonally.
        init.orthogonal(self.weight_ih.data)
        # The hidden-to-hidden weight matrix is initialized as an identity
        # matrix.
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is simply set to a zero vector.
        init.constant(self.bias.data, val=0)
        # Initialization of BN parameters.
        self.bn_ih.reset_parameters()
        self.bn_hh.reset_parameters()
        self.bn_c.reset_parameters()
        self.bn_ih.bias.data.fill_(0)
        self.bn_hh.bias.data.fill_(0)
        self.bn_ih.weight.data.fill_(0.1)
        self.bn_hh.weight.data.fill_(0.1)
        self.bn_c.weight.data.fill_(0.1)
treelstm.py (project: unsupervised-treelstm, author: jihunchoi)
def reset_parameters(self):
        if self.use_leaf_rnn:
            init.kaiming_normal(self.leaf_rnn_cell.weight_ih.data)
            init.orthogonal(self.leaf_rnn_cell.weight_hh.data)
            init.constant(self.leaf_rnn_cell.bias_ih.data, val=0)
            init.constant(self.leaf_rnn_cell.bias_hh.data, val=0)
            # Set forget bias to 1
            self.leaf_rnn_cell.bias_ih.data.chunk(4)[1].fill_(1)
            if self.bidirectional:
                init.kaiming_normal(self.leaf_rnn_cell_bw.weight_ih.data)
                init.orthogonal(self.leaf_rnn_cell_bw.weight_hh.data)
                init.constant(self.leaf_rnn_cell_bw.bias_ih.data, val=0)
                init.constant(self.leaf_rnn_cell_bw.bias_hh.data, val=0)
                # Set forget bias to 1
                self.leaf_rnn_cell_bw.bias_ih.data.chunk(4)[1].fill_(1)
        else:
            init.kaiming_normal(self.word_linear.weight.data)
            init.constant(self.word_linear.bias.data, val=0)
        self.treelstm_layer.reset_parameters()
        init.normal(self.comp_query.data, mean=0, std=0.01)
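
The bias_ih.data.chunk(4)[1].fill_(1) lines above rely on PyTorch's LSTM parameter layout: the bias concatenates the input, forget, cell, and output gate sections in equal blocks, and chunk(4) returns views into the same storage, so filling chunk index 1 writes the forget-gate section in place. A small demonstration with a stand-in bias vector:

import torch

hidden_size = 2  # stand-in size for illustration
bias = torch.zeros(4 * hidden_size)  # layout: [input | forget | cell | output]
bias.chunk(4)[1].fill_(1)  # chunks are views, so this mutates bias itself
print(bias)  # tensor([0., 0., 1., 1., 0., 0., 0., 0.])
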
init.py (project: DeepLab, author: 2prime)
def msra_init(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight)
            # Modified by lzh @ 201707251408:
            # <<< Old:
            # if m.bias:
            # >>> New:
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            # Modified by lzh @ 201707241734:
            # <<< Old:
            # if m.bias:
            # >>> New:
            if m.bias is not None:
            # --- End
                init.constant(m.bias, 0)

# Added by lzh @ 201707251404:
utils.py (project: YellowFin_Pytorch, author: JianGoForIt)
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
model.py (project: a3c-mujoco, author: Feryal)
def __init__(self, observation_space, non_rgb_rgb_state_size, action_space,
                 hidden_size):
        super(ActorCritic, self).__init__()
        self.rgb_state_size = (6, 128, 128)
        self.action_size = 5
        self.relu = nn.ReLU(inplace=True)
        self.softmax = nn.Softmax()

        # the architecture is adapted from Sim2Real (Rusu et al., 2016)
        self.conv1 = nn.Conv2d(
            self.rgb_state_size[0], 16, 8, stride=4, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 5, stride=2)
        self.fc1 = nn.Linear(1152 + non_rgb_rgb_state_size, hidden_size)
        self.lstm = nn.LSTMCell(hidden_size, hidden_size)
        self.fc_actor1 = nn.Linear(hidden_size, self.action_size)
        self.fc_actor2 = nn.Linear(hidden_size, self.action_size)
        self.fc_actor3 = nn.Linear(hidden_size, self.action_size)
        self.fc_actor4 = nn.Linear(hidden_size, self.action_size)
        self.fc_actor5 = nn.Linear(hidden_size, self.action_size)
        self.fc_actor6 = nn.Linear(hidden_size, self.action_size)
        self.fc_critic = nn.Linear(hidden_size, 1)

        # Orthogonal weight initialisation
        for name, p in self.named_parameters():
            if 'weight' in name:
                init.orthogonal(p)
            elif 'bias' in name:
                init.constant(p, 0)
l2norm.py (project: ssd.pytorch, author: amdegroot)
def reset_parameters(self):
        init.constant(self.weight, self.gamma)
regularizers_test.py (project: allennlp, author: allenai)
def test_l1_regularization(self):
        model = torch.nn.Sequential(
                torch.nn.Linear(5, 10),
                torch.nn.Linear(10, 5)
        )
        initializer = InitializerApplicator([(".*", lambda tensor: constant(tensor, -1))])
        initializer(model)
        value = RegularizerApplicator([("", L1Regularizer(1.0))])(model)
        # 115 because of biases.
        assert value.data.numpy() == 115.0
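
The expected value follows from the parameter counts: Linear(5, 10) has 50 weights and 10 biases, Linear(10, 5) has 50 weights and 5 biases, 115 parameters in total. With every parameter initialized to -1, an L1 penalty with coefficient 1.0 gives 115 * |-1| = 115.
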
regularizers_test.py (project: allennlp, author: allenai)
def test_l2_regularization(self):
        model = torch.nn.Sequential(
                torch.nn.Linear(5, 10),
                torch.nn.Linear(10, 5)
        )
        initializer = InitializerApplicator([(".*", lambda tensor: constant(tensor, 0.5))])
        initializer(model)
        value = RegularizerApplicator([("", L2Regularizer(1.0))])(model)
        assert value.data.numpy() == 28.75
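
Here the same 115 parameters are each initialized to 0.5, so an L2 penalty with coefficient 1.0 gives 115 * 0.5^2 = 28.75.
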
regularizers_test.py (project: allennlp, author: allenai)
def test_regularizer_applicator_respects_regex_matching(self):
        model = torch.nn.Sequential(
                torch.nn.Linear(5, 10),
                torch.nn.Linear(10, 5)
        )
        initializer = InitializerApplicator([(".*", lambda tensor: constant(tensor, 1.))])
        initializer(model)
        value = RegularizerApplicator([("weight", L2Regularizer(0.5)),
                                       ("bias", L1Regularizer(1.0))])(model)
        assert value.data.numpy() == 65.0
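
The regexes split the penalty between the two regularizers: the 100 weight entries (each 1.0) match "weight" and contribute 0.5 * 100 * 1^2 = 50, while the 15 bias entries match "bias" and contribute 1.0 * 15 * |1| = 15, for 65 in total.
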
l2norm.py (project: textobjdetection, author: andfoy)
def reset_parameters(self):
        init.constant(self.weight, self.gamma)
resnet.py (project: open-reid, author: Cysu)
def reset_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant(m.weight, 1)
                init.constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant(m.bias, 0)
inception.py (project: open-reid, author: Cysu)
def reset_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant(m.weight, 1)
                init.constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant(m.bias, 0)
init.py (project: braindecode, author: robintibor)
def glorot_weight_zero_bias(model):
    """
    Initialize the parameters of all modules by applying Glorot/Xavier
    uniform initialization to weights and setting biases to zero.
    Weights of batch norm layers are set to 1.

    Parameters
    ----------
    model: Module
    """
    for module in model.modules():
        if hasattr(module, 'weight'):
            if not ('BatchNorm' in module.__class__.__name__):
                init.xavier_uniform(module.weight, gain=1)
            else:
                init.constant(module.weight, 1)
        if hasattr(module, 'bias'):
            if module.bias is not None:
                init.constant(module.bias, 0)
wide_resnet.py (project: wide-resnet.pytorch, author: meliketoy)
def conv_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_uniform(m.weight, gain=np.sqrt(2))
        init.constant(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        init.constant(m.weight, 1)
        init.constant(m.bias, 0)
l2norm.py (project: realtime-action-detection, author: gurkirt)
def reset_parameters(self):
        init.constant(self.weight, self.gamma)
lstm-classifier.py (project: FewShotLearning, author: gitabcworld)
def weights_init(self,module):
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_uniform(m.weight, gain=np.sqrt(2))
                init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
bnlstm.py (project: FewShotLearning, author: gitabcworld)
def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        init.orthogonal(self.weight_ih.data)
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is simply set to a zero vector.
        if self.use_bias:
            init.constant(self.bias.data, val=0)
matching-net-classifier.py (project: FewShotLearning, author: gitabcworld)
def weights_init(self,module):
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_uniform(m.weight, gain=np.sqrt(2))
                init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
model.py (project: unet-pytorch, author: jaxony)
def weight_init(m):
        if isinstance(m, nn.Conv2d):
            init.xavier_normal(m.weight)
            init.constant(m.bias, 0)
utils.py (project: pytorch-cifar, author: kuangliu)
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
pc_model.py (project: malmo-challenge, author: Kaixhin)
def __init__(self, hidden_size):
    super(ActorCritic, self).__init__()
    self.state_size = STATE_SIZE[0] * STATE_SIZE[1] * STATE_SIZE[2]

    self.elu = nn.ELU(inplace=True)
    self.softmax = nn.Softmax()
    self.sigmoid = nn.Sigmoid()

    # Pass state into model body
    self.conv1 = nn.Conv2d(STATE_SIZE[0], 32, 4, stride=2)
    self.conv2 = nn.Conv2d(32, 32, 3)
    self.fc1 = nn.Linear(1152, hidden_size)
    # Pass previous action, reward and timestep directly into LSTM
    self.lstm = nn.LSTMCell(hidden_size + ACTION_SIZE + 2, hidden_size)
    self.fc_actor1 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_critic1 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_actor2 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_critic2 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_class = nn.Linear(hidden_size, 1)

    # Orthogonal weight initialisation
    for name, p in self.named_parameters():
      if 'weight' in name:
        init.orthogonal(p)
      elif 'bias' in name:
        init.constant(p, 0)
    # Set LSTM forget gate bias to 1
    for name, p in self.lstm.named_parameters():
      if 'bias' in name:
        n = p.size(0)
        forget_start_idx, forget_end_idx = n // 4, n // 2
        init.constant(p[forget_start_idx:forget_end_idx], 1)
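
The slice p[n // 4 : n // 2] relies on the same LSTM bias layout as the chunk(4)[1] idiom earlier on this page: the bias packs the input, forget, cell, and output gate sections in equal blocks, so indices n // 4 through n // 2 cover exactly the forget gate.
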
model.py (project: ShuffleNet, author: jaxony)
def init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant(m.weight, 1)
                init.constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant(m.bias, 0)
test_nn.py (project: pytorch, author: tylergenter)
def test_constant(self):
        for as_variable in [True, False]:
            for dims in [1, 2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5, as_variable=as_variable)
                val = self._random_float(1, 10)
                init.constant(input_tensor, val)
                if as_variable:
                    input_tensor = input_tensor.data

                self.assertEqual(input_tensor, input_tensor.clone().fill_(val))
bnlstm.py (project: benchmark, author: pytorch)
def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        init.orthogonal(self.weight_ih.data)
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is simply set to a zero vector.
        if self.use_bias:
            init.constant(self.bias.data, val=0)
init.py (project: covfefe, author: deepnn)
def constant(w, val):
    # torch.nn has no constant(); this wraps torch.nn.init.constant.
    return init.constant(w, val=val)
model.py (project: unsupervised-treelstm, author: jihunchoi)
def reset_parameters(self):
        if self.use_batchnorm:
            self.bn_mlp_input.reset_parameters()
            self.bn_mlp_output.reset_parameters()
        for i in range(self.num_layers):
            linear_layer = self.mlp[i][0]
            init.kaiming_normal(linear_layer.weight.data)
            init.constant(linear_layer.bias.data, val=0)
        init.uniform(self.clf_linear.weight.data, -0.005, 0.005)
        init.constant(self.clf_linear.bias.data, val=0)
treelstm.py (project: unsupervised-treelstm, author: jihunchoi)
def reset_parameters(self):
        init.kaiming_normal(self.comp_linear.weight.data)
        init.constant(self.comp_linear.bias.data, val=0)
model.py (project: unsupervised-treelstm, author: jihunchoi)
def reset_parameters(self):
        if self.use_batchnorm:
            self.bn_mlp_input.reset_parameters()
            self.bn_mlp_output.reset_parameters()
        for i in range(self.num_layers):
            linear_layer = self.mlp[i][0]
            init.kaiming_normal(linear_layer.weight.data)
            init.constant(linear_layer.bias.data, val=0)
        init.uniform(self.clf_linear.weight.data, -0.002, 0.002)
        init.constant(self.clf_linear.bias.data, val=0)

