Source-code examples of the Python class Adam()
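
Every snippet on this page constructs a torch.optim.Adam optimizer in some form. As a quick orientation before the project examples, here is a minimal, self-contained sketch of the usual Adam training loop; the toy model, data, and hyperparameter values below are placeholders chosen for illustration and are not taken from any of the projects listed further down.

import torch
import torch.nn as nn
import torch.optim as optim

# Toy model and data, purely illustrative.
model = nn.Linear(10, 1)
inputs = torch.randn(32, 10)
targets = torch.randn(32, 1)

# Build Adam over the model parameters; lr, betas, eps and
# weight_decay are the standard keyword arguments.
optimizer = optim.Adam(model.parameters(), lr=1e-3,
                       betas=(0.9, 0.999), eps=1e-8, weight_decay=0)

criterion = nn.MSELoss()
for step in range(100):
    optimizer.zero_grad()                     # clear accumulated gradients
    loss = criterion(model(inputs), targets)  # forward pass and loss
    loss.backward()                           # backpropagate
    optimizer.step()                          # apply the Adam update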

solver.py — project: DistanceGAN, author: sagiebenaim
def build_model(self):
        """Builds a generator and a discriminator."""
        self.g12 = G12(self.config, conv_dim=self.g_conv_dim)
        self.g21 = G21(self.config, conv_dim=self.g_conv_dim)
        self.d1 = D1(conv_dim=self.d_conv_dim)
        self.d2 = D2(conv_dim=self.d_conv_dim)

        g_params = list(self.g12.parameters()) + list(self.g21.parameters())
        d_params = list(self.d1.parameters()) + list(self.d2.parameters())

        self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])
        self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])

        if torch.cuda.is_available():
            self.g12.cuda()
            self.g21.cuda()
            self.d1.cuda()
            self.d2.cuda()
test_optim.py — project: pytorch-dist, author: apaszke
def test_adam(self):
        self._test_rosenbrock(
            lambda params: optim.Adam(params, lr=1e-2),
            wrap_old_fn(old_optim.adam, learningRate=1e-2)
        )
        self._test_rosenbrock(
            lambda params: optim.Adam(params, lr=1e-2, weight_decay=1e-2),
            wrap_old_fn(old_optim.adam, learningRate=1e-2, weightDecay=1e-2)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adam([weight, bias], lr=1e-3)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adam(
                self._build_params_dict(weight, bias, lr=1e-2),
                lr=1e-3)
        )
solver.py — project: mnist-svhn-transfer, author: yunjey
def build_model(self):
        """Builds a generator and a discriminator."""
        self.g12 = G12(conv_dim=self.g_conv_dim)
        self.g21 = G21(conv_dim=self.g_conv_dim)
        self.d1 = D1(conv_dim=self.d_conv_dim, use_labels=self.use_labels)
        self.d2 = D2(conv_dim=self.d_conv_dim, use_labels=self.use_labels)

        g_params = list(self.g12.parameters()) + list(self.g21.parameters())
        d_params = list(self.d1.parameters()) + list(self.d2.parameters())

        self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])
        self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])

        if torch.cuda.is_available():
            self.g12.cuda()
            self.g21.cuda()
            self.d1.cuda()
            self.d2.cuda()
wordembed.py — project: pytorch-skipthoughts, author: kaniblu
def train(self):
        optimizer = O.Adam(self.model.parameters())
        t = tqdm.tqdm()

        for epoch_id in range(self.epochs):
            for x, y in self.data_generator:
                if self.model.W.weight.is_cuda:
                    x = x.cuda()
                    y = y.cuda()

                optimizer.zero_grad()
                loss = self.run_loss(x, y)
                loss.backward()
                optimizer.step()

                loss_val = loss.data[0]
                t.set_description("loss: {}".format(loss_val))
                t.update()
charlm.py — project: Tree-LSTM-LM, author: vgene
def __init__(self, args, mapping):
        super(CharLM, self).__init__()

        self.batch_size = args.batch_size
        self.seq_length = args.seq_length
        self.vocab_size = args.vocab_size
        self.embedding_dim = args.embedding_dim
        self.layer_num = args.layer_num
        self.dropout_prob = args.dropout_prob
        self.lr = args.lr
        self.char_embedding = nn.Embedding(self.vocab_size, self.embedding_dim)
        self.dropout = nn.Dropout(self.dropout_prob)

        self.lstm = nn.LSTM(input_size = self.embedding_dim,
                            hidden_size = self.embedding_dim,
                            num_layers= self.layer_num,
                            dropout = self.dropout_prob)
        self.fc = nn.Linear(self.embedding_dim, self.vocab_size)
        self.optimizer = optim.Adam(self.parameters(), lr=self.lr)
        self.mapping = mapping
treelm.py — project: Tree-LSTM-LM, author: vgene
def __init__(self, args, attr_size, node_size):
        super(TreeLM, self).__init__()

        self.batch_size = args.batch_size
        self.seq_length = args.seq_length
        self.attr_size = attr_size
        self.node_size = node_size

        self.embedding_dim = args.embedding_dim
        self.layer_num = args.layer_num
        self.dropout_prob = args.dropout_prob
        self.lr = args.lr

        self.attr_embedding = nn.Embedding(self.attr_size, self.embedding_dim)
        self.dropout = nn.Dropout(self.dropout_prob)

        self.lstm = nn.LSTM(input_size = self.embedding_dim,
                            hidden_size = self.embedding_dim,
                            num_layers= self.layer_num,
                            dropout = self.dropout_prob)

        self.fc = nn.Linear(self.embedding_dim, self.node_size)
        self.optimizer = optim.Adam(self.parameters(), lr=self.lr)
        # self.node_mapping = node_mapping
mgru_rte_model.py — project: Recognizing-Textual-Entailment, author: codedecde
def fit_batch(self, premise_batch, hypothesis_batch, y_batch):
        if not hasattr(self, 'criterion'):
            self.criterion = nn.NLLLoss()
        if not hasattr(self, 'optimizer'):
            self.optimizer = optim.Adam(self.parameters(), lr=self.options['LR'], betas=(0.9, 0.999), eps=1e-08, weight_decay=self.options['L2'])

        self.optimizer.zero_grad()
        preds = self.__call__(premise_batch, hypothesis_batch, training=True)
        loss = self.criterion(preds, y_batch)
        loss.backward()
        self.optimizer.step()

        _, pred_labels = torch.max(preds, dim=-1, keepdim=True)
        y_true = self._get_numpy_array_from_variable(y_batch)
        y_pred = self._get_numpy_array_from_variable(pred_labels)
        acc = accuracy_score(y_true, y_pred)

        ret_loss = self._get_numpy_array_from_variable(loss)[0]
        return ret_loss, acc
rte_model.py — project: Recognizing-Textual-Entailment, author: codedecde
def fit_batch(self, premise_batch, hypothesis_batch, y_batch):
        if not hasattr(self,'criterion'):
            self.criterion = nn.NLLLoss()
        if not hasattr(self, 'optimizer'):
            self.optimizer = optim.Adam(self.parameters(),  lr=self.options['LR'], betas=(0.9, 0.999), eps=1e-08, weight_decay=self.options['L2'])

        self.optimizer.zero_grad()
        preds = self.__call__(premise_batch, hypothesis_batch, training= True)
        loss = self.criterion(preds, y_batch)
        loss.backward()
        self.optimizer.step()

        _, pred_labels = torch.max(preds, dim=-1, keepdim = True)
        y_true = self._get_numpy_array_from_variable(y_batch)
        y_pred = self._get_numpy_array_from_variable(pred_labels)
        acc = accuracy_score(y_true, y_pred)

        ret_loss = self._get_numpy_array_from_variable(loss)[0]
        return ret_loss, acc
nets.py — project: e2e-model-learning, author: locuslab
def run_rmse_net(model, variables, X_train, Y_train):
    opt = optim.Adam(model.parameters(), lr=1e-3)

    for i in range(1000):
        opt.zero_grad()
        model.train()
        train_loss = nn.MSELoss()(
            model(variables['X_train_'])[0], variables['Y_train_'])
        train_loss.backward()
        opt.step()

        model.eval()
        test_loss = nn.MSELoss()(
            model(variables['X_test_'])[0], variables['Y_test_'])

        print(i, train_loss.data[0], test_loss.data[0])

    model.eval()
    model.set_sig(variables['X_train_'], variables['Y_train_'])

    return model


# TODO: minibatching
train.py — project: optnet, author: locuslab
def get_optimizer(args, params):
    if args.dataset == 'mnist':
        if args.model == 'optnet-eq':
            params = list(params)
            A_param = params.pop(0)
            assert(A_param.size() == (args.neq, args.nHidden))
            optimizer = optim.Adam([
                {'params': params, 'lr': 1e-3},
                {'params': [A_param], 'lr': 1e-1}
            ])
        else:
            optimizer = optim.Adam(params)
    elif args.dataset in ('cifar-10', 'cifar-100'):
        if args.opt == 'sgd':
            optimizer = optim.SGD(params, lr=1e-1, momentum=0.9, weight_decay=args.weightDecay)
        elif args.opt == 'adam':
            optimizer = optim.Adam(params, weight_decay=args.weightDecay)
    else:
        assert(False)

    return optimizer
test_optim.py — project: pytorch, author: tylergenter
def test_adam(self):
        self._test_rosenbrock(
            lambda params: optim.Adam(params, lr=1e-2),
            wrap_old_fn(old_optim.adam, learningRate=1e-2)
        )
        self._test_rosenbrock(
            lambda params: optim.Adam(params, lr=1e-2, weight_decay=1e-2),
            wrap_old_fn(old_optim.adam, learningRate=1e-2, weightDecay=1e-2)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adam([weight, bias], lr=1e-3)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adam(
                self._build_params_dict(weight, bias, lr=1e-2),
                lr=1e-3)
        )
test_optim.py — project: pytorch-coriander, author: hughperkins
def test_adam(self):
        self._test_rosenbrock(
            lambda params: optim.Adam(params, lr=1e-2),
            wrap_old_fn(old_optim.adam, learningRate=1e-2)
        )
        self._test_rosenbrock(
            lambda params: optim.Adam(params, lr=1e-2, weight_decay=1e-2),
            wrap_old_fn(old_optim.adam, learningRate=1e-2, weightDecay=1e-2)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adam([weight, bias], lr=1e-3)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adam(
                self._build_params_dict(weight, bias, lr=1e-2),
                lr=1e-3)
        )
BiLSTM_Model.py — project: BiLSTM-CCM, author: codedecde
def train_epoch(self, X, y, show_bar=True):
        optimizer = optim.Adam(self.parameters())
        if show_bar:
            bar = Progbar(len(X))
        for ix, (elem, tags) in enumerate(zip(X, y)):
            self.zero_grad()
            sentence, feature_vector, sentence_markers = self.get_sentence_feature_vector(elem)
            if self.GPU:
                targets = torch.LongTensor(tags).cuda()
            else:
                targets = torch.LongTensor(tags)
            neg_log_likelihood = self.neg_log_likelihood(sentence, feature_vector, targets)
            neg_log_likelihood.backward()
            optimizer.step()
            if show_bar:
                bar.update(ix + 1)
        if show_bar:
            print('')
        sys.stdout.flush()
test_optim.py — project: pytorch, author: ezyang
def test_adam(self):
        self._test_rosenbrock(
            lambda params: optim.Adam(params, lr=1e-2),
            wrap_old_fn(old_optim.adam, learningRate=1e-2)
        )
        self._test_rosenbrock(
            lambda params: optim.Adam(params, lr=1e-2, weight_decay=1e-2),
            wrap_old_fn(old_optim.adam, learningRate=1e-2, weightDecay=1e-2)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adam([weight, bias], lr=1e-3)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adam(
                self._build_params_dict(weight, bias, lr=1e-2),
                lr=1e-3)
        )
kissgp_kronecker_product_classification_test.py — project: gpytorch, author: jrg365
def test_kissgp_classification_error():
    model = GPClassificationModel()

    # Find optimal model hyperparameters
    model.train()
    optimizer = optim.Adam(model.parameters(), lr=0.15)
    optimizer.n_iter = 0
    for i in range(20):
        optimizer.zero_grad()
        output = model.forward(train_x)
        loss = -model.marginal_log_likelihood(output, train_y)
        loss.backward()
        optimizer.n_iter += 1
        optimizer.step()

    # Set back to eval mode
    model.eval()
    test_preds = model(train_x).mean().ge(0.5).float().mul(2).sub(1).squeeze()
    mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
    assert(mean_abs_error.data.squeeze()[0] < 1e-5)
kissgp_gp_classification_test.py — project: gpytorch, author: jrg365
def test_kissgp_classification_error():
    model = GPClassificationModel()

    # Find optimal model hyperparameters
    model.train()
    optimizer = optim.Adam(model.parameters(), lr=0.15)
    optimizer.n_iter = 0
    for i in range(200):
        optimizer.zero_grad()
        output = model.forward(train_x)
        loss = -model.marginal_log_likelihood(output, train_y)
        loss.backward()
        optimizer.n_iter += 1
        optimizer.step()

    # Set back to eval mode
    model.eval()
    test_preds = model(train_x).mean().ge(0.5).float().mul(2).sub(1).squeeze()
    mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
    assert(mean_abs_error.data.squeeze()[0] < 1e-5)
simple_gp_classification_test.py — project: gpytorch, author: jrg365
def test_kissgp_classification_error():
    train_x, train_y = train_data()
    model = GPClassificationModel(train_x.data)

    # Find optimal model hyperparameters
    model.train()
    optimizer = optim.Adam(model.parameters(), lr=0.1)
    optimizer.n_iter = 0
    for i in range(50):
        optimizer.zero_grad()
        output = model.forward(train_x)
        loss = -model.marginal_log_likelihood(output, train_y)
        loss.backward()
        optimizer.n_iter += 1
        optimizer.step()

    # Set back to eval mode
    model.eval()
    test_preds = model(train_x).mean().ge(0.5).float().mul(2).sub(1).squeeze()
    mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
    assert(mean_abs_error.data.squeeze()[0] < 1e-5)
simple_gp_classification_test.py — project: gpytorch, author: jrg365
def test_kissgp_classification_error_cuda():
    if torch.cuda.is_available():
        train_x, train_y = train_data(cuda=True)
        model = GPClassificationModel(train_x.data).cuda()
        model.condition(train_x, train_y)

        # Find optimal model hyperparameters
        model.train()
        optimizer = optim.Adam(model.parameters(), lr=0.1)
        optimizer.n_iter = 0
        for i in range(50):
            optimizer.zero_grad()
            output = model.forward(train_x)
            loss = -model.marginal_log_likelihood(output, train_y)
            loss.backward()
            optimizer.n_iter += 1
            optimizer.step()

        # Set back to eval mode
        model.eval()
        test_preds = model(train_x).mean().ge(0.5).float().mul(2).sub(1).squeeze()
        mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
        assert(mean_abs_error.data.squeeze()[0] < 1e-5)
kissgp_additive_classification_test.py — project: gpytorch, author: jrg365
def test_kissgp_classification_error():
    gpytorch.functions.use_toeplitz = False
    model = GPClassificationModel()

    # Find optimal model hyperparameters
    model.train()
    optimizer = optim.Adam(model.parameters(), lr=0.15)
    optimizer.n_iter = 0
    for i in range(100):
        optimizer.zero_grad()
        output = model.forward(train_x)
        loss = -model.marginal_log_likelihood(output, train_y)
        loss.backward()
        optimizer.n_iter += 1
        optimizer.step()

    # Set back to eval mode
    model.eval()
    test_preds = model(train_x).mean().ge(0.5).float().mul(2).sub(1).squeeze()
    mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
    gpytorch.functions.use_toeplitz = True
    assert(mean_abs_error.data.squeeze()[0] < 5e-2)
spectral_mixture_gp_regression_test.py — project: gpytorch, author: jrg365
def test_spectral_mixture_gp_mean_abs_error():
    gp_model = SpectralMixtureGPModel()

    # Optimize the model
    gp_model.train()
    optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
    optimizer.n_iter = 0

    gpytorch.functions.fastest = False
    for i in range(50):
        optimizer.zero_grad()
        output = gp_model(train_x)
        loss = -gp_model.marginal_log_likelihood(output, train_y)
        loss.backward()
        optimizer.n_iter += 1
        optimizer.step()

    # Test the model
    gp_model.eval()
    gp_model.condition(train_x, train_y)
    test_preds = gp_model(test_x).mean()
    mean_abs_error = torch.mean(torch.abs(test_y - test_preds))

    # The spectral mixture kernel should be trivially able to extrapolate the sine function.
    assert(mean_abs_error.data.squeeze()[0] < 0.05)
kissgp_kronecker_product_regression_test.py — project: gpytorch, author: jrg365
def test_kissgp_gp_mean_abs_error():
    gp_model = GPRegressionModel()

    # Optimize the model
    gp_model.train()
    optimizer = optim.Adam(gp_model.parameters(), lr=0.2)
    optimizer.n_iter = 0
    for i in range(20):
        optimizer.zero_grad()
        output = gp_model(train_x)
        loss = -gp_model.marginal_log_likelihood(output, train_y)
        loss.backward()
        optimizer.n_iter += 1
        optimizer.step()

    # Test the model
    gp_model.eval()
    gp_model.condition(train_x, train_y)
    test_preds = gp_model(test_x).mean()
    mean_abs_error = torch.mean(torch.abs(test_y - test_preds))
    assert(mean_abs_error.data.squeeze()[0] < 0.1)
kissgp_additive_regression_test.py — project: gpytorch, author: jrg365
def test_kissgp_gp_mean_abs_error():
    gp_model = GPRegressionModel()

    # Optimize the model
    gp_model.train()
    optimizer = optim.Adam(gp_model.parameters(), lr=0.2)
    optimizer.n_iter = 0
    for i in range(20):
        optimizer.zero_grad()
        output = gp_model(train_x)
        loss = -gp_model.marginal_log_likelihood(output, train_y)
        loss.backward()
        optimizer.n_iter += 1
        optimizer.step()

    # Test the model
    gp_model.eval()
    gp_model.condition(train_x, train_y)
    test_preds = gp_model(test_x).mean()
    mean_abs_error = torch.mean(torch.abs(test_y - test_preds))
    assert(mean_abs_error.data.squeeze()[0] < 0.1)
kissgp_gp_regression_test.py — project: gpytorch, author: jrg365
def test_kissgp_gp_mean_abs_error_cuda():
    if torch.cuda.is_available():
        train_x, train_y, test_x, test_y = make_data(cuda=True)
        gp_model = GPRegressionModel().cuda()

        # Optimize the model
        gp_model.train()
        optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
        optimizer.n_iter = 0
        for i in range(25):
            optimizer.zero_grad()
            output = gp_model(train_x)
            loss = -gp_model.marginal_log_likelihood(output, train_y)
            loss.backward()
            optimizer.n_iter += 1
            optimizer.step()

        # Test the model
        gp_model.eval()
        gp_model.condition(train_x, train_y)
        test_preds = gp_model(test_x).mean()
        mean_abs_error = torch.mean(torch.abs(test_y - test_preds))

        assert(mean_abs_error.data.squeeze()[0] < 0.02)
ENet-train.py — project: PytorchDL, author: FredHuangBia
def __init__(self, model, criterion, opt, optimState):
        self.model = model
        self.criterion = criterion
        self.optimState = optimState
        if self.optimState == None:
            self.optimState = { 'learningRate' : opt.LR,
                                'learningRateDecay' : opt.LRDParam,
                                'momentum' : opt.momentum,
                                'nesterov' : False,
                                'dampening'  : opt.dampening,
                                'weightDecay' : opt.weightDecay
                            }
        self.opt = opt
        if opt.optimizer == 'SGD':
            self.optimizer = optim.SGD(model.parameters(), lr=opt.LR, momentum=opt.momentum, dampening=opt.dampening, weight_decay=opt.weightDecay)
        elif opt.optimizer == 'Adam':
            self.optimizer = optim.Adam(model.parameters(), lr=opt.LR, betas=(opt.momentum, 0.999), eps=1e-8, weight_decay=opt.weightDecay)

        self.logger = { 'train' : open(os.path.join(opt.resume, 'train.log'), 'a+'), 
                        'val' : open(os.path.join(opt.resume, 'val.log'), 'a+')
                    }
ERFNet2-train.py — project: PytorchDL, author: FredHuangBia
def __init__(self, model, criterion, opt, optimState):
        self.model = model
        self.criterion = criterion
        self.optimState = optimState
        if self.optimState == None:
            self.optimState = { 'learningRate' : opt.LR,
                                'learningRateDecay' : opt.LRDParam,
                                'momentum' : opt.momentum,
                                'nesterov' : False,
                                'dampening'  : opt.dampening,
                                'weightDecay' : opt.weightDecay
                            }
        self.opt = opt
        if opt.optimizer == 'SGD':
            self.optimizer = optim.SGD(model.parameters(), lr=opt.LR, momentum=opt.momentum, dampening=opt.dampening, weight_decay=opt.weightDecay)
        elif opt.optimizer == 'Adam':
            self.optimizer = optim.Adam(model.parameters(), lr=opt.LR, betas=(opt.momentum, 0.999), eps=1e-8, weight_decay=opt.weightDecay)

        self.logger = { 'train' : open(os.path.join(opt.resume, 'train.log'), 'a+'), 
                        'val' : open(os.path.join(opt.resume, 'val.log'), 'a+')
                    }
MCCNN-train.py — project: PytorchDL, author: FredHuangBia
def __init__(self, model, criterion, opt, optimState):
        self.model = model
        self.criterion = criterion
        self.optimState = optimState
        if self.optimState == None:
            self.optimState = { 'learningRate' : opt.LR,
                                'learningRateDecay' : opt.LRDParam,
                                'momentum' : opt.momentum,
                                'nesterov' : False,
                                'dampening'  : opt.dampening,
                                'weightDecay' : opt.weightDecay
                            }
        self.opt = opt
        if opt.optimizer == 'SGD':
            self.optimizer = optim.SGD(model.parameters(), lr=opt.LR, momentum=opt.momentum, dampening=opt.dampening, weight_decay=opt.weightDecay)
        elif opt.optimizer == 'Adam':
            self.optimizer = optim.Adam(model.parameters(), lr=opt.LR, betas=(0.9,0.999), eps=1e-8, weight_decay=opt.weightDecay)

        self.logger = { 'train' : open(os.path.join(opt.resume, 'train.log'), 'a+'), 
                        'val' : open(os.path.join(opt.resume, 'val.log'), 'a+')
                    }
FPAE-train.py — project: PytorchDL, author: FredHuangBia
def __init__(self, model, criterion, opt, optimState):
        self.model = model
        self.criterion = criterion
        self.optimState = optimState
        if self.optimState == None:
            self.optimState = { 'learningRate' : opt.LR,
                                'learningRateDecay' : opt.LRDParam,
                                'momentum' : opt.momentum,
                                'nesterov' : False,
                                'dampening'  : opt.dampening,
                                'weightDecay' : opt.weightDecay
                            }
        self.opt = opt
        if opt.optimizer == 'SGD':
            self.optimizer = optim.SGD(model.parameters(), lr=opt.LR, momentum=opt.momentum, dampening=opt.dampening, weight_decay=opt.weightDecay)
        elif opt.optimizer == 'Adam':
            self.optimizer = optim.Adam(model.parameters(), lr=opt.LR, betas=(opt.momentum, 0.999), eps=1e-8, weight_decay=opt.weightDecay)

        self.logger = { 'train' : open(os.path.join(opt.resume, 'train.log'), 'a+'), 
                        'val' : open(os.path.join(opt.resume, 'val.log'), 'a+')
                    }
PSPNet-train.py — project: PytorchDL, author: FredHuangBia
def __init__(self, model, criterion, opt, optimState):
        self.model = model
        self.criterion = criterion
        self.optimState = optimState
        if self.optimState == None:
            self.optimState = { 'learningRate' : opt.LR,
                                'learningRateDecay' : opt.LRDParam,
                                'momentum' : opt.momentum,
                                'nesterov' : False,
                                'dampening'  : opt.dampening,
                                'weightDecay' : opt.weightDecay
                            }
        self.opt = opt
        if opt.optimizer == 'SGD':
            self.optimizer = optim.SGD(model.parameters(), lr=opt.LR, momentum=opt.momentum, dampening=opt.dampening, weight_decay=opt.weightDecay)
        elif opt.optimizer == 'Adam':
            self.optimizer = optim.Adam(model.parameters(), lr=opt.LR, betas=(opt.momentum, 0.999), eps=1e-8, weight_decay=opt.weightDecay)

        self.logger = { 'train' : open(os.path.join(opt.resume, 'train.log'), 'a+'), 
                        'val' : open(os.path.join(opt.resume, 'val.log'), 'a+')
                    }
ERFNet-train.py — project: PytorchDL, author: FredHuangBia
def __init__(self, model, criterion, opt, optimState):
        self.model = model
        self.criterion = criterion
        self.optimState = optimState
        if self.optimState == None:
            self.optimState = { 'learningRate' : opt.LR,
                                'learningRateDecay' : opt.LRDParam,
                                'momentum' : opt.momentum,
                                'nesterov' : False,
                                'dampening'  : opt.dampening,
                                'weightDecay' : opt.weightDecay
                            }
        self.opt = opt
        if opt.optimizer == 'SGD':
            self.optimizer = optim.SGD(model.parameters(), lr=opt.LR, momentum=opt.momentum, dampening=opt.dampening, weight_decay=opt.weightDecay)
        elif opt.optimizer == 'Adam':
            self.optimizer = optim.Adam(model.parameters(), lr=opt.LR, betas=(opt.momentum, 0.999), eps=1e-8, weight_decay=opt.weightDecay)

        self.logger = { 'train' : open(os.path.join(opt.resume, 'train.log'), 'a+'), 
                        'val' : open(os.path.join(opt.resume, 'val.log'), 'a+')
                    }
kanji_gan.py — project: MachineLearning, author: timomernick
def __init__(self):
        super(Generator, self).__init__()
        self.main = nn.Sequential(
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 2, ngf * 1, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 1, nc, 4, 2, 1, bias=False),
            nn.Tanh()
        )
        self.apply(weights_init)
        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate, betas=(beta_1, beta_2))
        #self.optimizer = optim.RMSprop(self.parameters(), lr=learning_rate, alpha=beta_2)

