import os
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable  # legacy (pre-0.4) PyTorch API, kept to match the rest of the code
from sklearn.metrics import mean_squared_error

# load_data, split_data, make_dataloader, RNN, predict_on_test, dump_json and the
# globals input_size, hidden_size, num_layers, LR, CUDA_FLAG are defined elsewhere
# in the script.


def main():
    outputdir = "output.disp.abs"
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    waveforms, magnitudes = load_data()
    data_split = split_data(waveforms, magnitudes, train_percentage=0.9)
    print("dimension of train x and y: ", data_split["train_x"].shape,
          data_split["train_y"].shape)
    print("dimension of test x and y: ", data_split["test_x"].shape,
          data_split["test_y"].shape)

    train_loader = make_dataloader(data_split["train_x"],
                                   data_split["train_y"])
    rnn = RNN(input_size, hidden_size, num_layers)
    if CUDA_FLAG:  # move the model to GPU only when CUDA is actually used below
        rnn.cuda()
    print(rnn)

    optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
    loss_func = nn.MSELoss()
    # train
    ntrain = data_split["train_x"].shape[0]  # number of training samples (batch_size=1)
    all_loss = {}
    for epoch in range(3):
        loss_epoch = []
        for step, (batch_x, batch_y) in enumerate(train_loader):
            # reshape one sample to (seq_len, batch=1, input_size) for the RNN
            x = torch.unsqueeze(batch_x[0, :, :].t(), dim=1)
            if step % int(ntrain / 100 + 1) == 1:
                print('Epoch: ', epoch, '| Step: %d/%d' % (step, ntrain),
                      "| Loss: %f" % np.mean(loss_epoch))
            if CUDA_FLAG:
                x = Variable(x).cuda()
                y = Variable(torch.Tensor([batch_y.numpy(), ])).cuda()
            else:
                x = Variable(x)
                y = Variable(torch.Tensor([batch_y.numpy(), ]))
            prediction = rnn(x)
            loss = loss_func(prediction, y)
            optimizer.zero_grad()  # clear gradients for this training step
            loss.backward()        # backpropagation, compute gradients
            optimizer.step()       # apply the gradients
            loss_epoch.append(loss.data[0])  # use loss.item() on PyTorch >= 0.4

        all_loss["epoch_%d" % epoch] = loss_epoch
        outputfn = os.path.join(outputdir, "loss.epoch_%d.json" % epoch)
        print("epoch loss file: %s" % outputfn)
        dump_json(loss_epoch, outputfn)
    # test
    pred_y = predict_on_test(rnn, data_split["test_x"])
    test_y = data_split["test_y"]
    _mse = mean_squared_error(test_y, pred_y)
    _std = np.std(test_y - pred_y)
    print("MSE and error std: %f, %f" % (_mse, _std))

    outputfn = os.path.join(outputdir, "prediction.json")
    print("output file: %s" % outputfn)
    data = {"test_y": list(test_y), "test_y_pred": list(pred_y),
            "epoch_loss": all_loss, "mse": _mse, "err_std": _std}
    dump_json(data, outputfn)
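The RNN class instantiated above is defined elsewhere in the script. As a point of reference, a minimal stand-in consistent with the call RNN(input_size, hidden_size, num_layers) and with the (seq_len, batch=1, input_size) input built in the training loop could be an LSTM whose last hidden state feeds a single-output linear layer; the actual architecture may differ:

class RNN(nn.Module):
    """Minimal sketch: waveform sequence in, one magnitude estimate out."""
    def __init__(self, input_size, hidden_size, num_layers):
        super(RNN, self).__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)
        self.out = nn.Linear(hidden_size, 1)

    def forward(self, x):
        # x: (seq_len, batch=1, input_size), as built by torch.unsqueeze above
        r_out, _ = self.lstm(x)
        # regress the magnitude from the last time step's hidden state
        return self.out(r_out[-1])

Reading only the last time step's output makes this a sequence-to-one regression, which matches the scalar magnitude target y.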
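predict_on_test is likewise defined elsewhere. A sketch of what it plausibly does, assuming test_x is indexed the same way as batch_x in the training loop and reusing the script's CUDA_FLAG, Variable, and np:

def predict_on_test(rnn, test_x):
    """Sketch: run the trained model over each test waveform, one sample at a time."""
    pred_y = []
    for i in range(test_x.shape[0]):
        # same (seq_len, batch=1, input_size) layout as in the training loop
        x = Variable(torch.unsqueeze(torch.Tensor(test_x[i, :, :]).t(), dim=1))
        if CUDA_FLAG:
            x = x.cuda()
        prediction = rnn(x)
        # prediction has shape (1, 1); pull out the scalar
        pred_y.append(prediction.data.cpu().numpy()[0, 0])
    return np.array(pred_y)

Returning a NumPy array keeps the downstream test_y - pred_y and mean_squared_error calls working without further conversion.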