# Module-level imports this test relies on:
import torch
from torch.autograd import Variable
from numpy.testing import assert_almost_equal
from allennlp.common import Params
from allennlp.modules import FeedForward
from allennlp.nn import InitializerApplicator

def test_forward_gives_correct_output(self):
    params = Params({
            'input_dim': 2,
            'hidden_dims': 3,
            'activations': 'relu',
            'num_layers': 2
            })
    feedforward = FeedForward.from_params(params)

    # Initialize every parameter (weights and biases) to 1 so the output is easy to check by hand.
    constant_init = lambda tensor: torch.nn.init.constant(tensor, 1.)
    initializer = InitializerApplicator([(".*", constant_init)])
    initializer(feedforward)

    input_tensor = Variable(torch.FloatTensor([[-3, 1]]))
    output = feedforward(input_tensor).data.numpy()
    assert output.shape == (1, 3)
    # This output was checked by hand - ReLU makes the output after the first hidden layer [0, 0, 0],
    # which then gets a bias added in the second layer to become [1, 1, 1].
    assert_almost_equal(output, [[1, 1, 1]])
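
As a quick sanity check of the hand computation in the comment above, the same two layers can be replayed in plain NumPy. This is a minimal sketch, assuming all-ones weights and biases exactly as `constant_init` sets them; `np` is just `numpy`:

import numpy as np

x = np.array([[-3., 1.]])
W1, b1 = np.ones((2, 3)), np.ones(3)       # first layer: 2 -> 3, all parameters are 1
W2, b2 = np.ones((3, 3)), np.ones(3)       # second layer: 3 -> 3, all parameters are 1

hidden = np.maximum(x @ W1 + b1, 0.)       # relu(-3 + 1 + 1) = relu(-1) = 0 for each unit
output = np.maximum(hidden @ W2 + b2, 0.)  # relu(0 + 1) = 1 for each unit
print(output)                              # [[1. 1. 1.]]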