def __init__(self, input, n_in, n_out, W=None, b=None,
             activation=T.tanh, rng=None):
    """
    Typical hidden layer of an MLP: units are fully connected and use a
    hyperbolic tangent activation function. The weight matrix (W) has shape
    (n_in, n_out) and the bias vector (b) has shape (n_out,).

    Hidden unit activation is given by: tanh(dot(input, W) + b)

    :type rng: numpy.random.RandomState
    :param rng: a random number generator used to initialize the weights

    :type input: theano.tensor.dmatrix
    :param input: a symbolic tensor of shape (n_examples, n_in)

    :type n_in: int
    :param n_in: dimensionality of the input

    :type n_out: int
    :param n_out: number of hidden units

    :type W: theano shared variable or None
    :param W: optional pre-initialized weight matrix; overrides the default

    :type b: theano shared variable or None
    :param b: optional pre-initialized bias vector; overrides the default

    :type activation: theano.Op or function
    :param activation: non-linearity to be applied in the hidden layer
    """
    if rng is None:
        rng = np.random.RandomState()
    super(HiddenLayer, self).__init__(
        input, n_in, n_out, activation=activation, rng=rng)
    # Initialize self.W and self.b using the parent class's default
    # scheme (defined in the base Layer class, not shown here).
    self.reset_layer()
    # Explicitly supplied parameters override the defaults, e.g. when
    # sharing weights with another layer.
    if W is not None:
        self.W = W
    if b is not None:
        self.b = b
    self.params = [self.W, self.b]
    self.setup_outputs(input)
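For reference, here is a minimal usage sketch. It assumes that setup_outputs() stores the symbolic activation in a layer.output attribute, as is conventional in the Theano MLP tutorials this class follows, and the sizes 784 and 500 are arbitrary illustrative choices:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                  # symbolic (n_examples, n_in) input
rng = np.random.RandomState(1234)  # seeded for reproducible weights

layer = HiddenLayer(x, n_in=784, n_out=500, rng=rng)

# Compile the forward pass; layer.output is assumed to hold
# tanh(dot(x, W) + b) as computed by setup_outputs().
f = theano.function([x], layer.output)

batch = np.random.rand(3, 784).astype(theano.config.floatX)
print(f(batch).shape)              # (3, 500)

Passing W or b explicitly skips the random initialization for that parameter, which is useful for tying weights between layers or restoring a saved model.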