import numpy
import theano
import theano.tensor as T
from theano.tensor.nnet import conv


def convolutionalLayer(inpu, feature_map, batch, length, window, dim, prefix, params, names):
    """Length-preserving 1-D convolution over a (batch, 1, length, dim) input."""
    # Split the (window - 1) padding rows asymmetrically between the two ends
    # of the sequence so the 'valid' convolution preserves its length.
    down = window // 2
    up = window - down - 1
    zodown = T.zeros((batch, 1, down, dim), dtype=theano.config.floatX)
    zoup = T.zeros((batch, 1, up, dim), dtype=theano.config.floatX)
    # Pad the input along the sequence axis (axis 2) before convolving.
    inps = T.cast(T.concatenate([zoup, inpu, zodown], axis=2), dtype=theano.config.floatX)
    # Fan-in/fan-out for Glorot-style initialization of the filter weights.
    fan_in = window * dim
    fan_out = feature_map * window * dim // length
    filter_shape = (feature_map, 1, window, dim)
    image_shape = (batch, 1, length + down + up, dim)
    # Glorot/Xavier uniform initialization, matched to the tanh used below.
    W_bound = numpy.sqrt(6. / (fan_in + fan_out))
    conv_W = theano.shared(numpy.random.uniform(low=-W_bound, high=W_bound,
                                                size=filter_shape).astype(theano.config.floatX))
    conv_b = theano.shared(numpy.zeros(filter_shape[0], dtype=theano.config.floatX))
    # Register the layer's parameters so the caller can collect them for training.
    params += [conv_W, conv_b]
    names += [prefix + '_convL_W_' + str(window), prefix + '_convL_b_' + str(window)]
    conv_out = conv.conv2d(input=inps, filters=conv_W,
                           filter_shape=filter_shape, image_shape=image_shape)
    # Broadcast the per-feature-map bias and apply the tanh non-linearity.
    conv_out = T.tanh(conv_out + conv_b.dimshuffle('x', 0, 'x', 'x'))
    # conv_out is (batch, feature_map, length, 1); reorder and flatten to
    # (batch, length, feature_map), one feature vector per token position.
    return conv_out.dimshuffle(0, 2, 1, 3).flatten(3)
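
Below is a minimal usage sketch. The symbolic input variable `sents`, the hyperparameter values, and the compiled function are illustrative assumptions, not part of the original code.

# Minimal usage sketch; all names and values here are assumptions for
# illustration only.
batch, length, dim, window, feature_map = 32, 50, 100, 3, 150
sents = T.tensor4('sents')  # expected shape: (batch, 1, length, dim)
params, names = [], []
feats = convolutionalLayer(sents, feature_map, batch, length,
                           window, dim, 'cnn', params, names)
# feats has symbolic shape (batch, length, feature_map): one convolved
# feature vector per token, since the padding preserves sequence length.
conv_fn = theano.function([sents], feats)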