def convolve(self, input, deterministic=False, **kwargs):
    """ Binary convolution. Both the input and the weights are binary (+1 or -1).
    This overrides the convolve operation from the Conv2DLayer implementation.
    """
    if self.xnor:
        # compute the binarized input H and the scaling matrix K
        input, K = binarize_conv_input(input, self.beta_filter)
        # compute the binarized filters and the per-filter scaling factors alpha
        self.Wb, alpha = binarize_conv_filters(self.W)
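        # During training, store the freshly computed alpha in the shared variable
        # self.xalpha via a Theano default_update on a clone; the "alpha += 0*old_alpha"
        # term keeps the clone in the graph so the update actually fires. At test
        # time the stored value is simply reused.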
        if not deterministic:
            old_alpha = theano.clone(self.xalpha, share_inputs=False)
            old_alpha.default_update = alpha
            alpha += 0*old_alpha
        else:
            alpha = self.xalpha
        # TODO: Use XNOR ops for the convolution. As of now using Lasagne's convolution
        # for functionality verification.
        # approx weight tensor
        #W_full_precision = self.Wb * alpha.dimshuffle(0, 'x', 'x', 'x')
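        # temporarily swap the binarized weights into self.W so the parent class
        # convolution below operates on them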
        Wr = self.W
        self.W = self.Wb
        feat_maps = super(Conv2DLayer, self).convolve(input, **kwargs)
        # restore the approx full precision weights for gradient computation
        #self.W = W_full_precision
        self.W = Wr
        # scale by K and alpha
        # FIXME: we are actually scaling after the bias has been added here; we should
        # scale first and then add the bias. The super class method adds the bias
        # automatically, so we need a way around this, perhaps by subtracting the bias,
        # scaling by alpha and K, and then adding the bias back?
        feat_maps = feat_maps * K
        feat_maps = feat_maps * alpha.dimshuffle('x', 0, 'x', 'x')
    else:
        feat_maps = super(Conv2DLayer, self).convolve(input, **kwargs)
    return feat_maps
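
For reference, the K and alpha scaling at the end of convolve follows the XNOR-Net approximation conv(I, W) ≈ conv(sign(I), sign(W)) scaled element-wise by K and by the per-filter alpha, where alpha is the mean absolute value of the filter weights and K is obtained by averaging |I| over channels and then over each receptive field. Below is a minimal NumPy/SciPy sketch of that idea on a toy single-filter example; the shapes and the scipy.signal.correlate2d stand-in are assumptions for illustration only, and it does not reproduce the exact internals of binarize_conv_input / binarize_conv_filters.

import numpy as np
from scipy.signal import correlate2d

np.random.seed(0)
I = np.random.randn(3, 8, 8)   # toy input: channels x height x width
W = np.random.randn(3, 3, 3)   # one toy filter: channels x kh x kw

# exact full-precision cross-correlation (valid mode, single output map)
exact = sum(correlate2d(I[c], W[c], mode='valid') for c in range(3))

# XNOR-Net style approximation: conv(sign(I), sign(W)) scaled by K and alpha
alpha = np.abs(W).mean()                    # per-filter scaling factor
A = np.abs(I).mean(axis=0)                  # mean |I| across channels
k = np.full((3, 3), 1.0 / 9)                # averaging kernel, 1/(kh*kw)
K = correlate2d(A, k, mode='valid')         # per-position scaling matrix
binary = sum(correlate2d(np.sign(I[c]), np.sign(W[c]), mode='valid')
             for c in range(3))
approx = binary * K * alpha

# compare how closely the binary approximation tracks the exact result
print(np.corrcoef(exact.ravel(), approx.ravel())[0, 1])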