import tensorflow as tf

def BinarizedAffine(nOutputPlane, bias=True, name=None, reuse=None):
    def b_affineLayer(x, is_training=True):
        '''
        Note that we use the binarized versions of the input (bin_x) and of the
        weights (bin_w). Since binarize uses the straight-through estimator (STE),
        the gradients are computed through bin_x and bin_w, but the update is
        applied to w (the full-precision version).
        '''
        with tf.variable_op_scope([x], name, 'Affine', reuse=reuse):
            # Binarize the input, then flatten it to [batch, features].
            # binarize is defined elsewhere; see the sketch after this function.
            bin_x = binarize(x)
            reshaped = tf.reshape(bin_x, [x.get_shape().as_list()[0], -1])
            nInputPlane = reshaped.get_shape().as_list()[1]
            # Full-precision weights are stored; only their binarized copy is used in the matmul.
            w = tf.get_variable('weight', [nInputPlane, nOutputPlane],
                                initializer=tf.contrib.layers.xavier_initializer())
            bin_w = binarize(w)
            output = tf.matmul(reshaped, bin_w)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane], initializer=tf.zeros_initializer)
                output = tf.nn.bias_add(output, b)
            return output
    return b_affineLayer
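The binarize helper referenced above is not included in this snippet. Below is a minimal sketch, assuming the common sign-plus-STE formulation used in binarized networks; the function body is an assumption, not taken from the original code.

import tensorflow as tf

def binarize(x):
    # Forward pass: hard sign, so values are in {-1, +1}
    # (note tf.sign maps 0 to 0; many implementations map 0 to +1).
    # Backward pass: straight-through estimator (STE) -- tf.stop_gradient hides
    # (sign(x) - x) from autodiff, so the gradient w.r.t. x is the identity.
    return x + tf.stop_gradient(tf.sign(x) - x)

# Example usage of the layer factory (illustrative shapes):
# features = tf.placeholder(tf.float32, [128, 784])
# logits = BinarizedAffine(10, name='fc_out')(features)   # -> [128, 10]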