def mdclW(num_filters,num_channels,filter_size,winit,name,scales):
    """Build a Multiscale Dilated Convolution (MDC) filter bank as a Theano expression.

    A single learnable base filter is written into a larger zero filter once per
    dilation factor in `scales`, each copy multiplied by its own learnable
    per-output-filter coefficient.

    Parameters
    ----------
    num_filters : int   -- number of output filters.
    num_channels : int  -- number of input channels.
    filter_size : int   -- spatial size of the (square) undilated base filter.
    winit               -- Lasagne initializer for the base filter weights.
    name : str          -- prefix for the names of the created shared variables.
    scales : sequence of int, ascending -- dilation factors; scales[-1] is the largest.

    Returns
    -------
    Symbolic 4D tensor of shape (num_filters, num_channels, size, size), where
    size = filter_size + (filter_size-1)*(scales[-1]-1).
    """
    # Coefficient initializer: each scale's coefficient starts at 1/(1+len(scales)).
    sinit = lasagne.init.Constant(1.0/(1+len(scales)))
    # Total filter size once dilated by the largest scale.
    size = filter_size + (filter_size-1)*(scales[-1]-1)
    # Multiscale dilated filter, assembled by writing each dilated copy into W.
    W = T.zeros((num_filters,num_channels,size,size))
    # Undilated base filter (the only learnable spatial weights).
    baseW = theano.shared(lasagne.utils.floatX(winit.sample((num_filters,num_channels,filter_size,filter_size))),name=name+'.W')
    # Iterate from the largest to the smallest dilation so the main (least
    # dilated) filter is written last and sits "on top".
    # BUGFIX: the original looped `for scale in enumerate(scales[::-1])`, which
    # binds `scale` to an (index, value) tuple and crashes in the slice
    # arithmetic below; iterate the scale values directly.
    for scale in scales[::-1]:
        # Center the scale-dilated copy inside the full-size filter.
        # Generalized from the original offset `scales[-1]-scale`, which is
        # only correct for filter_size == 3 (identical result in that case).
        offset = ((filter_size-1)//2)*(scales[-1]-scale)
        stop = offset + (filter_size-1)*scale + 1  # exactly filter_size strided indices
        # Per-scale, per-output-filter learnable coefficient.
        coeff = theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)),
                              name+'.coeff_'+str(scale))
        W = T.set_subtensor(W[:,:,offset:stop:scale,offset:stop:scale],
                            baseW*coeff.dimshuffle(0,'x','x','x'))
    return W
# Subpixel Upsample Layer from (https://arxiv.org/abs/1609.05158)
# This layer uses a set of r^2 set_subtensor calls to reorganize the tensor in a subpixel-layer upscaling style
# as done in the ESPCN paper by Magic Pony for super-resolution.
# r is the upscale factor.
# c is the number of output channels.
# (removed blog-scrape residue: bare non-code text "评论列表" / "文章目录",
#  which would be a SyntaxError at module level)