def u_net(self, x, layers=4, base_channel=64, train=True):
    """Build a U-Net and return per-pixel logits.

    Args:
        x: input image tensor; the first conv expects 3 input channels
           (presumably NHWC RGB — TODO confirm against caller).
        layers: total depth of the network (encoder levels + bottleneck).
        base_channel: channel count of the first encoder level; doubles
            at each deeper level.
        train: currently unused — the training-time dropout that consumed
            it is commented out below.

    Returns:
        A tensor of logits with the channel axis squeezed out
        (shape [batch, h, w] for NHWC input).
    """
    ds_layers = {}       # encoder feature maps, keyed by level, for skip connections
    ds_layer_shape = {}  # dynamic shapes of those maps, for the deconv output_shape
    # Contracting path: two 3x3 convs per level, then max-pool.
    for layer in range(0, layers - 1):
        f_channels = base_channel * (2 ** layer)
        layer_name = 'ds_{}'.format(layer)
        if layer == 0:
            # First level reads the 3-channel input image.
            x = conv2d(x, [3, 3, 3, f_channels], layer_name + '_1')
        else:
            # BUG FIX: was `f_channels/2` — true division returns a float in
            # Python 3 and breaks the integer filter-shape spec; use `//`.
            x = conv2d(x, [3, 3, f_channels // 2, f_channels], layer_name + '_1')
        x = conv2d(x, [3, 3, f_channels, f_channels], layer_name + '_2')
        ds_layers[layer] = x
        ds_layer_shape[layer] = tf.shape(x)
        x = maxpooling(x)
    # Bottleneck: deepest level, no pooling afterwards.
    f_channels = base_channel * (2 ** (layers - 1))
    # BUG FIX: `//` instead of `/` here as well (see note above).
    x = conv2d(x, [3, 3, f_channels // 2, f_channels], 'bottom_1')
    x = conv2d(x, [3, 3, f_channels, f_channels], 'bottom_2')
    # Expanding path: deconv up to the matching encoder shape, concat the
    # skip connection, then two 3x3 convs.
    for layer in range(layers - 2, -1, -1):
        f_channels = base_channel * (2 ** layer)
        layer_name = 'up_{}'.format(layer)
        x = deconv2d(x, [3, 3, f_channels, 2 * f_channels],
                     ds_layer_shape[layer], layer_name + '_deconv2d')
        # Concatenate the saved encoder features with the upsampled ones.
        x = concat(ds_layers[layer], x)
        x = conv2d(x, [3, 3, 2 * f_channels, f_channels], layer_name + '_conv_1')
        x = conv2d(x, [3, 3, f_channels, f_channels], layer_name + '_conv_2')
        # NOTE(review): training-time dropout was disabled by the original
        # author; `train` is dead until this is restored:
        # if train:
        #     x = tf.nn.dropout(x, self.dropout)
    # 1x1 convolution (no activation) collapses channels to a single logit map.
    x = conv2d(x, [1, 1, base_channel, 1], 'conv_1x1', activation='no')
    logits = tf.squeeze(x, axis=3)
    return logits
# (stripped scraped blog-page boilerplate: "评论列表" / "文章目录" —
#  "comment list" / "article table of contents"; not part of the code)