def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    '''Upscale a keras tensor by a factor of 2.

    Args:
        ip: keras tensor
        nb_filters: number of filters used by the upsampling convolutions
        type: one of 'upsampling', 'subpixel' or 'deconv'; selects the kind
            of upsampling performed (any other value falls through to 'deconv').
            NOTE(review): the parameter shadows the builtin `type`; kept for
            backward compatibility with keyword callers.
        weight_decay: l2 regularization factor
    Returns: keras tensor, after applying the upsampling operation.
    '''
    if type == 'upsampling':
        return UpSampling2D()(ip)

    if type == 'subpixel':
        out = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
                     kernel_regularizer=l2(weight_decay), use_bias=False,
                     kernel_initializer='he_normal')(ip)
        out = SubPixelUpscaling(scale_factor=2)(out)
        return Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
                      kernel_regularizer=l2(weight_decay), use_bias=False,
                      kernel_initializer='he_normal')(out)

    # Default branch: strided transposed convolution.
    return Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same',
                           strides=(2, 2), kernel_initializer='he_normal',
                           kernel_regularizer=l2(weight_decay))(ip)
# Example source code using the Conv2D() class
def get_unet0(num_start_filters=32):
    """Build a U-Net-style segmentation model.

    Relies on module-level globals img_rows, img_cols, num_channels and
    num_mask_channels, and on the ConvBN2 double-conv helper. The output
    is cropped by 16 pixels on every side before the final 1x1 conv.
    """
    inputs = Input((img_rows, img_cols, num_channels))

    # Contracting path: double-conv block, keep the skip, then 2x2 max-pool.
    skips = []
    x = inputs
    for depth in range(4):
        x = ConvBN2(x, (2 ** depth) * num_start_filters)
        skips.append(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

    # Bottleneck.
    x = ConvBN2(x, 16 * num_start_filters)

    # Expanding path: upsample, concatenate the matching skip, double-conv.
    for depth in (3, 2, 1):
        x = concatenate([UpSampling2D(size=(2, 2))(x), skips[depth]])
        x = ConvBN2(x, (2 ** depth) * num_start_filters)

    # Final stage: conv/BN/selu twice with a central crop in between.
    x = concatenate([UpSampling2D(size=(2, 2))(x), skips[0]])
    x = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(x)
    x = BatchNormalization()(x)
    x = Activation('selu')(x)
    x = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(x)
    x = Cropping2D(cropping=((16, 16), (16, 16)))(x)
    x = BatchNormalization()(x)
    x = Activation('selu')(x)
    output = Conv2D(num_mask_channels, (1, 1))(x)

    return Model(inputs=inputs, outputs=output)
def build_simpleCNN(input_shape = (32, 32, 3), num_output = 10):
    """Simple classifier: two conv/BN/relu/pool stages and a softmax head.

    Requires a square input (h == w). Relies on a module-level `init`
    kernel initializer.
    """
    h, w, nch = input_shape
    assert h == w, 'expect input shape (h, w, nch), h == w'

    images = Input(shape = (h, h, nch))
    x = images
    # Two widening conv stages: 64 then 128 filters.
    for nb_filters in (64, 128):
        x = Conv2D(nb_filters, (4, 4), strides = (1, 1),
                   kernel_initializer = init, padding = 'same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size = (2, 2))(x)

    x = Flatten()(x)
    outputs = Dense(num_output, kernel_initializer = init,
                    activation = 'softmax')(x)
    return Model(inputs = images, outputs = outputs)
def _shortcut(inputs, x):
    """Residual shortcut: add `inputs` to `x`, projecting `inputs` with a
    strided 1x1 conv when the spatial size or channel count differs.

    NOTE(review): K.int_shape must return static (non-None) spatial dims
    here, otherwise the stride arithmetic raises — confirm callers always
    use fixed-size inputs. Relies on a module-level `init` initializer.
    """
    _, in_w, in_h, in_ch = K.int_shape(inputs)
    _, out_w, out_h, out_ch = K.int_shape(x)
    stride_w = int(round(in_w / out_w))
    stride_h = int(round(in_h / out_h))

    needs_projection = stride_w > 1 or stride_h > 1 or in_ch != out_ch
    if needs_projection:
        shortcut = Conv2D(out_ch, (1, 1),
                          strides = (stride_w, stride_h),
                          kernel_initializer = init, padding = 'valid')(inputs)
    else:
        shortcut = inputs
    return Add()([shortcut, x])
def build(input_shape, classes):
    """LeNet-style CNN: two (conv5x5 -> relu -> 2x2 pool) stages, a
    500-unit relu layer and a softmax output over `classes`."""
    model = Sequential()

    # Two CONV => RELU => POOL stages with 20 then 50 filters; only the
    # first layer declares the input shape.
    for stage, nb_filters in enumerate((20, 50)):
        extra = {'input_shape': input_shape} if stage == 0 else {}
        model.add(Conv2D(nb_filters, kernel_size=5, padding="same", **extra))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Flatten => RELU layers
    model.add(Flatten())
    model.add(Dense(500))
    model.add(Activation("relu"))

    # a softmax classifier
    model.add(Dense(classes))
    model.add(Activation("softmax"))
    return model
# network and training
def make_model(batch_size, image_dim):
    """Fully-convolutional net producing a per-pixel sigmoid map.

    Only image_dim[1] and image_dim[2] are read (height and width); the
    input has a single channel and a fixed batch size.
    """
    model = Sequential()
    model.add(BatchNormalization(
        batch_input_shape=(batch_size, image_dim[1], image_dim[2], 1)))

    # Three 3x3 relu convs with widening filter counts.
    for nb_filters in (16, 32, 64):
        model.add(Conv2D(nb_filters, [3, 3], activation='relu', padding='same'))
    model.add(Dropout(0.2))

    # Per-pixel sigmoid output.
    model.add(Conv2D(1, kernel_size=1, padding='same', activation='sigmoid'))
    return model
def first_2d_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Entry residual block: 1x1 conv -> BN -> relu -> dropout -> kxk conv,
    summed with a max-pooled shortcut of the input.

    NOTE(review): add() requires k2 to equal the input channel count —
    confirm at the call site.
    """
    k1, k2 = filters
    branch = Conv2D(k1, 1, padding='same', data_format='channels_last')(tensor_input)
    branch = BatchNormalization()(branch)
    branch = Activation('relu')(branch)
    branch = Dropout(dropout)(branch)
    branch = Conv2D(k2, kernel_size, padding='same', data_format='channels_last')(branch)

    shortcut = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(tensor_input)
    return add([branch, shortcut])
def repeated_2d_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Pre-activation residual block: (BN -> relu -> conv) twice with
    dropout in between, summed with a max-pooled shortcut of the input.

    NOTE(review): add() requires k2 to equal the input channel count —
    confirm at the call site.
    """
    k1, k2 = filters
    branch = BatchNormalization()(x)
    branch = Activation('relu')(branch)
    branch = Conv2D(k1, kernel_size, padding='same', data_format='channels_last')(branch)
    branch = BatchNormalization()(branch)
    branch = Activation('relu')(branch)
    branch = Dropout(dropout)(branch)
    branch = Conv2D(k2, kernel_size, padding='same', data_format='channels_last')(branch)

    shortcut = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(x)
    return add([branch, shortcut])
def first_2d_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Entry residual block (duplicate definition in this file): a
    conv/BN/relu/dropout/conv path added to a max-pooled copy of the input.

    NOTE(review): the sum requires k2 channels to match the input's —
    confirm at the call site.
    """
    k1, k2 = filters
    path = Conv2D(k1, 1, padding='same', data_format='channels_last')(tensor_input)
    path = BatchNormalization()(path)
    path = Activation('relu')(path)
    path = Dropout(dropout)(path)
    path = Conv2D(k2, kernel_size, padding='same', data_format='channels_last')(path)

    pooled = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(tensor_input)
    return add([path, pooled])
def repeated_2d_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Pre-activation residual block (duplicate definition in this file):
    two BN/relu/conv stages with dropout, added to a max-pooled shortcut.

    NOTE(review): the sum requires k2 channels to match the input's —
    confirm at the call site.
    """
    k1, k2 = filters
    path = BatchNormalization()(x)
    path = Activation('relu')(path)
    path = Conv2D(k1, kernel_size, padding='same', data_format='channels_last')(path)
    path = BatchNormalization()(path)
    path = Activation('relu')(path)
    path = Dropout(dropout)(path)
    path = Conv2D(k2, kernel_size, padding='same', data_format='channels_last')(path)

    pooled = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(x)
    return add([path, pooled])
def first_2d_block(tensor_input, filters, kernel_size=3, pooling_size=2, dropout=0.5):
    """Downsampling entry residual block: the second conv uses stride 2
    (the bare positional `2` is Conv2D's strides argument), halving the
    spatial size to line up with the default pool size of 2.

    NOTE(review): add() also requires k2 channels to match the input's —
    confirm at the call site.
    """
    k1, k2 = filters
    branch = Conv2D(k1, 1, padding='same', data_format='channels_last')(tensor_input)
    branch = BatchNormalization()(branch)
    branch = Activation('relu')(branch)
    branch = Dropout(dropout)(branch)
    # Stride-2 conv: downsamples by 2 to match the pooled shortcut.
    branch = Conv2D(k2, kernel_size, 2, padding='same', data_format='channels_last')(branch)

    shortcut = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(tensor_input)
    return add([branch, shortcut])
def repeated_2d_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    """Pre-activation residual block whose two convs BOTH use stride 2
    (the bare positional `2` is Conv2D's strides argument), so the branch
    is downsampled 4x overall.

    NOTE(review): with the default pooling_size=1 the shortcut is not
    downsampled at all, so add() would fail on spatial shape; callers
    presumably pass a matching pooling_size — confirm.
    """
    k1, k2 = filters
    branch = BatchNormalization()(x)
    branch = Activation('relu')(branch)
    branch = Conv2D(k1, kernel_size, 2, padding='same', data_format='channels_last')(branch)
    branch = BatchNormalization()(branch)
    branch = Activation('relu')(branch)
    branch = Dropout(dropout)(branch)
    branch = Conv2D(k2, kernel_size, 2, padding='same', data_format='channels_last')(branch)

    shortcut = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(x)
    return add([branch, shortcut])
def build_generator(self):
    """DCGAN-style generator: dense projection of the latent vector,
    reshape to 7x7x128, then two upsample+conv stages ending in a tanh
    image with self.channels channels. Prints a summary as a side effect.
    """
    layers = [
        Dense(1024, activation='relu', input_dim=self.latent_dim),
        BatchNormalization(momentum=0.8),
        Dense(128 * 7 * 7, activation="relu"),
        BatchNormalization(momentum=0.8),
        Reshape((7, 7, 128)),
        UpSampling2D(),
        Conv2D(64, kernel_size=4, padding="same"),
        Activation("relu"),
        BatchNormalization(momentum=0.8),
        UpSampling2D(),
        Conv2D(self.channels, kernel_size=4, padding='same'),
        Activation("tanh"),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.summary()

    latent = Input(shape=(self.latent_dim,))
    return Model(latent, model(latent))
def build_generator(self):
    """DCGAN generator mapping a 100-dim noise vector to a single-channel
    28x28 image in [-1, 1] (tanh). Prints a summary as a side effect."""
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=100))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))

    # Two upsample+conv stages: 7 -> 14 -> 28 spatial resolution.
    for nb_filters in (128, 64):
        model.add(UpSampling2D())
        model.add(Conv2D(nb_filters, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))

    model.add(Conv2D(1, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))
    model.summary()

    noise = Input(shape=(100,))
    return Model(noise, model(noise))
def set_cnn_model(ninstance=4, input_dim=4, input_length=107):
    """Multi-instance CNN over encoded sequences.

    Input shape is (ninstance, input_length, input_dim); all kernels span
    a single instance row (height 1). Ends in a 1x1 sigmoid conv, i.e. a
    per-position score map rather than a flattened classifier.
    """
    nbfilter = 16
    model = Sequential()
    model.add(Conv2D(input_shape=(ninstance, input_length, input_dim),
                     filters=nbfilter,
                     kernel_size=(1, 10),
                     padding="valid",
                     strides=1))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(1, 3)))
    model.add(Conv2D(filters=nbfilter * 2, kernel_size=(1, 32),
                     padding='valid', activation='relu', strides=1))
    model.add(Dropout(0.25))
    model.add(Conv2D(filters=1, kernel_size=(1, 1), padding='valid',
                     activation='sigmoid', strides=1))
    return model
def discriminator_model():
    """DCGAN discriminator for 28x28x1 inputs: two conv/tanh/pool stages,
    a 1024-unit tanh layer, and a sigmoid real/fake score."""
    layers = [
        Conv2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)),
        Activation('tanh'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(128, (5, 5)),
        Activation('tanh'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(1024),
        Activation('tanh'),
        Dense(1),
        Activation('sigmoid'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    return model
def create_model(img_height, img_width, img_channel):
    """Deblocking CNN with two skip concatenations back to the stem
    features; compiled with Adam/MSE and the module-level psnr/ssim
    metrics."""
    ip = Input(shape=(img_height, img_width, img_channel))
    stem = Conv2D(32, (11, 11), padding='same', activation='relu', kernel_initializer='glorot_uniform')(ip)

    x = Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform')(stem)
    x = Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform')(x)
    x = Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform')(x)
    # Skip back to the stem (flagged by the original author as optional).
    x = concatenate([x, stem], axis=-1)

    x = Conv2D(64, (1, 1), padding='same', activation='relu', kernel_initializer='glorot_uniform')(x)
    x = Conv2D(64, (5, 5), padding='same', activation='relu', kernel_initializer='glorot_uniform')(x)
    # Second skip back to the stem (also flagged as optional).
    x = concatenate([x, stem], axis=-1)

    x = Conv2D(128, (1, 1), padding='same', activation='relu', kernel_initializer='glorot_uniform')(x)
    out = Conv2D(img_channel, (5, 5), padding='same', activation='relu', kernel_initializer='glorot_uniform')(x)

    deblocking = Model(inputs=ip, outputs=out)
    deblocking.compile(optimizer=optimizers.Adam(lr=1e-4),
                       loss='mean_squared_error', metrics=[psnr, ssim])
    return deblocking
def create_model(img_height, img_width, img_channel):
    """SRResNet-style deblocking net: 9x9 stem + LeakyReLU, 3 residual
    blocks, a global residual skip, then 1x1 and tanh 9x9 convs; compiled
    with Adam/MSE and the module-level psnr/ssim metrics."""
    ip = Input(shape=(img_height, img_width, img_channel))
    stem = Conv2D(64, (9, 9), padding='same', activation='linear', kernel_initializer='glorot_uniform')(ip)
    stem = LeakyReLU(alpha=0.25)(stem)

    x = stem
    for _ in range(3):
        x = residual_block(x, 64, 3)
    x = Conv2D(64, (3, 3), padding='same', kernel_initializer='glorot_uniform')(x)
    x = BatchNormalization(axis=-1)(x)
    x = add([stem, x])  # global residual skip around the block stack

    x = Conv2D(128, (1, 1), padding='same', kernel_initializer='glorot_uniform')(x)
    op = Conv2D(img_channel, (9, 9), padding='same', activation='tanh', kernel_initializer='glorot_uniform')(x)

    deblocking = Model(inputs=ip, outputs=op)
    deblocking.compile(optimizer=optimizers.Adam(lr=1e-4),
                       loss='mean_squared_error', metrics=[psnr, ssim])
    return deblocking
def create_model(img_height, img_width, img_channel):
    """Super-resolution-style net: 9x9 stem + LeakyReLU, 5 residual
    blocks, a global residual skip, an upscale stage, and a tanh 9x9
    output; compiled with Adam/MSE and the module-level psnr/ssim
    metrics."""
    ip = Input(shape=(img_height, img_width, img_channel))
    stem = Conv2D(64, (9, 9), padding='same', activation='linear', kernel_initializer='glorot_uniform')(ip)
    stem = LeakyReLU(alpha=0.25)(stem)

    x = stem
    for _ in range(5):  # original author notes 15 as an alternative depth
        x = residual_block(x, 64, 3)
    x = Conv2D(64, (3, 3), padding='same', kernel_initializer='glorot_uniform')(x)
    x = BatchNormalization(axis=-1)(x)
    x = add([stem, x])  # global residual skip around the block stack

    x = upscale(x)
    op = Conv2D(img_channel, (9, 9), padding='same', activation='tanh', kernel_initializer='glorot_uniform')(x)

    deblocking = Model(inputs=ip, outputs=op)
    deblocking.compile(optimizer=optimizers.Adam(lr=1e-4),
                       loss='mean_squared_error', metrics=[psnr, ssim])
    return deblocking
def create_model(img_height, img_width, img_channel):
    """Deblocking CNN (duplicate of the skip-concatenation variant above):
    11x11 stem, three 3x3 convs, two concat-skips back to the stem, and a
    relu output conv; compiled with Adam/MSE and psnr/ssim metrics."""
    ip = Input(shape=(img_height, img_width, img_channel))
    stem = Conv2D(32, (11, 11), padding='same', activation='relu', kernel_initializer='glorot_uniform')(ip)

    x = Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform')(stem)
    x = Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform')(x)
    x = Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform')(x)
    x = concatenate([x, stem], axis=-1)

    x = Conv2D(64, (1, 1), padding='same', activation='relu', kernel_initializer='glorot_uniform')(x)
    x = Conv2D(64, (5, 5), padding='same', activation='relu', kernel_initializer='glorot_uniform')(x)
    x = concatenate([x, stem], axis=-1)

    x = Conv2D(128, (1, 1), padding='same', activation='relu', kernel_initializer='glorot_uniform')(x)
    out = Conv2D(img_channel, (5, 5), padding='same', activation='relu', kernel_initializer='glorot_uniform')(x)

    deblocking = Model(inputs=ip, outputs=out)
    deblocking.compile(optimizer=optimizers.Adam(lr=1e-4),
                       loss='mean_squared_error', metrics=[psnr, ssim])
    return deblocking
def create_model(img_height, img_width, img_channel):
    """Residual deblocking net without a global skip: 9x9 stem with BN and
    LeakyReLU, 5 residual blocks, two trailing 3x3 convs, tanh 9x9 output;
    compiled with Adam/MSE and the module-level psnr/ssim metrics."""
    ip = Input(shape=(img_height, img_width, img_channel))
    x = Conv2D(64, (9, 9), padding='same', activation='linear', kernel_initializer='glorot_uniform')(ip)
    x = BatchNormalization(axis=-1)(x)
    x = LeakyReLU(alpha=0.25)(x)

    for _ in range(5):
        x = residual_block(x, 64, 3)

    x = Conv2D(64, (3, 3), padding='same', kernel_initializer='glorot_uniform')(x)
    x = BatchNormalization(axis=-1)(x)
    x = Conv2D(64, (3, 3), padding='same', activation='relu')(x)
    op = Conv2D(img_channel, (9, 9), padding='same', activation='tanh', kernel_initializer='glorot_uniform')(x)

    deblocking = Model(inputs=ip, outputs=op)
    deblocking.compile(optimizer=optimizers.Adam(lr=1e-4),
                       loss='mean_squared_error', metrics=[psnr, ssim])
    return deblocking
def make_network():
    """CNN for 128x128 RGB inputs ending in a single linear unit (a raw
    score/regression output; a tanh activation was deliberately left
    disabled by the original author)."""
    return Sequential([
        Conv2D(32, (3, 3), padding='same', input_shape=(128, 128, 3)),
        Activation('relu'),
        Conv2D(32, (3, 3)),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.5),
        Flatten(),
        Dense(128),
        Activation('relu'),
        Dropout(0.5),
        Dense(1),
    ])
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, Relu, 1x1 Conv2D (with optional compression) and AveragePooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
        in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv, avgpool
    '''
    # channels_first keeps the feature axis at index 1; otherwise it is last.
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    # 1x1 conv compresses the channel count by `compression` (<= 1.0).
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    # Halve the spatial resolution.
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x
def __initial_conv_block_imagenet(input, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and leaky relu,
    followed by a strided max-pool (ImageNet-style stem: 4x total downsample).
    Args:
        input: input tensor
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    # channels_first keeps the feature axis at index 1; otherwise it is last.
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    # Strided 7x7 conv: first 2x spatial downsample.
    x = Conv2D(64, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)
    # Strided 3x3 max-pool: second 2x spatial downsample.
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    return x
def build(input_shape, classes):
    """LeNet-style classifier (duplicate definition in this file):
    (conv5x5 -> relu -> 2x2 pool) twice, a 500-unit relu layer, and a
    softmax output over `classes`."""
    return Sequential([
        # CONV => RELU => POOL
        Conv2D(20, kernel_size=5, padding="same", input_shape=input_shape),
        Activation("relu"),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        # CONV => RELU => POOL
        Conv2D(50, kernel_size=5, padding="same"),
        Activation("relu"),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        # Flatten => RELU layers
        Flatten(),
        Dense(500),
        Activation("relu"),
        # a softmax classifier
        Dense(classes),
        Activation("softmax"),
    ])
# network and training
def make_network():
    """CNN for 128x128 RGB inputs with an 11-way softmax classification
    head (same trunk as the scoring variant of make_network above)."""
    return Sequential([
        Conv2D(32, (3, 3), padding='same', input_shape=(128, 128, 3)),
        Activation('relu'),
        Conv2D(32, (3, 3)),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.5),
        Flatten(),
        Dense(128),
        Activation('relu'),
        Dropout(0.5),
        Dense(11),
        Activation('softmax'),
    ])
def ConvBN2(x, num_filter, stride_size=3):
    """Two (Conv2D -> BN -> selu) stages with num_filter filters each.

    NOTE(review): `stride_size` is used as the square KERNEL size, not the
    stride — the name looks like a misnomer; kept for caller compatibility.
    """
    for _ in range(2):
        x = Conv2D(num_filter, (stride_size, stride_size), padding='same',
                   kernel_initializer='he_uniform')(x)
        x = BatchNormalization()(x)
        x = Activation('selu')(x)
    return x
def ConvBN2(x, num_filter, stride_size=3):
    """Double conv block (duplicate definition): Conv2D -> BN -> selu,
    applied twice with the same filter count.

    NOTE(review): `stride_size` is actually the square kernel size; the
    name is kept only for caller compatibility.
    """
    for _ in range(2):
        x = Conv2D(num_filter, (stride_size, stride_size), padding='same',
                   kernel_initializer='he_uniform')(x)
        x = BatchNormalization()(x)
        x = Activation('selu')(x)
    return x
def ConvBN2(x, num_filter, stride_size=3):
    """Double conv block (duplicate definition): Conv2D -> BN -> selu,
    applied twice with the same filter count.

    NOTE(review): `stride_size` is actually the square kernel size; the
    name is kept only for caller compatibility.
    """
    for _ in range(2):
        x = Conv2D(num_filter, (stride_size, stride_size), padding='same',
                   kernel_initializer='he_uniform')(x)
        x = BatchNormalization()(x)
        x = Activation('selu')(x)
    return x
def get_unet0(num_start_filters=32):
    """U-Net-style model (duplicate definition in this file).

    Uses module-level img_rows, img_cols, num_channels, num_mask_channels
    and the ConvBN2 double-conv helper; the decoder output is centre-cropped
    by 16 pixels per side before the final 1x1 conv.
    """
    inputs = Input((img_rows, img_cols, num_channels))

    # Encoder: double-conv, remember skip, 2x2 max-pool; filters double per level.
    encoder_skips = []
    net = inputs
    for level in range(4):
        net = ConvBN2(net, (2 ** level) * num_start_filters)
        encoder_skips.append(net)
        net = MaxPooling2D(pool_size=(2, 2))(net)

    net = ConvBN2(net, 16 * num_start_filters)  # bottleneck

    # Decoder: upsample, concat matching skip, double-conv.
    for level in (3, 2, 1):
        net = concatenate([UpSampling2D(size=(2, 2))(net), encoder_skips[level]])
        net = ConvBN2(net, (2 ** level) * num_start_filters)

    # Top level: conv/BN/selu twice with a central crop in between.
    net = concatenate([UpSampling2D(size=(2, 2))(net), encoder_skips[0]])
    net = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(net)
    net = BatchNormalization()(net)
    net = Activation('selu')(net)
    net = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(net)
    net = Cropping2D(cropping=((16, 16), (16, 16)))(net)
    net = BatchNormalization()(net)
    net = Activation('selu')(net)
    mask = Conv2D(num_mask_channels, (1, 1))(net)

    return Model(inputs=inputs, outputs=mask)