def fcn_32s(input_dim, nb_classes=2):
    inputs = Input(shape=(input_dim, input_dim, 3))
    vgg16 = VGG16(weights=None, include_top=False, input_tensor=inputs)
    pretrain_model_path = "../weights/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5"
    if not os.path.exists(pretrain_model_path):
        raise RuntimeError("No pretrained model loaded.")
    vgg16.load_weights(pretrain_model_path)
    x = Conv2D(filters=nb_classes,
               kernel_size=(1, 1))(vgg16.output)
    x = Conv2DTranspose(filters=nb_classes,
                        kernel_size=(64, 64),
                        strides=(32, 32),
                        padding='same',
                        activation='sigmoid',
                        kernel_initializer=initializers.Constant(bilinear_upsample_weights(32, nb_classes)))(x)
    model = Model(inputs=inputs, outputs=x)
    for layer in model.layers[:15]:
        layer.trainable = False
    return model
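The bilinear_upsample_weights helper referenced above is not shown. A minimal sketch, assuming the usual FCN recipe of initializing the transposed convolution with a bilinear-interpolation kernel (with factor=32 this yields the (64, 64) kernel size used above):

import numpy as np

def bilinear_upsample_weights(factor, number_of_classes):
    """Bilinear-interpolation weights for initializing a Conv2DTranspose.

    Returns an array of shape (size, size, number_of_classes, number_of_classes)
    where size = 2 * factor - factor % 2 (64 when factor == 32).
    """
    filter_size = 2 * factor - factor % 2
    center = factor - 1 if filter_size % 2 == 1 else factor - 0.5
    og = np.ogrid[:filter_size, :filter_size]
    kernel = (1 - abs(og[0] - center) / factor) * \
             (1 - abs(og[1] - center) / factor)
    weights = np.zeros((filter_size, filter_size,
                        number_of_classes, number_of_classes), dtype=np.float32)
    for i in range(number_of_classes):
        weights[:, :, i, i] = kernel  # each class channel upsamples independently
    return weights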
def get_model():
    inputs = Input(shape=(64, 64, 3))
    conv_1 = Conv2D(1, (3, 3), strides=(1, 1), padding='same')(inputs)
    act_1 = Activation('relu')(conv_1)
    conv_2 = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(act_1)
    act_2 = Activation('relu')(conv_2)
    deconv_1 = Conv2DTranspose(64, (3, 3), strides=(1, 1), padding='same')(act_2)
    act_3 = Activation('relu')(deconv_1)
    merge_1 = concatenate([act_3, act_1], axis=3)
    deconv_2 = Conv2DTranspose(1, (3, 3), strides=(1, 1), padding='same')(merge_1)
    act_4 = Activation('relu')(deconv_2)
    model = Model(inputs=[inputs], outputs=[act_4])
    model.compile(optimizer='adadelta', loss=dice_coef_loss, metrics=[dice_coef])
    return model
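dice_coef and dice_coef_loss are not shown either; one common backend-level definition (an assumption, not necessarily the original author's) is:

from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Soft Dice coefficient over the flattened masks; smooth avoids division by zero
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    # Maximizing Dice overlap == minimizing its negative
    return -dice_coef(y_true, y_pred)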
def test_keras_export(self):
    tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                              'keras_export_test.json'), 'r')
    response = json.load(tests)
    tests.close()
    net = yaml.safe_load(json.dumps(response['net']))
    net = {'l0': net['Input'], 'l1': net['Deconvolution']}
    net['l0']['connection']['output'].append('l1')
    # Test 1
    inp = data(net['l0'], '', 'l0')['l0']
    temp = deconvolution(net['l1'], [inp], 'l1')
    model = Model(inp, temp['l1'])
    self.assertEqual(model.layers[2].__class__.__name__, 'Conv2DTranspose')
    # Test 2
    net['l1']['params']['weight_filler'] = 'xavier'
    net['l1']['params']['bias_filler'] = 'xavier'
    inp = data(net['l0'], '', 'l0')['l0']
    temp = deconvolution(net['l1'], [inp], 'l1')
    model = Model(inp, temp['l1'])
    self.assertEqual(model.layers[2].__class__.__name__, 'Conv2DTranspose')
def test_tiny_deconv_random(self):
    # In Keras 2, deconvolution auto-computes the output shape.
    np.random.seed(1988)
    input_dim = 13
    input_shape = (input_dim, input_dim, 5)
    num_kernels = 16
    kernel_height = 3
    kernel_width = 3
    # Define a model
    model = Sequential()
    model.add(Conv2DTranspose(filters=num_kernels, kernel_size=(kernel_height, kernel_width),
                              input_shape=input_shape, padding='valid', strides=(2, 2)))
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Test the keras model
    self._test_keras_model(model)
def test_tiny_deconv_random_same_padding(self):
    np.random.seed(1988)
    input_dim = 14
    input_shape = (input_dim, input_dim, 3)
    num_kernels = 16
    kernel_height = 3
    kernel_width = 3
    # Define a model
    model = Sequential()
    model.add(Conv2DTranspose(filters=num_kernels, kernel_size=(kernel_height, kernel_width),
                              input_shape=input_shape, padding='same', strides=(2, 2)))
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Test the keras model
    self._test_keras_model(model)
def fsrcnn(x, d=56, s=12, m=4, scale=3):
    """Build an FSRCNN model.
    See https://arxiv.org/abs/1608.00367
    """
    model = Sequential()
    model.add(InputLayer(input_shape=x.shape[-3:]))
    c = x.shape[-1]
    f = [5, 1] + [3] * m + [1]
    n = [d, s] + [s] * m + [d]
    for ni, fi in zip(n, f):
        model.add(Conv2D(ni, fi, padding='same',
                         kernel_initializer='he_normal', activation='relu'))
    model.add(Conv2DTranspose(c, 9, strides=scale, padding='same',
                              kernel_initializer='he_normal'))
    return model
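Because the final Conv2DTranspose uses padding='same' with strides=scale, the output resolution is exactly scale times the input resolution. A quick shape check with a hypothetical input batch:

import numpy as np

lr_batch = np.zeros((1, 32, 32, 1), dtype=np.float32)  # hypothetical 32x32 grayscale LR patches
model = fsrcnn(lr_batch, scale=3)
model.summary()  # final layer outputs (None, 96, 96, 1): 32 * 3 = 96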
def nsfsrcnn(x, d=56, s=12, m=4, scale=3, pos=1):
    """Build an FSRCNN model, but change deconv position.
    See https://arxiv.org/abs/1608.00367
    """
    model = Sequential()
    model.add(InputLayer(input_shape=x.shape[-3:]))
    c = x.shape[-1]
    f1 = [5, 1] + [3] * pos
    n1 = [d, s] + [s] * pos
    f2 = [3] * (m - pos - 1) + [1]
    n2 = [s] * (m - pos - 1) + [d]
    f3 = 9
    n3 = c
    for ni, fi in zip(n1, f1):
        model.add(Conv2D(ni, fi, padding='same',
                         kernel_initializer='he_normal', activation='relu'))
    model.add(Conv2DTranspose(s, 3, strides=scale, padding='same',
                              kernel_initializer='he_normal'))
    for ni, fi in zip(n2, f2):
        model.add(Conv2D(ni, fi, padding='same',
                         kernel_initializer='he_normal', activation='relu'))
    model.add(Conv2D(n3, f3, padding='same',
                     kernel_initializer='he_normal'))
    return model
def fcn_Resnet50(input_shape=None, weight_decay=0.0002, batch_momentum=0.9, batch_shape=None, classes=40):
    img_input = Input(shape=input_shape)
    bn_axis = 3
    # Keras 2 API: strides/padding/kernel_regularizer replace the deprecated
    # subsample/border_mode/W_regularizer arguments.
    x = Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='same', name='conv1',
               kernel_regularizer=l2(weight_decay))(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = conv_block(3, [64, 64, 256], stage=2, block='a', strides=(1, 1))(x)
    x = identity_block(3, [64, 64, 256], stage=2, block='b')(x)
    x = identity_block(3, [64, 64, 256], stage=2, block='c')(x)
    x = conv_block(3, [128, 128, 512], stage=3, block='a')(x)
    x = identity_block(3, [128, 128, 512], stage=3, block='b')(x)
    x = identity_block(3, [128, 128, 512], stage=3, block='c')(x)
    x = identity_block(3, [128, 128, 512], stage=3, block='d')(x)
    x = conv_block(3, [256, 256, 1024], stage=4, block='a')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='b')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='c')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='d')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='e')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='f')(x)
    x = conv_block(3, [512, 512, 2048], stage=5, block='a')(x)
    x = identity_block(3, [512, 512, 2048], stage=5, block='b')(x)
    x = identity_block(3, [512, 512, 2048], stage=5, block='c')(x)
    # classifying layer (use the classes parameter instead of a hardcoded 40)
    x = Conv2D(filters=classes, kernel_size=(1, 1), strides=(1, 1), kernel_initializer='he_normal',
               activation='linear', padding='valid', kernel_regularizer=l2(weight_decay))(x)
    x = Conv2DTranspose(filters=classes, kernel_initializer='he_normal', kernel_size=(64, 64),
                        strides=(32, 32), padding='valid', use_bias=False, name='upscore2')(x)
    x = Cropping2D(cropping=((19, 36), (19, 29)), name='score')(x)
    model = Model(img_input, x)
    weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5', RES_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    model.load_weights(weights_path, by_name=True)
    return model
def test_keras_import(self):
    model = Sequential()
    model.add(Conv2DTranspose(32, (3, 3), kernel_regularizer=regularizers.l2(0.01),
                              bias_regularizer=regularizers.l2(0.01),
                              activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                              bias_constraint='max_norm', activation='relu', input_shape=(1, 16, 16)))
    model.build()
    self.keras_param_test(model, 1, 13)
def deconvolution(layer, layer_in, layerId):
    out = {}
    padding = get_padding(layer)
    k_h, k_w = layer['params']['kernel_h'], layer['params']['kernel_w']
    s_h, s_w = layer['params']['stride_h'], layer['params']['stride_w']
    d_h, d_w = layer['params']['dilation_h'], layer['params']['dilation_w']
    if layer['params']['weight_filler'] in fillerMap:
        kernel_initializer = fillerMap[layer['params']['weight_filler']]
    else:
        kernel_initializer = layer['params']['weight_filler']
    if layer['params']['bias_filler'] in fillerMap:
        bias_initializer = fillerMap[layer['params']['bias_filler']]
    else:
        bias_initializer = layer['params']['bias_filler']
    filters = layer['params']['num_output']
    if padding == 'custom':
        p_h, p_w = layer['params']['pad_h'], layer['params']['pad_w']
        out[layerId + 'Pad'] = ZeroPadding2D(padding=(p_h, p_w))(*layer_in)
        padding = 'valid'
        layer_in = [out[layerId + 'Pad']]
    kernel_regularizer = regularizerMap[layer['params']['kernel_regularizer']]
    bias_regularizer = regularizerMap[layer['params']['bias_regularizer']]
    activity_regularizer = regularizerMap[layer['params']['activity_regularizer']]
    kernel_constraint = constraintMap[layer['params']['kernel_constraint']]
    bias_constraint = constraintMap[layer['params']['bias_constraint']]
    use_bias = layer['params']['use_bias']
    out[layerId] = Conv2DTranspose(filters, [k_h, k_w], strides=(s_h, s_w), padding=padding,
                                   dilation_rate=(d_h, d_w), kernel_initializer=kernel_initializer,
                                   bias_initializer=bias_initializer,
                                   kernel_regularizer=kernel_regularizer,
                                   bias_regularizer=bias_regularizer,
                                   activity_regularizer=activity_regularizer, use_bias=use_bias,
                                   bias_constraint=bias_constraint,
                                   kernel_constraint=kernel_constraint)(*layer_in)
    return out
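The key names above are read straight out of layer['params']; a hypothetical minimal dict that deconvolution() would accept looks like the sketch below. The values are illustrative only and must be valid keys of fillerMap, regularizerMap, and constraintMap, which are not shown here:

layer = {
    'params': {
        'kernel_h': 3, 'kernel_w': 3,
        'stride_h': 2, 'stride_w': 2,
        'dilation_h': 1, 'dilation_w': 1,
        'weight_filler': 'xavier',        # mapped through fillerMap when present
        'bias_filler': 'constant',
        'num_output': 16,
        'kernel_regularizer': 'l2',       # illustrative regularizerMap keys
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'kernel_constraint': 'max_norm',  # illustrative constraintMap keys
        'bias_constraint': 'max_norm',
        'use_bias': True,
    }
}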
def generator(self):
    if self.G:
        return self.G
    self.G = Sequential()
    dropout = 0.4
    depth = 64 + 64 + 64 + 64
    dim = 7
    # In: 100
    # Out: dim x dim x depth
    self.G.add(Dense(dim * dim * depth, input_dim=100))
    self.G.add(BatchNormalization(momentum=0.9))
    self.G.add(Activation('relu'))
    self.G.add(Reshape((dim, dim, depth)))
    self.G.add(Dropout(dropout))
    # In: dim x dim x depth
    # Out: 2*dim x 2*dim x depth/2
    self.G.add(UpSampling2D())
    self.G.add(Conv2DTranspose(int(depth / 2), 5, padding='same'))
    self.G.add(BatchNormalization(momentum=0.9))
    self.G.add(Activation('relu'))
    self.G.add(UpSampling2D())
    self.G.add(Conv2DTranspose(int(depth / 4), 5, padding='same'))
    self.G.add(BatchNormalization(momentum=0.9))
    self.G.add(Activation('relu'))
    self.G.add(Conv2DTranspose(int(depth / 8), 5, padding='same'))
    self.G.add(BatchNormalization(momentum=0.9))
    self.G.add(Activation('relu'))
    # Out: 28 x 28 x 1 grayscale image, [0.0, 1.0] per pixel
    self.G.add(Conv2DTranspose(1, 5, padding='same'))
    self.G.add(Activation('sigmoid'))
    self.G.summary()
    return self.G
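The two UpSampling2D stages take the 7x7 seed to 14x14 and then 28x28, so the output matches MNIST-sized images. A sampling sketch, assuming G is bound to the model returned by generator() above:

import numpy as np

noise = np.random.uniform(-1.0, 1.0, size=(16, 100))  # 16 latent vectors (input_dim=100)
images = G.predict(noise)  # -> (16, 28, 28, 1), values in [0, 1] from the final sigmoid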
def scaleup(input, ngf, kss, strides, padding):
    # x = Conv2DTranspose(ngf, kss, strides=strides, padding=padding)(input)
    # upsample + conv
    x = UpSampling2D(strides)(input)
    x = Conv2D(ngf, kss, padding=padding)(x)
    return x
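Replacing the strided transposed convolution (the commented-out line) with UpSampling2D followed by a stride-1 Conv2D is a common remedy for the checkerboard artifacts that transposed convolutions can introduce; both paths multiply the spatial size by strides.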
def get_fcn_vgg16_32s(inputs, n_classes):
    x = BatchNormalization()(inputs)
    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding="same")(x)
    x = Conv2DTranspose(n_classes, kernel_size=(64, 64), strides=(32, 32), activation='linear', padding='same')(x)
    return x
def test_transposed_conv(self):
    keras_model = Sequential()
    keras_model.add(Conv2DTranspose(32, (2, 2), strides=(2, 2),
                                    input_shape=(3, 32, 32), name='trans'))
    keras_model.compile(loss=keras.losses.categorical_crossentropy,
                        optimizer=keras.optimizers.SGD())
    pytorch_model = TransposeNet()
    self.transfer(keras_model, pytorch_model)
    self.assertEqualPrediction(keras_model, pytorch_model, self.test_data)

# Tests special activation function
def model_EES16():
    _input = Input(shape=(None, None, 1), name='input')
    EES = Conv2D(filters=16, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu')(_input)
    EES = Conv2DTranspose(filters=32, kernel_size=(14, 14), strides=(2, 2), padding='same', activation='relu')(EES)
    out = Conv2D(filters=1, kernel_size=(5, 5), strides=(1, 1), activation='relu', padding='same')(EES)
    model = Model(inputs=_input, outputs=out)  # Keras 2: 'inputs'/'outputs', not 'input'/'output'
    return model
def model_EES():
    _input = Input(shape=(None, None, 1), name='input')
    EES = Conv2D(filters=4, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu')(_input)
    EES = Conv2DTranspose(filters=8, kernel_size=(14, 14), strides=(2, 2), padding='same', activation='relu')(EES)
    out = Conv2D(filters=1, kernel_size=(5, 5), strides=(1, 1), activation='relu', padding='same')(EES)
    model = Model(inputs=_input, outputs=out)
    return model
def fcn_vggbase(input_shape=(None, None, 3)):
    img_input = Input(shape=input_shape)
    x = ZeroPadding2D(padding=(100, 100), name='pad1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='block1_pool')(x)
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(x)
    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(x)
    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block4_pool')(x)
    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block5_pool')(x)
    # Keras 2 API: kernel_regularizer replaces the deprecated W_regularizer argument.
    x = Conv2D(filters=4096, kernel_size=(7, 7), kernel_regularizer=l2(0.00005), activation='relu', padding='valid', name='fc6_lsun')(x)
    x = Dropout(0.85)(x)
    x = Conv2D(filters=4096, kernel_size=(1, 1), kernel_regularizer=l2(0.00005), activation='relu', padding='valid', name='fc7_lsun')(x)
    x = Dropout(0.85)(x)
    x = Conv2D(filters=5, kernel_size=(1, 1), strides=(1, 1), kernel_initializer='he_normal', padding='valid', name='lsun_score')(x)
    x = Conv2DTranspose(filters=5, kernel_initializer='he_normal', kernel_size=(64, 64), strides=(32, 32), padding='valid', use_bias=False, name='lsun_upscore2')(x)
    output = _crop(img_input, offset=(32, 32), name='score')(x)
    model = Model(img_input, output)
    weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', VGG_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    model.load_weights(weights_path, by_name=True)
    return model
def fcn16s_vggbase(input_shape=None, nb_class=None):
    img_input = Input(shape=input_shape)
    x = ZeroPadding2D(padding=(100, 100), name='pad1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='block1_pool')(x)
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(x)
    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(x)
    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block4_pool')(x)
    pool4 = x
    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block5_pool')(x)
    # Keras 2 API: kernel_regularizer replaces the deprecated W_regularizer argument.
    x = Conv2D(filters=4096, kernel_size=(7, 7), kernel_regularizer=l2(0.00005), activation='relu', padding='valid', name='fc6')(x)
    x = Dropout(0.85)(x)
    x = Conv2D(filters=4096, kernel_size=(1, 1), kernel_regularizer=l2(0.00005), activation='relu', padding='valid', name='fc7')(x)
    x = Dropout(0.85)(x)
    x = Conv2D(filters=nb_class, kernel_size=(1, 1), strides=(1, 1), kernel_initializer='he_normal', padding='valid', name='p5score')(x)
    x = Conv2DTranspose(filters=nb_class, kernel_size=(4, 4), strides=(2, 2), kernel_initializer='he_normal', padding='valid', name='p5upscore')(x)
    pool4 = Conv2D(filters=nb_class, kernel_size=(1, 1), kernel_initializer='he_normal', padding='valid', name='pool4_score')(pool4)
    pool4_score = _crop(x, offset=(5, 5), name='pool4_score2')(pool4)
    m = add([pool4_score, x])  # Keras 2: merge(..., mode='sum') is replaced by add()
    upscore = Conv2DTranspose(filters=nb_class, kernel_size=(32, 32), strides=(16, 16), padding='valid', name='merged_score')(m)
    score = _crop(img_input, offset=(27, 27), name='output_score')(upscore)
    weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', VGG_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    mdl = Model(img_input, score, name='fcn16s')
    mdl.load_weights(weights_path, by_name=True)
    return mdl
def dilated_FCN_addmodule(input_shape=None):
    img_input = Input(shape=input_shape)
    x = ZeroPadding2D(padding=(100, 100), name='pad1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='block1_pool')(x)
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(x)
    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(x)
    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block4_pool')(x)
    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block5_pool')(x)
    x = Conv2D(filters=4096, kernel_initializer='he_normal', kernel_size=(7, 7), activation='relu', padding='valid', name='fc6')(x)
    x = Dropout(0.85)(x)
    x = Conv2D(filters=4096, kernel_initializer='he_normal', kernel_size=(1, 1), activation='relu', padding='valid', name='fc7')(x)
    x = Dropout(0.85)(x)
    x = Conv2D(filters=40, kernel_size=(1, 1), strides=(1, 1), kernel_initializer='he_normal', padding='valid', name='score_fr')(x)
    # x = Cropping2D(cropping=((19, 36), (19, 29)), name='score')(x)
    x = ZeroPadding2D(padding=(33, 33))(x)
    x = Conv2D(2 * 40, (3, 3), kernel_initializer='he_normal', activation='relu', name='dl_conv1')(x)
    x = Conv2D(2 * 40, (3, 3), kernel_initializer='he_normal', activation='relu', name='dl_conv2')(x)
    x = Conv2D(4 * 40, (3, 3), kernel_initializer='he_normal', dilation_rate=(2, 2), activation='relu', name='dl_conv3')(x)
    x = Conv2D(8 * 40, (3, 3), kernel_initializer='he_normal', dilation_rate=(4, 4), activation='relu', name='dl_conv4')(x)
    x = Conv2D(16 * 40, (3, 3), kernel_initializer='he_normal', dilation_rate=(8, 8), activation='relu', name='dl_conv5')(x)
    x = Conv2D(32 * 40, (3, 3), kernel_initializer='he_normal', dilation_rate=(16, 16), activation='relu', name='dl_conv6')(x)
    x = Conv2D(32 * 40, (1, 1), kernel_initializer='he_normal', name='dl_conv7')(x)
    x = Conv2D(1 * 40, (1, 1), kernel_initializer='he_normal', name='dl_final')(x)
    x = Conv2DTranspose(filters=40, kernel_initializer='he_normal', kernel_size=(64, 64), strides=(32, 32), padding='valid', use_bias=False, name='upscore2')(x)
    x = CroppingLike2D(img_input, offset='centered', name='score')(x)
    mdl = Model(img_input, x, name='dilatedmoduleFCN')
    # weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', VGG_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    mdl.load_weights('logs/model_June13_sgd_60kitr.h5', by_name=True)
    return mdl
def dilated_FCN_frontended(input_shape=None, weight_decay=None, nb_classes=40):
    img_input = Input(shape=input_shape)
    # x = ZeroPadding2D(padding=(100, 100), name='pad1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='block1_pool')(x)
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(x)
    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(x)
    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = Conv2D(512, (3, 3), dilation_rate=(2, 2), activation='relu', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), dilation_rate=(2, 2), activation='relu', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), dilation_rate=(2, 2), activation='relu', name='block5_conv3')(x)
    x = Conv2D(4096, (3, 3), kernel_initializer='he_normal', dilation_rate=(4, 4), activation='relu', name='fc6')(x)
    x = Dropout(0.5, name='drop6')(x)
    x = Conv2D(4096, (1, 1), kernel_initializer='he_normal', activation='relu', name='fc7')(x)
    x = Dropout(0.5, name='drop7')(x)
    x = Conv2D(nb_classes, (1, 1), kernel_initializer='he_normal', activation='relu', name='fc_final')(x)
    # x = Conv2DTranspose(nb_classes, kernel_size=(64, 64), strides=(32, 32), padding='valid', name='upscore2')(x)
    x = ZeroPadding2D(padding=(33, 33))(x)
    x = Conv2D(2 * nb_classes, (3, 3), kernel_initializer='he_normal', activation='relu', name='dl_conv1')(x)
    x = Conv2D(2 * nb_classes, (3, 3), kernel_initializer='he_normal', activation='relu', name='dl_conv2')(x)
    x = Conv2D(4 * nb_classes, (3, 3), kernel_initializer='he_normal', dilation_rate=(2, 2), activation='relu', name='dl_conv3')(x)
    x = Conv2D(8 * nb_classes, (3, 3), kernel_initializer='he_normal', dilation_rate=(4, 4), activation='relu', name='dl_conv4')(x)
    x = Conv2D(16 * nb_classes, (3, 3), kernel_initializer='he_normal', dilation_rate=(8, 8), activation='relu', name='dl_conv5')(x)
    x = Conv2D(32 * nb_classes, (3, 3), kernel_initializer='he_normal', dilation_rate=(16, 16), activation='relu', name='dl_conv6')(x)
    x = Conv2D(32 * nb_classes, (1, 1), kernel_initializer='he_normal', name='dl_conv7')(x)
    x = Conv2D(1 * nb_classes, (1, 1), kernel_initializer='he_normal', name='dl_final')(x)
    x = Conv2DTranspose(nb_classes, kernel_initializer='he_normal', kernel_size=(64, 64), strides=(8, 8), padding='valid', name='upscore2')(x)
    x = CroppingLike2D(img_input, offset='centered', name='score')(x)
    # x = Cropping2D(cropping=((19, 36), (19, 29)), name='score')(x)
    mdl = Model(inputs=img_input, outputs=x, name='dilated_fcn')  # Keras 2: 'inputs'/'outputs'
    weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', VGG_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    mdl.load_weights(weights_path, by_name=True)
    return mdl
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4, block_prefix=None):
    '''Adds an upsampling block. The upsampling operation depends on the type parameter.
    # Arguments
        ip: input keras tensor
        nb_filters: integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution)
        type: can be 'upsampling', 'subpixel', or 'deconv'; determines
            the type of upsampling performed
        weight_decay: weight decay factor
        block_prefix: str, for unique block naming
    # Input shape
        4D tensor with shape:
        `(samples, channels, rows, cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, rows, cols, channels)` if data_format='channels_last'.
    # Output shape
        4D tensor with shape:
        `(samples, nb_filter, rows * 2, cols * 2)` if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, rows * 2, cols * 2, nb_filter)` if data_format='channels_last'.
    # Returns
        a keras tensor
    '''
    with K.name_scope('TransitionUp'):
        if type == 'upsampling':
            x = UpSampling2D(name=name_or_none(block_prefix, '_upsampling'))(ip)
        elif type == 'subpixel':
            x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                       use_bias=False, kernel_initializer='he_normal', name=name_or_none(block_prefix, '_conv2D'))(ip)
            x = SubPixelUpscaling(scale_factor=2, name=name_or_none(block_prefix, '_subpixel'))(x)
            # Distinct suffix: reusing '_conv2D' here would raise a duplicate-layer-name error.
            x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                       use_bias=False, kernel_initializer='he_normal', name=name_or_none(block_prefix, '_conv2D_2'))(x)
        else:
            x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same', strides=(2, 2),
                                kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay),
                                name=name_or_none(block_prefix, '_conv2DT'))(ip)
    return x
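For reference, a minimal call, assuming the surrounding DenseNet-FCN helpers (name_or_none, SubPixelUpscaling) are in scope; with the default 'deconv' type the stride-(2, 2) transposed convolution doubles rows and cols:

from keras.layers import Input

ip = Input(shape=(32, 32, 128))
x = __transition_up_block(ip, nb_filters=64, block_prefix='tu1')
# x has shape (None, 64, 64, 64): padding='same' with strides=(2, 2) doubles the resolution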
def inflating_convolution(inputs, n_inflation_layers, projection_space_shape=(4, 4, 32), name_prefix=None):
    assert len(projection_space_shape) == 3, \
        "Projection space shape has {} dimensions but should have 3.".format(len(projection_space_shape))
    flattened_space_dim = prod(projection_space_shape)
    projection = Dense(flattened_space_dim, activation=None, name=name_prefix + '_projection')(inputs)
    reshape = Reshape(projection_space_shape, name=name_prefix + '_reshape')(projection)
    depth = projection_space_shape[2]
    inflated = Conv2DTranspose(filters=min(32, depth // 2), kernel_size=(5, 5), strides=(2, 2), activation='relu',
                               padding='same', name=name_prefix + '_transposed_conv_0')(reshape)
    for i in range(1, n_inflation_layers):
        inflated = Conv2DTranspose(filters=max(1, depth // 2**(i + 1)), kernel_size=(5, 5),
                                   strides=(2, 2), activation='relu', padding='same',
                                   name=name_prefix + '_transpose_conv_{}'.format(i))(inflated)
    return inflated
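A quick shape walkthrough (hypothetical call, assuming the function and its prod import are in scope): each stride-2 transposed convolution doubles the grid while the filter count halves.

from keras.layers import Input

latent = Input(shape=(100,))
feat = inflating_convolution(latent, n_inflation_layers=3,
                             projection_space_shape=(4, 4, 32),
                             name_prefix='gen')
# (4, 4) -> (8, 8) -> (16, 16) -> (32, 32); filters go 16 -> 8 -> 4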
def resnet_6blocks(input_shape, output_nc, ngf, **kwargs):
    ks = 3
    f = 7
    p = (f - 1) // 2  # integer padding (in Python 3, '/' would yield a float)
    input = Input(input_shape)
    # local e1 = data - nn.SpatialReflectionPadding(p, p, p, p) - nn.SpatialConvolution(3, ngf, f, f, 1, 1) - normalization(ngf) - nn.ReLU(true)
    x = padding((p, p))(input)
    x = Conv2D(ngf, (f, f))(x)
    x = normalize()(x)
    x = Activation('relu')(x)
    # local e2 = e1 - nn.SpatialConvolution(ngf, ngf*2, ks, ks, 2, 2, 1, 1) - normalization(ngf*2) - nn.ReLU(true)
    x = Conv2D(ngf * 2, (ks, ks), strides=(2, 2), padding='same')(x)
    x = normalize()(x)
    x = Activation('relu')(x)
    # local e3 = e2 - nn.SpatialConvolution(ngf*2, ngf*4, ks, ks, 2, 2, 1, 1) - normalization(ngf*4) - nn.ReLU(true)
    x = Conv2D(ngf * 4, (ks, ks), strides=(2, 2), padding='same')(x)
    x = normalize()(x)
    x = Activation('relu')(x)
    # local d1 = e3 - build_res_block(ngf*4, padding_type) - build_res_block(ngf*4, padding_type) - build_res_block(ngf*4, padding_type)
    #     - build_res_block(ngf*4, padding_type) - build_res_block(ngf*4, padding_type) - build_res_block(ngf*4, padding_type)
    x = res_block(x, ngf * 4)
    x = res_block(x, ngf * 4)
    x = res_block(x, ngf * 4)
    x = res_block(x, ngf * 4)
    x = res_block(x, ngf * 4)
    x = res_block(x, ngf * 4)
    # local d2 = d1 - nn.SpatialFullConvolution(ngf*4, ngf*2, ks, ks, 2, 2, 1, 1,1,1) - normalization(ngf*2) - nn.ReLU(true)
    # x = Conv2DTranspose(ngf*2, (ks,ks), strides=(2,2), padding='same')(x)
    x = scaleup(x, ngf * 2, (ks, ks), strides=(2, 2), padding='same')
    x = normalize()(x)
    x = Activation('relu')(x)
    # local d3 = d2 - nn.SpatialFullConvolution(ngf*2, ngf, ks, ks, 2, 2, 1, 1,1,1) - normalization(ngf) - nn.ReLU(true)
    # x = Conv2DTranspose(ngf, (ks,ks), strides=(2,2), padding='same')(x)
    x = scaleup(x, ngf, (ks, ks), strides=(2, 2), padding='same')
    x = normalize()(x)
    x = Activation('relu')(x)
    # local d4 = d3 - nn.SpatialReflectionPadding(p, p, p, p) - nn.SpatialConvolution(ngf, output_nc, f, f, 1, 1) - nn.Tanh()
    x = padding((p, p))(x)
    x = Conv2D(output_nc, (f, f))(x)
    x = Activation('tanh')(x)
    model = Model(input, x, name=kwargs.get('name', None))
    print('Model resnet 6blocks:')
    model.summary()
    return model
def get_fcn_vgg16_16s(inputs, n_classes):
    x = BatchNormalization()(inputs)
    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    block_4 = Conv2D(n_classes, (1, 1), activation='relu', padding='same')(x)
    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding="same")(x)
    block_5 = Conv2DTranspose(n_classes, kernel_size=(4, 4), strides=(2, 2), activation='relu', padding='same')(x)
    x = add([block_4, block_5])
    x = Conv2DTranspose(n_classes, kernel_size=(32, 32), strides=(16, 16), activation='linear', padding='same')(x)
    return x
def get_fcn_vgg16_8s(inputs, n_classes):
    x = BatchNormalization()(inputs)
    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    block_3 = Conv2D(n_classes, (1, 1), activation='relu', padding='same')(x)
    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    block_4 = Conv2D(n_classes, (1, 1), activation='relu', padding='same')(x)
    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding="same")(x)
    block_5 = Conv2DTranspose(n_classes, kernel_size=(4, 4), strides=(2, 2), activation='relu', padding='same')(x)
    sum_1 = add([block_4, block_5])
    sum_1 = Conv2DTranspose(n_classes, kernel_size=(4, 4), strides=(2, 2), activation='relu', padding='same')(sum_1)
    sum_2 = add([block_3, sum_1])
    x = Conv2DTranspose(n_classes, kernel_size=(16, 16), strides=(8, 8), activation='linear', padding='same')(sum_2)
    return x
def get_unet(inputs, n_classes):
    x = BatchNormalization()(inputs)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)
    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)
    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)
    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)
    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)
    conv10 = Conv2D(n_classes, (1, 1), activation='linear')(conv9)
    return conv10
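A minimal usage sketch (hypothetical input size): because of the four 2x poolings and the matching transposed-conv/concat stages, the spatial dimensions should be divisible by 16.

from keras.layers import Input
from keras.models import Model

inputs = Input(shape=(256, 256, 3))  # any spatial size divisible by 16
outputs = get_unet(inputs, n_classes=2)
model = Model(inputs, outputs)  # per-pixel linear scores; add a softmax/loss as needed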