def _crop(target_layer, offset=(None, None), name=None):
"""Crop the bottom such that it has the same shape as target_layer."""
def f(input):
width = input._keras_shape[1]
height = input._keras_shape[2]
target_width = target_layer._keras_shape[1]
target_height = target_layer._keras_shape[2]
cropped = Cropping2D(cropping=((offset[0], width - offset[0] - target_width),(offset[1],height - offset[1] - target_height)), name='{}'.format(name))(input)
return cropped
return f
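# Hypothetical usage sketch of _crop (shapes and names below are illustrative), assuming
# standalone Keras <= 2.3, whose layer outputs still carry `_keras_shape` (TensorFlow
# backend, channels_last):
from keras.layers import Input, Conv2DTranspose

skip_feat = Input(shape=(56, 56, 64))                                      # encoder feature map to match
decoder_feat = Input(shape=(31, 31, 64))
upsampled = Conv2DTranspose(64, (4, 4), strides=(2, 2))(decoder_feat)      # (31-1)*2 + 4 = 64 -> 64x64x64
aligned = _crop(skip_feat, offset=(4, 4), name='crop_to_skip')(upsampled)  # cropped back to 56x56x64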
def fcn_Resnet50(input_shape=None, weight_decay=0.0002, batch_momentum=0.9, batch_shape=None, classes=40):
img_input = Input(shape=input_shape)
bn_axis = 3
    x = Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='same', name='conv1', kernel_regularizer=l2(weight_decay))(img_input)
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(3, [64, 64, 256], stage=2, block='a', strides=(1, 1))(x)
x = identity_block(3, [64, 64, 256], stage=2, block='b')(x)
x = identity_block(3, [64, 64, 256], stage=2, block='c')(x)
x = conv_block(3, [128, 128, 512], stage=3, block='a')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='b')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='c')(x)
x = identity_block(3, [128, 128, 512], stage=3, block='d')(x)
x = conv_block(3, [256, 256, 1024], stage=4, block='a')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='b')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='c')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='d')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='e')(x)
x = identity_block(3, [256, 256, 1024], stage=4, block='f')(x)
x = conv_block(3, [512, 512, 2048], stage=5, block='a')(x)
x = identity_block(3, [512, 512, 2048], stage=5, block='b')(x)
x = identity_block(3, [512, 512, 2048], stage=5, block='c')(x)
#classifying layer
    x = Conv2D(filters=classes, kernel_size=(1, 1), strides=(1, 1), kernel_initializer='he_normal', activation='linear', padding='valid', kernel_regularizer=l2(weight_decay))(x)
    x = Conv2DTranspose(filters=classes, kernel_initializer='he_normal', kernel_size=(64, 64), strides=(32, 32), padding='valid', use_bias=False, name='upscore2')(x)
x = Cropping2D(cropping=((19, 36),(19, 29)), name='score')(x)
model = Model(img_input, x)
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5', RES_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
model.load_weights(weights_path, by_name=True)
return model
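# The fixed Cropping2D amounts in fcn_Resnet50 appear tied to one particular input
# resolution: with 'valid' padding, upscore2 maps an (h, w) score map to
# ((h - 1) * 32 + 64, (w - 1) * 32 + 64), and the crop removes the difference to the
# desired output size. A small helper to check that arithmetic (illustrative, not from the source):
def upsampled_size(n, stride=32, kernel=64):
    return (n - 1) * stride + kernel

# e.g. a 15 x 16 score map becomes 512 x 544 after upscore2, and removing (19 + 36) rows
# and (19 + 29) cols leaves 457 x 496:
assert upsampled_size(15) - (19 + 36) == 457
assert upsampled_size(16) - (19 + 29) == 496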
def build_errors(states,base,pad,dim,size):
# address the numerical viscosity in swirling
s = K.round(states+viscosity_adjustment)
s = Reshape((dim+2*pad,dim+2*pad,1))(s)
s = Cropping2D(((pad,pad),(pad,pad)))(s)
s = K.reshape(s,[-1,size,base,size,base])
s = K.permute_dimensions(s, [0,1,3,2,4])
s = K.reshape(s,[-1,size,size,1,base,base])
    s = K.tile(s, [1, 1, 1, 2, 1, 1])  # number of panels : 2
allpanels = K.variable(panels)
allpanels = K.reshape(allpanels, [1,1,1,2,base,base])
allpanels = K.tile(allpanels, [K.shape(s)[0], size,size, 1, 1, 1])
    def hash(x):
        ## 3x3 block average hashing (each base x base panel is pooled over 3x3 blocks)
        x = K.reshape(x, [-1, size, size, 2, base//3, 3, base//3, 3])
        x = K.mean(x, axis=(5, 7))
        return K.round(x)
## diff hashing (horizontal diff)
# x1 = x[:,:,:,:,:,:-1]
# x2 = x[:,:,:,:,:,1:]
# d = x1 - x2
# return K.round(d)
## just rounding
# return K.round(x)
## do nothing
# return x
# s = hash(s)
# allpanels = hash(allpanels)
# error = K.binary_crossentropy(s, allpanels)
error = K.abs(s - allpanels)
error = hash(error)
error = K.mean(error, axis=(4,5))
return error
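# build_errors relies on module-level globals from the surrounding project; a minimal
# stand-in purely for illustration (in the real code, `panels` comes from the domain
# definition and `viscosity_adjustment` is a tuned constant):
import numpy as np
viscosity_adjustment = 0.0                                            # added before rounding the states
panels = np.random.randint(0, 2, size=(2, 6, 6)).astype('float32')    # 2 reference panels with base = 6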
def default_n_linear(num_outputs):
    from keras.layers import Input, Dense
from keras.models import Model
from keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
from keras.layers import Activation, Dropout, Flatten, Cropping2D, Lambda
img_in = Input(shape=(120,160,3), name='img_in')
x = img_in
x = Cropping2D(cropping=((60,0), (0,0)))(x) #trim 60 pixels off top
x = Lambda(lambda x: x/127.5 - 1.)(x) # normalize and re-center
x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
x = Convolution2D(64, (5,5), strides=(1,1), activation='relu')(x)
x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
x = Flatten(name='flattened')(x)
x = Dense(100, activation='relu')(x)
x = Dropout(.1)(x)
x = Dense(50, activation='relu')(x)
x = Dropout(.1)(x)
outputs = []
for i in range(num_outputs):
outputs.append(Dense(1, activation='linear', name='n_outputs' + str(i))(x))
model = Model(inputs=[img_in], outputs=outputs)
model.compile(optimizer='adam',
loss='mse')
return model
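# Hypothetical usage, assuming 120x160 RGB frames as wired into the Input layer above:
model = default_n_linear(num_outputs=2)   # e.g. one head for steering, one for throttle
model.summary()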
def default_imu(num_outputs, num_imu_inputs):
    '''
    Note: this model depends on `concatenate`, which fails on Keras < 2.0.8.
    '''
from keras.layers import Input, Dense
from keras.models import Model
from keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
from keras.layers import Activation, Dropout, Flatten, Cropping2D, Lambda
from keras.layers.merge import concatenate
img_in = Input(shape=(120,160,3), name='img_in')
imu_in = Input(shape=(num_imu_inputs,), name="imu_in")
x = img_in
x = Cropping2D(cropping=((60,0), (0,0)))(x) #trim 60 pixels off top
#x = Lambda(lambda x: x/127.5 - 1.)(x) # normalize and re-center
x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x)
x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
x = Flatten(name='flattened')(x)
x = Dense(100, activation='relu')(x)
x = Dropout(.1)(x)
y = imu_in
y = Dense(14, activation='relu')(y)
y = Dense(14, activation='relu')(y)
y = Dense(14, activation='relu')(y)
z = concatenate([x, y])
z = Dense(50, activation='relu')(z)
z = Dropout(.1)(z)
z = Dense(50, activation='relu')(z)
z = Dropout(.1)(z)
outputs = []
for i in range(num_outputs):
outputs.append(Dense(1, activation='linear', name='out_' + str(i))(z))
model = Model(inputs=[img_in, imu_in], outputs=outputs)
model.compile(optimizer='adam',
loss='mse')
return model
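# Hypothetical usage with a 7-value IMU vector (e.g. 3-axis accelerometer + 3-axis gyro + heading);
# training would then take [image_batch, imu_batch] as inputs:
imu_model = default_imu(num_outputs=2, num_imu_inputs=7)
imu_model.summary()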
def test_delete_channels_cropping2d(channel_index, data_format):
layer = Cropping2D([2, 3], data_format=data_format)
layer_test_helper_flatten_2d(layer, channel_index, data_format)
def test_crop_simple(self):
input_shape = (48, 48, 3)
model = Sequential()
model.add(Cropping2D(cropping=((2,5),(2,5)),input_shape=input_shape))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_keras_model(model)
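# Quick shape check for the crop used in the test above, assuming channels_last:
# 48 - (2 + 5) = 41 on each spatial axis, so (48, 48, 3) -> (41, 41, 3).
from keras.models import Sequential
from keras.layers import Cropping2D

_m = Sequential([Cropping2D(cropping=((2, 5), (2, 5)), input_shape=(48, 48, 3))])
assert _m.output_shape == (None, 41, 41, 3)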
def train_simple_cnn():
model = Sequential(name='model')
shape = X_train[0].shape
    topCropPixels = int(shape[0] * 0.3)  # crop the top 30% of the image
model.add(Cropping2D(cropping=((topCropPixels, 0), (0, 0)), input_shape=shape))
model.add(Convolution2D(24, 5, 5, subsample=(2, 2), border_mode='same'))
model.add(MaxPooling2D())
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(Convolution2D(36, 3, 3, border_mode='same'))
model.add(MaxPooling2D())
model.add(Dropout(0.4))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(100, name="hidden1"))
model.add(Dropout(0.4))
model.add(Activation('relu'))
model.add(Dense(10, name="hidden3"))
model.add(Activation('relu'))
model.add(Dense(1, name='output'))
model.summary()
model.compile(loss='mse',
optimizer=Adam(lr=0.00005),
metrics=['mean_absolute_error'])
    history = model.fit(X_train, y_train,
                        batch_size=128, nb_epoch=200,
                        verbose=1,
                        validation_data=(X_test, y_test),  # takes precedence over validation_split, so the split is dropped
                        shuffle=True)
    print(history.history)
return model
# Utility function to write out the cropping layer's output to make sure we're doing it right...
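# The utility itself is not included in this excerpt; a minimal sketch of what it might
# look like (names are illustrative): build a sub-model that stops at the cropping layer
# and save its output for a single sample.
import numpy as np
from keras.models import Model

def dump_crop_output(model, sample, path='crop_debug.npy'):
    crop_layer = next(l for l in model.layers if 'cropping2d' in l.name)
    probe = Model(inputs=model.input, outputs=crop_layer.output)
    np.save(path, probe.predict(sample[np.newaxis]))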
def dilated_FCN_addmodule(input_shape=None):
img_input = Input(shape=input_shape)
x = ZeroPadding2D(padding=(100, 100), name='pad1')(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block4_pool')(x)
# Block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), padding='same',name='block5_pool')(x)
x = Conv2D(filters=4096, kernel_initializer='he_normal', kernel_size=(7, 7), activation='relu', padding='valid', name='fc6')(x)
x = Dropout(0.85)(x)
x = Conv2D(filters=4096, kernel_initializer='he_normal', kernel_size=(1, 1), activation='relu', padding='valid', name='fc7')(x)
x = Dropout(0.85)(x)
x = Conv2D(filters=40,kernel_size=(1, 1), strides=(1,1), kernel_initializer='he_normal', padding='valid', name='score_fr')(x)
#x = Cropping2D(cropping=((19, 36),(19, 29)), name='score')(x)
x = ZeroPadding2D(padding=(33,33))(x)
x = Conv2D(2*40, (3,3), kernel_initializer='he_normal',activation='relu', name='dl_conv1')(x)
x = Conv2D(2*40, (3,3), kernel_initializer='he_normal',activation='relu', name='dl_conv2')(x)
x = Conv2D(4*40, (3,3), kernel_initializer='he_normal',dilation_rate=(2,2), activation='relu', name='dl_conv3')(x)
x = Conv2D(8*40, (3,3), kernel_initializer='he_normal',dilation_rate=(4,4), activation='relu', name='dl_conv4')(x)
x = Conv2D(16*40, (3,3), kernel_initializer='he_normal',dilation_rate=(8,8), activation='relu', name='dl_conv5')(x)
x = Conv2D(32*40, (3,3), kernel_initializer='he_normal',dilation_rate=(16,16), activation='relu', name='dl_conv6')(x)
x = Conv2D(32*40, (1,1), kernel_initializer='he_normal',name='dl_conv7')(x)
x = Conv2D(1*40, (1,1), kernel_initializer='he_normal',name='dl_final')(x)
x = Conv2DTranspose(filters=40, kernel_initializer='he_normal', kernel_size=(64, 64), strides=(32, 32), padding='valid',use_bias=False, name='upscore2')(x)
x = CroppingLike2D(img_input, offset='centered', name='score')(x)
mdl = Model(img_input, x, name='dilatedmoduleFCN')
#weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', VGG_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
mdl.load_weights('logs/model_June13_sgd_60kitr.h5', by_name=True)
return mdl
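# CroppingLike2D is a custom layer defined elsewhere in this project. When the static
# shapes are known up front, a functionally similar centered crop can be assembled from
# the stock Cropping2D (a sketch, not the author's implementation):
from keras.layers import Cropping2D

def centered_crop_like(target, name=None):
    def f(x):
        dh = int(x.shape[1]) - int(target.shape[1])
        dw = int(x.shape[2]) - int(target.shape[2])
        return Cropping2D(cropping=((dh // 2, dh - dh // 2),
                                    (dw // 2, dw - dw // 2)), name=name)(x)
    return f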
def dilated_FCN_frontended(input_shape=None, weight_decay=None, nb_classes=40):
img_input = Input(shape=input_shape)
#x = ZeroPadding2D(padding=(100, 100), name='pad1')(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = Conv2D(512, (3,3), dilation_rate=(2,2), activation='relu', name='block5_conv1')(x)
x = Conv2D(512, (3,3), dilation_rate=(2,2), activation='relu', name='block5_conv2')(x)
x = Conv2D(512, (3,3), dilation_rate=(2,2), activation='relu', name='block5_conv3')(x)
x = Conv2D(4096, (3,3), kernel_initializer='he_normal', dilation_rate=(4,4), activation='relu', name='fc6')(x)
x = Dropout(0.5, name='drop6')(x)
x = Conv2D(4096, (1,1), kernel_initializer='he_normal', activation='relu', name='fc7')(x)
x = Dropout(0.5, name='drop7')(x)
x = Conv2D(nb_classes, (1,1), kernel_initializer='he_normal', activation='relu', name='fc_final')(x)
#x = Conv2DTranspose(nb_classes, kernel_size=(64,64), strides=(32,32), padding='valid', name='upscore2')(x)
x = ZeroPadding2D(padding=(33,33))(x)
x = Conv2D(2*nb_classes, (3,3), kernel_initializer='he_normal',activation='relu', name='dl_conv1')(x)
x = Conv2D(2*nb_classes, (3,3), kernel_initializer='he_normal',activation='relu', name='dl_conv2')(x)
x = Conv2D(4*nb_classes, (3,3), kernel_initializer='he_normal',dilation_rate=(2,2), activation='relu', name='dl_conv3')(x)
x = Conv2D(8*nb_classes, (3,3), kernel_initializer='he_normal',dilation_rate=(4,4), activation='relu', name='dl_conv4')(x)
x = Conv2D(16*nb_classes, (3,3), kernel_initializer='he_normal',dilation_rate=(8,8), activation='relu', name='dl_conv5')(x)
x = Conv2D(32*nb_classes, (3,3), kernel_initializer='he_normal',dilation_rate=(16,16), activation='relu', name='dl_conv6')(x)
x = Conv2D(32*nb_classes, (1,1), kernel_initializer='he_normal',name='dl_conv7')(x)
x = Conv2D(1*nb_classes, (1,1), kernel_initializer='he_normal',name='dl_final')(x)
x = Conv2DTranspose(nb_classes, kernel_initializer='he_normal', kernel_size=(64,64), strides=(8,8), padding='valid', name='upscore2')(x)
x = CroppingLike2D(img_input, offset='centered', name='score')(x)
#x = Cropping2D(cropping=((19,36), (19,29)), name='score')(x)
    mdl = Model(inputs=img_input, outputs=x, name='dilated_fcn')
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', VGG_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
mdl.load_weights(weights_path, by_name=True)
return mdl
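# Hypothetical call, assuming VGG_WEIGHTS_PATH_NO_TOP and CroppingLike2D are defined
# elsewhere in the module; the centered crop at the end returns the score map to the
# exact input resolution:
seg_model = dilated_FCN_frontended(input_shape=(480, 480, 3), nb_classes=40)
seg_model.summary()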
def build_fcn(X):
    #
    # DESCRIPTION
    #   Keras FCN definition.
    #   Uses the shape of the input X to set up the input layer and builds an FCN with two skip connections.
    #
    # INPUTS
    #   X       [number_of_images, 400, 400, channels]
    #
    # OUTPUTS
    #   model   compiled (untrained) Keras Model
    #
img_rows, img_cols = 400, 400
inputs = Input(shape=X.shape[1:])
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
conv1 = Convolution2D(32, 4, 4, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Convolution2D(64, 4, 4, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv3) # 50 50
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv4) # 25 25
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
pool5 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv5)
drop3 = Dropout(0.5)(pool5)
convpool3 = Convolution2D(60, 1, 1, activation='relu', border_mode='same')(pool3)
convpool4 = Convolution2D(60, 1, 1, activation='relu', border_mode='same')(pool4)
convdrop3 = Convolution2D(60, 1, 1, activation='relu', border_mode='same')(drop3)
drop3x5 = UpSampling2D(size=(5, 5))(convdrop3)
croppeddrop3x5 = Cropping2D(((5,5),(5,5)))(drop3x5) # 50 50
pool4x2 = UpSampling2D(size=(2, 2))(convpool4) # 50 50
    fuse2 = merge([convpool3, pool4x2, croppeddrop3x5], mode='concat', concat_axis=-1)  # 50 x 50 x 180
upscore3 = UpSampling2D(size=(8, 8))(fuse2) # F 8s
convscore3 = Convolution2D(1, 1, 1, activation='sigmoid')(upscore3)
# Instantiate Model object
model = Model(input=inputs, output=convscore3)
sgd = SGD(lr=1e-5, decay=2, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss=pixel_wise_loss, metrics=['mean_squared_error'])
#model.compile(loss='mean_squared_error', optimizer=sgd)
return model
## CUSTOM LOSS FUNCTION
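# The pixel_wise_loss referenced in build_fcn is not included in this excerpt. For a
# sigmoid score map, a common pixel-wise choice is per-pixel binary cross-entropy; a
# minimal sketch (not necessarily the author's exact definition):
from keras import backend as K

def pixel_wise_loss(y_true, y_pred):
    return K.mean(K.binary_crossentropy(y_true, y_pred), axis=[1, 2, 3])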
def _adjust_block(p, ip, filters, weight_decay=5e-5, id=None):
    '''
    Adjusts the input `p` to match the shape of the input `ip`, or to handle
    situations where the number of output filters needs to be changed.

    # Arguments:
        p: input tensor which needs to be modified
        ip: input tensor whose shape needs to be matched
        filters: number of output filters to be matched
        weight_decay: l2 regularization weight
        id: string id

    # Returns:
        an adjusted Keras tensor
    '''
channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
img_dim = 2 if K.image_data_format() == 'channels_first' else -2
with K.name_scope('adjust_block'):
if p is None:
p = ip
elif p._keras_shape[img_dim] != ip._keras_shape[img_dim]:
with K.name_scope('adjust_reduction_block_%s' % id):
p = Activation('relu', name='adjust_relu_1_%s' % id)(p)
p1 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_1_%s' % id)(p)
p1 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
name='adjust_conv_1_%s' % id, kernel_initializer='he_normal')(p1)
p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
p2 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_2_%s' % id)(p2)
p2 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
name='adjust_conv_2_%s' % id, kernel_initializer='he_normal')(p2)
p = concatenate([p1, p2], axis=channel_dim)
p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
name='adjust_bn_%s' % id)(p)
elif p._keras_shape[channel_dim] != filters:
with K.name_scope('adjust_projection_block_%s' % id):
p = Activation('relu')(p)
p = Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name='adjust_conv_projection_%s' % id,
use_bias=False, kernel_regularizer=l2(weight_decay), kernel_initializer='he_normal')(p)
p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
name='adjust_bn_%s' % id)(p)
return p
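# The ZeroPadding2D((0, 1), (0, 1)) + Cropping2D((1, 0), (1, 0)) pair above shifts the
# sampling grid by one pixel before the stride-2 average pool, so the p1 and p2 paths
# pool complementary pixel positions. A quick spatial-shape check (illustrative shapes):
from keras.layers import Input, ZeroPadding2D, Cropping2D, AveragePooling2D

t = Input(shape=(14, 14, 32))
shifted = Cropping2D(((1, 0), (1, 0)))(ZeroPadding2D(((0, 1), (0, 1)))(t))    # still 14x14x32
pooled = AveragePooling2D((1, 1), strides=(2, 2), padding='valid')(shifted)   # -> 7x7x32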
def get_model():
"""
Defines the CNN model architecture and returns the model.
The architecture is the same as I developed for project 2
https://github.com/neerajdixit/Traffic-Sign-classifier-with-Deep-Learning
with an additional normalization layer in front and
a final fully connected layer of size 5 since we have 5 different type of objects in our data set.
"""
# Create a Keras sequential model
model = Sequential()
#model.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=(160,320,3)))
# Add a normalization layer to normalize between -0.5 and 0.5.
model.add(Lambda(lambda x: x / 255. - .5,input_shape=(im_x,im_y,im_z), name='norm'))
# Add a convolution layer with Input = 32x32x3. Output = 30x30x6. Strides 1 and VALID padding.
# Perform RELU activation
model.add(Convolution2D(6, 3, 3, subsample=(1, 1), border_mode="valid", activation='relu', name='conv1'))
# Add a convolution layer with Input = 30x30x6. Output = 28x28x9. Strides 1 and VALID padding.
# Perform RELU activation
model.add(Convolution2D(9, 3, 3, subsample=(1, 1), border_mode="valid", activation='relu', name='conv2'))
# Add Pooling layer with Input = 28x28x9. Output = 14x14x9. 2x2 kernel, Strides 2 and VALID padding
model.add(MaxPooling2D(pool_size=(2, 2), border_mode='valid', name='pool1'))
# Add a convolution layer with Input 14x14x9. Output = 12x12x12. Strides 1 and VALID padding.
# Perform RELU activation
model.add(Convolution2D(12, 3, 3, subsample=(1, 1), border_mode="valid", activation='relu', name='conv3'))
    # Add a convolution layer with Input = 12x12x12. Output = 10x10x16. Strides 1 and VALID padding.
# Perform RELU activation
model.add(Convolution2D(16, 3, 3, subsample=(1, 1), border_mode="valid", activation='relu', name='conv4'))
# Add Pooling layer with Input = 10x10x16. Output = 5x5x16. 2x2 kernel, Strides 2 and VALID padding
model.add(MaxPooling2D(pool_size=(2, 2), border_mode='valid', name='pool2'))
# Flatten. Input = 5x5x16. Output = 400.
model.add(Flatten(name='flat1'))
# Add dropout layer with 0.2
model.add(Dropout(0.2, name='dropout1'))
# Add Fully Connected layer. Input = 400. Output = 220
# Perform RELU activation
model.add(Dense(220, activation='relu', name='fc1'))
# Add Fully Connected layer. Input = 220. Output = 43
# Perform RELU activation
model.add(Dense(43, activation='relu', name='fc2'))
    # Add Fully Connected layer. Input = 43. Output = 5.
    # No activation here (linear output).
model.add(Dense(5, name='fc3'))
# Configure the model for training with Adam optimizer
# "mean squared error" loss objective and accuracy metrics
# Learning rate of 0.001 was chosen because this gave best performance after testing other values
model.compile(optimizer=Adam(lr=0.001), loss="mse", metrics=['accuracy'])
return model
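# Hypothetical usage, assuming im_x, im_y, im_z hold the training image shape elsewhere
# in the script (the layer comments above suggest 32 x 32 x 3):
model = get_model()
model.summary()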