def fire_module(x, squeeze=16, expand=64):
    """SqueezeNet fire module: a 1x1 "squeeze" conv feeding two parallel
    "expand" convs (1x1 and padded 3x3) whose outputs are concatenated
    along the channel axis (axis 1, channels-first)."""
    squeezed = Convolution2D(squeeze, 1, 1, border_mode='valid')(x)
    squeezed = Activation('relu')(squeezed)
    # expand branch 1: 1x1 conv
    expand_1x1 = Convolution2D(expand, 1, 1, border_mode='valid')(squeezed)
    expand_1x1 = Activation('relu')(expand_1x1)
    # expand branch 2: 3x3 conv, zero-padded so spatial size is preserved
    expand_3x3 = ZeroPadding2D(padding=(1, 1))(squeezed)
    expand_3x3 = Convolution2D(expand, 3, 3, border_mode='valid')(expand_3x3)
    expand_3x3 = Activation('relu')(expand_3x3)
    return merge([expand_1x1, expand_3x3], mode='concat', concat_axis=1)
# Original SqueezeNet from paper. Global Average Pool implemented manually with Average Pooling Layer
Example source snippets for the Python class ZeroPadding2D()
def fire_module(x, squeeze=16, expand=64):
    """SqueezeNet fire module (duplicate definition kept as in source):
    1x1 squeeze conv, then parallel 1x1 / padded-3x3 expand convs merged
    by channel concatenation (concat_axis=1, channels-first)."""
    sq = Activation('relu')(
        Convolution2D(squeeze, 1, 1, border_mode='valid')(x))
    branch_a = Activation('relu')(
        Convolution2D(expand, 1, 1, border_mode='valid')(sq))
    branch_b = Activation('relu')(
        Convolution2D(expand, 3, 3, border_mode='valid')(
            ZeroPadding2D(padding=(1, 1))(sq)))
    merged = merge([branch_a, branch_b], mode='concat', concat_axis=1)
    return merged
# Original SqueezeNet from paper. Global Average Pool implemented manually with Average Pooling Layer
def get_simple_model():
    """Build and compile a small two-conv-layer softmax classifier.

    Input shape (nb_input_layers, NB_ROWS, NB_COLS) comes from module-level
    constants; compiled with categorical cross-entropy and Adam.
    Returns the compiled Keras Sequential model.
    """
    layer_stack = [
        ZeroPadding2D(padding=(3, 3), input_shape=(nb_input_layers, NB_ROWS, NB_COLS)),
        Convolution2D(96, 5, 5),
        Activation('relu'),
        ZeroPadding2D(padding=(1, 1)),
        Convolution2D(192, 3, 3),
        Activation('relu'),
        Flatten(),
        Dense(nb_classes),
        Activation('softmax'),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    print("Compiling model")
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    print("Compiled model")
    return model
###############################################################################
def build_model(self):
    """Build a wide-residual classifier over 32x32 inputs and store it
    on self.model (functional API, channels-first; `n`, `k`, `nb_classes`
    etc. are module-level settings)."""
    img_input = Input(shape=(img_channels, img_rows, img_cols))
    # stem: one padded 3x3 conv, spatial size stays 32x32
    net = ZeroPadding2D((1, 1))(img_input)
    net = Convolution2D(16, nb_row=3, nb_col=3)(net)
    # three bottleneck stages; stages 2 and 3 downsample by 2
    net = bottleneck(net, n, 16, 16 * k, dropout=0.3, subsample=(1, 1))      # 32x32
    net = bottleneck(net, n, 16 * k, 32 * k, dropout=0.3, subsample=(2, 2))  # 16x16
    net = bottleneck(net, n, 32 * k, 64 * k, dropout=0.3, subsample=(2, 2))  # 8x8
    # head: BN + ReLU, 8x8 average pool down to 1x1, softmax
    net = BatchNormalization(mode=0, axis=1)(net)
    net = Activation('relu')(net)
    net = AveragePooling2D((8, 8), strides=(1, 1))(net)
    net = Flatten()(net)
    preds = Dense(nb_classes, activation='softmax')(net)
    self.model = Model(input=img_input, output=preds)
    self.keras_get_params()
def VGG_16(weights_path=None):
    """Small VGG-style classifier: two padded 64-filter 3x3 convs, one
    max-pool, then 400-unit and 10-way dense layers.

    Args:
        weights_path: optional path to pre-trained weights to load.
    Returns: Keras Sequential model.
    """
    model = Sequential()
    model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
    # fixed: Python 2 print statements -> print() calls (rest of file uses py3 prints)
    print("convolution")
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(Flatten())
    print("FLATTEN")
    model.add(Dense(400, activation='relu'))
    model.add(Dropout(0.5))
    print("YO")
    model.add(Dense(10, activation='softmax'))
    # consistency fix: honor weights_path like the file's other VGG builders
    # (it was previously accepted but silently ignored)
    if weights_path:
        model.load_weights(weights_path)
    return model
def test_zero_padding_2d(self):
    """ZeroPadding2D should add 2 zero rows/cols on every side and leave
    the interior data untouched, in both train and test modes."""
    nb_samples = 9
    stack_size = 7
    input_nb_row = 11
    input_nb_col = 12
    # all-ones input so padded cells (0.) are distinguishable from data (1.)
    input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
    layer = convolutional.ZeroPadding2D(padding=(2, 2))
    layer.input = theano.shared(value=input)
    for train in [True, False]:
        out = layer.get_output(train).eval()
        for offset in [0, 1, -1, -2]:
            # the two outermost rows and columns must be exactly zero
            assert_allclose(out[:, :, offset, :], 0.)
            assert_allclose(out[:, :, :, offset], 0.)
        # everything inside the 2-cell border is the original data
        assert_allclose(out[:, :, 2:-2, 2:-2], 1.)
    # smoke-check that the layer can serialize its config without raising
    config = layer.get_config()
def get_residual_model(is_mnist=True, img_channels=1, img_rows=28, img_cols=28):
    """Residual classifier: an entry conv (padding MNIST's 28x28 up to
    32x32 first), a residual-block stack from design_for_residual_blocks,
    then BN/ReLU and a softmax head over nb_classes."""
    first_layer_channel = 128
    net = keras.models.Sequential()
    if is_mnist:
        # zero-pad (28,28) inputs up to (32,32) before the first conv
        net.add(ZeroPadding2D((2,2), input_shape=(img_channels, img_rows, img_cols)))
        net.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same'))
    else:
        net.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same',
                              input_shape=(img_channels, img_rows, img_cols)))
    net.add(Activation('relu'))
    # residual-based conv layers
    net.add(design_for_residual_blocks(num_channel_input=first_layer_channel))
    net.add(BatchNormalization(axis=1))
    net.add(Activation('relu'))
    # classifier head
    net.add(Flatten())
    net.add(Dense(nb_classes))
    net.add(Activation('softmax'))
    return net
def get_squeezenet(nb_classes, img_size=(64, 64)):
    """SqueezeNet classifier over channels-first RGB inputs of `img_size`,
    using GlobalAveragePooling2D before the softmax head."""
    input_img = Input(shape=(3, img_size[0], img_size[1]))
    net = Convolution2D(96, 7, 7, subsample=(2, 2), border_mode='valid')(input_img)
    net = Activation('relu')(net)
    net = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(net)
    # first group of fire modules
    for n_squeeze, n_expand in [(16, 64), (16, 64), (32, 128)]:
        net = fire_module(net, n_squeeze, n_expand)
    net = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(net)
    # second group of fire modules
    for n_squeeze, n_expand in [(32, 192), (48, 192), (48, 192), (64, 256)]:
        net = fire_module(net, n_squeeze, n_expand)
    net = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(net)
    net = fire_module(net, 64, 256)
    net = Dropout(0.5)(net)
    net = ZeroPadding2D(padding=(1, 1))(net)
    net = Convolution2D(nb_classes, 1, 1, border_mode='valid')(net)
    net = GlobalAveragePooling2D()(net)
    out = Dense(nb_classes, activation='softmax')(net)
    return Model(input=input_img, output=[out])
def get_squeezenet(nb_classes):
    """SqueezeNet classifier for 227x227 channels-first RGB inputs.
    Global average pooling is emulated with a full-size AveragePooling2D
    followed by Flatten."""
    input_img = Input(shape=(3, 227, 227))
    net = Convolution2D(96, 7, 7, subsample=(2, 2), border_mode='valid')(input_img)
    net = Activation('relu')(net)
    net = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(net)
    for n_squeeze, n_expand in [(16, 64), (16, 64), (32, 128)]:
        net = fire_module(net, n_squeeze, n_expand)
    net = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(net)
    for n_squeeze, n_expand in [(32, 192), (48, 192), (48, 192), (64, 256)]:
        net = fire_module(net, n_squeeze, n_expand)
    net = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(net)
    net = fire_module(net, 64, 256)
    net = Dropout(0.5)(net)
    net = ZeroPadding2D(padding=(1, 1))(net)
    net = Convolution2D(nb_classes, 1, 1, border_mode='valid')(net)
    # manual global average pool: pool over the whole 15x15 map, then flatten
    net = AveragePooling2D(pool_size=(15, 15))(net)
    net = Flatten()(net)
    out = Dense(nb_classes, activation='softmax')(net)
    return Model(input=input_img, output=[out])
def get_small_squeezenet(nb_classes):
    """Compact SqueezeNet variant for 32x32 channels-first RGB inputs
    (CIFAR-sized), with a manual global average pool at the end."""
    input_img = Input(shape=(3, 32, 32))
    net = Convolution2D(16, 3, 3, border_mode='same')(input_img)
    net = Activation('relu')(net)
    net = MaxPooling2D(pool_size=(3, 3))(net)
    for n_squeeze, n_expand in [(32, 128), (32, 128)]:
        net = fire_module(net, n_squeeze, n_expand)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    for n_squeeze, n_expand in [(48, 192), (48, 192)]:
        net = fire_module(net, n_squeeze, n_expand)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    net = fire_module(net, 64, 256)
    net = Dropout(0.5)(net)
    net = ZeroPadding2D(padding=(1, 1))(net)
    net = Convolution2D(nb_classes, 1, 1, border_mode='valid')(net)
    # manual global average pool over the remaining 4x4 map
    net = AveragePooling2D(pool_size=(4, 4))(net)
    net = Flatten()(net)
    out = Dense(nb_classes, activation='softmax')(net)
    return Model(input=input_img, output=[out])
def Alexnet(height, width, weights_path=None):
    """AlexNet-like binary classifier over (3, height, width) inputs.

    Four conv/BN/max-pool stages followed by two BN'd dense layers and a
    2-way softmax. Optionally loads weights from `weights_path`.
    """
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, height, width)))
    # (filters, kernel size) per stage; every stage but the last pads
    # by one pixel before its 3x3 max-pool
    conv_stages = [(64, 11), (128, 7), (192, 3), (256, 3)]
    for idx, (n_filters, kernel) in enumerate(conv_stages):
        model.add(Convolution2D(n_filters, kernel, kernel, border_mode="same", activation="relu"))
        model.add(BatchNormalization())
        if idx < len(conv_stages) - 1:
            model.add(ZeroPadding2D((1, 1)))
        model.add(MaxPooling2D(pool_size=(3, 3)))
    # classifier head
    model.add(Flatten())
    model.add(Dense(4096, init='normal', activation="relu"))
    model.add(BatchNormalization())
    model.add(Dense(512, init='normal', activation="relu"))
    model.add(BatchNormalization())
    model.add(Dense(2, init='normal', activation="softmax"))
    if weights_path:
        print("Loading weights...", end='\t')
        model.load_weights(weights_path)
        print("Finished.")
    return model
def get_simple_cnn(height, width):
    """ A simple CNN that has the same input/output shapes as the VGG16 model.
    Args:
        height: input height
        width: input width
    Return: Keras model
    """
    model = Sequential()
    # two pad/conv/pool stages, then one final padded conv before pooling
    for stage in range(2):
        if stage == 0:
            model.add(ZeroPadding2D((1, 1), input_shape=(3, height, width)))
        else:
            model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(64, 3, 3, activation='relu'))
        model.add(MaxPooling2D((4, 4), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(Lambda(global_average_pooling,
                     output_shape=global_average_pooling_shape))
    model.add(Dense(2, activation="softmax", init="uniform"))
    return model
def ConvBlock(layers, model, filters):
    """Append `layers` padded 3x3 relu convolutions (all with `filters`
    filters) to `model`, then a 2x2 strided max-pool and 25% dropout."""
    for _ in range(layers):
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(filters, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))
def _masked_conv(self, x, filter_size, stack_name, layer_idx, mask_type='B'):
    """Masked convolution for a PixelCNN-style vertical/horizontal stack.

    Asymmetric zero padding (Keras-1 4-tuple padding: top, bottom, left,
    right) followed by a 'valid' convolution restricts the receptive field
    to pixels above (vertical stack) or to the left (horizontal stack) of
    the current position. mask_type 'A' uses a shorter kernel than 'B'
    (filter_size[1]//2 vs filter_size[1]//2 + 1), i.e. it excludes one
    more column — presumably the current pixel; confirm against caller.
    Returns a tensor with 2 * self.nb_filters feature maps.
    """
    if stack_name == 'vertical':
        # pad only above and on both sides, then convolve with the top half of the kernel
        res = ZeroPadding2D(padding=(filter_size[0]//2, 0, filter_size[1]//2, filter_size[1]//2), name='v_pad_'+str(layer_idx))(x)
        res = Convolution2D(2*self.nb_filters, filter_size[0]//2+1, filter_size[1], border_mode='valid', name='v_conv_'+str(layer_idx))(res)
    elif stack_name == 'horizontal':
        # pad only on the left, then convolve with a 1-row kernel
        res = ZeroPadding2D(padding=(0, 0, filter_size[1]//2, 0), name='h_pad_'+str(layer_idx))(x)
        if mask_type == 'A':
            res = Convolution2D(2*self.nb_filters, 1, filter_size[1]//2, border_mode='valid', name='h_conv_'+str(layer_idx))(res)
        else:
            res = Convolution2D(2*self.nb_filters, 1, filter_size[1]//2+1, border_mode='valid', name='h_conv_'+str(layer_idx))(res)
    return res
def _shift_down(x):
    """Shift a feature map one step down along axis 1: pad one zero row on
    top (Keras-1 4-tuple padding: top, bottom, left, right) and crop back
    to the original axis-1 size, dropping the last row.
    NOTE(review): cropping axis 1 assumes rows live on axis 1
    (channels-last ordering) — confirm against the model's dim ordering."""
    n_rows = K.int_shape(x)[1]
    padded = ZeroPadding2D(padding=(1, 0, 0, 0))(x)
    # renamed lambda arg so it no longer shadows the outer `x`
    return Lambda(lambda t: t[:, :n_rows, :, :])(padded)
def __init__(self,
             d_size=(3, 128, 64),
             d_nb_filters=128,
             d_scales=4,
             d_FC=None,
             d_init=None,
             **kwargs):
    """DCGAN-style discriminator (Sequential subclass).

    Args:
        d_size: input shape (channels, height, width).
        d_nb_filters: base number of conv filters; doubled at each scale.
        d_scales: number of strided-conv downsampling stages.
        d_FC: optional list of fully-connected layer sizes after Flatten.
        d_init: optional weight initializer callable; defaults to InitNormal().
    """
    super(Discriminator, self).__init__(**kwargs)
    self.d_size = d_size
    self.d_nb_filters = d_nb_filters
    self.d_scales = d_scales
    self.d_FC = d_FC
    self.d_init = d_init if d_init is not None else InitNormal()
    c, h, w = d_size  # h and w should be multiply of 16
    nf = d_nb_filters
    for s in range(d_scales):
        # each scale: pad by 2, then a strided 5x5 conv halving the spatial size
        if s == 0:
            self.add( ZeroPadding2D((2, 2), input_shape=d_size) )
        else:
            self.add( ZeroPadding2D((2, 2)) )
        self.add( Convolution2D(nf*(2**s), 5, 5, subsample=(2,2), border_mode='valid',) )
        self.add( BN() )
        # self.add( BatchNormalization(beta_init='zero', gamma_init='one', mode=2, axis=1) )
        self.add( LeakyReLU(0.2) )
    self.add( Flatten() )
    if d_FC is not None:
        for fc_dim in d_FC:
            self.add( Dense(fc_dim,) )
            self.add( LeakyReLU(0.2) )
            self.add( BN() )
            # self.add( BatchNormalization(beta_init='zero', gamma_init='one', mode=2) )
    # NOTE(review): this trailing activation sat at ambiguous indentation in the
    # original; placed after the optional FC block — confirm against upstream.
    self.add( LeakyReLU(0.2) )
    self.add( Dense(1, activation='sigmoid') )
    # BUG FIX: initialize with the resolved initializer; the raw `d_init`
    # argument is None by default and calling it raised TypeError.
    self.d_init(self)
def __init__(self,
             d_size=(3, 128, 64),
             d_nb_filters=128,
             d_scales=4,
             d_FC=None,
             d_init=None,
             **kwargs):
    """WGAN critic (Sequential subclass) — same topology as Discriminator
    but with a linear (unbounded) scalar output instead of a sigmoid.

    Args:
        d_size: input shape (channels, height, width).
        d_nb_filters: base number of conv filters; doubled at each scale.
        d_scales: number of strided-conv downsampling stages.
        d_FC: optional list of fully-connected layer sizes after Flatten.
        d_init: optional weight initializer callable; defaults to InitNormal().
    """
    super(Critic, self).__init__(**kwargs)
    self.d_size = d_size
    self.d_nb_filters = d_nb_filters
    self.d_scales = d_scales
    self.d_FC = d_FC
    self.d_init = d_init if d_init is not None else InitNormal()
    c, h, w = d_size  # h and w should be multiply of 16
    nf = d_nb_filters
    for s in range(d_scales):
        # each scale: pad by 2, then a strided 5x5 conv halving the spatial size
        if s == 0:
            self.add( ZeroPadding2D((2, 2), input_shape=d_size) )
        else:
            self.add( ZeroPadding2D((2, 2)) )
        self.add( Convolution2D(nf*(2**s), 5, 5, subsample=(2,2), border_mode='valid',) )
        self.add( BN() )
        # self.add( BatchNormalization(beta_init='zero', gamma_init='one', mode=2, axis=1) )
        self.add( LeakyReLU(0.2) )
    self.add( Flatten() )
    if d_FC is not None:
        for fc_dim in d_FC:
            self.add( Dense(fc_dim,) )
            self.add( LeakyReLU(0.2) )
            self.add( BN() )
            # self.add( BatchNormalization(beta_init='zero', gamma_init='one', mode=2) )
    # NOTE(review): this trailing activation sat at ambiguous indentation in the
    # original; placed after the optional FC block — confirm against upstream.
    self.add( LeakyReLU(0.2) )
    self.add( Dense(1, activation='linear',) )
    # BUG FIX: initialize with the resolved initializer; the raw `d_init`
    # argument is None by default and calling it raised TypeError.
    self.d_init(self)
def __add_convolutional_layers(self):
    """Append three padded 3x3 relu conv layers (32, 48, 32 filters) to
    self.model, max-pooling 2x2 after the second and third. Input is a
    single-channel 28x28 image (MNIST-shaped, channels-first)."""
    # (filters, pool-after?) for each conv layer
    for idx, (n_filters, pool_after) in enumerate([(32, False), (48, True), (32, True)]):
        if idx == 0:
            # first layer carries the input shape
            self.model.add(ZeroPadding2D((1,1), input_shape=(1,28,28)))
        else:
            self.model.add(ZeroPadding2D((1,1)))
        self.model.add(Convolution2D(n_filters, 3, 3, activation='relu'))
        if pool_after:
            self.model.add(MaxPooling2D(pool_size=(2,2)))
def conv2D_lrn2d(x, nb_filter, nb_row, nb_col,
                 padding='same', strides=(1, 1),
                 activation='relu', LRN2D_norm=True, bias_initializer='zeros', kernel_initializer='glorot_uniform',
                 weight_decay=WEIGHT_DECAY, data_format="channels_first", name='conv'):
    '''
    Info:
        Function taken from the Inceptionv3.py script keras github
        Utility function to apply to a tensor a module Convolution + lrn2d
        with optional weight decay (L2 weight regularization).
    '''
    if weight_decay:
        W_regularizer = regularizers.l2(weight_decay)
        b_regularizer = regularizers.l2(weight_decay)
    else:
        W_regularizer = None
        b_regularizer = None
    # BUG FIX: the bias_initializer, kernel_initializer and data_format
    # parameters were accepted but ignored (hardcoded below). They are now
    # passed through; the defaults match the old hardcoded values, so
    # existing callers see identical behavior.
    # NOTE: bias_initializer/b_regularizer are inert while use_bias=False.
    x = Conv2D(nb_filter, (nb_row, nb_col), bias_regularizer=b_regularizer,
               activation=activation,
               data_format=data_format,
               padding=padding,
               strides=strides,
               bias_initializer=bias_initializer, kernel_initializer=kernel_initializer,
               kernel_regularizer=W_regularizer, use_bias=False, name=name)(x)
    x = ZeroPadding2D(padding=(1, 1), data_format=data_format)(x)
    if LRN2D_norm:
        # optional local response normalization, followed by another pad
        x = LRN2D(alpha=ALPHA, beta=BETA)(x)
        x = ZeroPadding2D(padding=(1, 1), data_format=data_format)(x)
    return x
Source file: visual_model_zoo.py
Project: visual_turing_test-tutorial
Author: mateuszmalinowski
Project source
File source
Views: 25
Favorites: 0
Likes: 0
Comments: 0
def create(self):
    """Build the visual encoder: one padded 64-filter conv, flatten, a
    4096-unit relu dense layer and dropout. Loads pretrained weights from
    self._weights_path when set. Returns the Sequential model."""
    encoder = Sequential()
    encoder.add(ZeroPadding2D((1,1), input_shape=self._visual_dim))
    encoder.add(Convolution2D(64, 3, 3, activation='relu'))
    encoder.add(Flatten())
    # record the encoder's output width for downstream consumers
    self._model_output_dim = 4096
    encoder.add(Dense(self._model_output_dim, activation='relu'))
    encoder.add(Dropout(0.5))
    if self._weights_path:
        encoder.load_weights(self._weights_path)
    return encoder
def vgg_16(weights_path=None):
    """Standard VGG-16 (Keras-1 Convolution2D API) for channels-first
    3x224x224 inputs with the ImageNet 1000-way softmax head.

    Args:
        weights_path: optional HDF5 weights file to load.
    Returns: Keras Sequential model.
    """
    model = Sequential()
    # five conv stages: (convs per stage, filters per conv); each conv is
    # preceded by a 1-pixel zero pad, each stage ends with a 2x2 max-pool
    first_layer = True
    for n_convs, n_filters in [(2, 64), (2, 128), (3, 256), (3, 512), (3, 512)]:
        for _ in range(n_convs):
            if first_layer:
                model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
                first_layer = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(n_filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # fully-connected classifier head
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model
def VGG_16(weights_path=None):
    """VGG-16 using the Keras-2 `Conv2D` API, channels-first 3x224x224
    input, ImageNet 1000-way softmax head.

    Args:
        weights_path: optional HDF5 weights file to load.
    Returns: Keras Sequential model.
    """
    # (convs per stage, filters per conv) for the five conv stages
    stage_specs = [(2, 64), (2, 128), (3, 256), (3, 512), (3, 512)]
    model = Sequential()
    is_first = True
    for n_convs, n_filters in stage_specs:
        for _ in range(n_convs):
            if is_first:
                model.add(ZeroPadding2D((1,1), input_shape=(3,224,224)))
                is_first = False
            else:
                model.add(ZeroPadding2D((1,1)))
            model.add(Conv2D(n_filters, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(Flatten())
    #top layer of the VGG net
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    """ENet encoder bottleneck block: a 1x1 -> conv -> 1x1 main branch plus
    an identity/pooled residual branch, merged by addition and a PReLU.

    Args:
        inp: input tensor; the channel-pad step reads axis 3, so
             channels-last ordering is assumed.
        output: number of output feature maps.
        internal_scale: channel reduction factor inside the bottleneck.
        asymmetric: if non-zero n, use a factorized (1,n) + (n,1) conv.
        dilated: if non-zero, use a 3x3 conv with this dilation rate.
        downsample: if True, halve the spatial size (strided 2x2 conv on the
                    main branch, max-pool + channel zero-pad on the residual).
        dropout_rate: SpatialDropout2D rate on the main branch.
    Returns: the merged output tensor.
    """
    # main branch
    internal = output // internal_scale
    encoder = inp
    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                     # padding='same',
                     strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    # middle conv: plain 3x3, factorized asymmetric, or dilated
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise(Exception('You shouldn\'t be here'))
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    # 1x1 expansion back to `output` channels
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)
    other = inp
    # other (residual) branch
    if downsample:
        other = MaxPooling2D()(other)
        # zero-pad the channel axis to `output` maps: Permute swaps channels
        # with a spatial axis so ZeroPadding2D can pad it, then swaps back
        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)
    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    return encoder
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    """ENet (unpooling variant) encoder bottleneck block.

    Same structure as the plain ENet bottleneck, except that downsampling
    uses MaxPoolingWithArgmax2D and the pooling indices are returned along
    with the output tensor so the decoder can unpool later.

    Args:
        inp: input tensor; the channel-pad step reads axis 3, so
             channels-last ordering is assumed.
        output: number of output feature maps.
        internal_scale: channel reduction factor inside the bottleneck.
        asymmetric: if non-zero n, use a factorized (1,n) + (n,1) conv.
        dilated: if non-zero, use a 3x3 conv with this dilation rate.
        downsample: if True, halve the spatial size and also return indices.
        dropout_rate: SpatialDropout2D rate on the main branch.
    Returns: output tensor, or (output tensor, argmax indices) when
             downsample is True.
    """
    # main branch
    internal = output // internal_scale
    encoder = inp
    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                     # padding='same',
                     strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    # middle conv: plain 3x3, factorized asymmetric, or dilated
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise(Exception('You shouldn\'t be here'))
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    # 1x1 expansion back to `output` channels
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)
    other = inp
    # other (residual) branch
    if downsample:
        # max-pool while recording argmax indices for decoder unpooling
        other, indices = MaxPoolingWithArgmax2D()(other)
        # zero-pad the channel axis to `output` maps via Permute trick
        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)
    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    if downsample:
        return encoder, indices
    else:
        return encoder
def minst_attention(inc_noise=False, attention=True):
    """Build an MNIST classifier with a soft-attention feedback pass.

    Layers conv_2a/maxp_2a/norm_2a/zero_2a and the dense heads are created
    once and applied twice (weight sharing): first on the plain forward
    pass, then on the attention-reweighted features.

    Args:
        inc_noise: if True, add Gaussian noise to the input first.
        attention: if True, gate the feedback with attention_control;
                   otherwise use no_attention_control.
    Returns: Keras Model mapping the input image to the second-pass softmax.
    """
    #make layers
    conv_1a = Convolution2D(32, 3, 3,activation='relu',name='conv_1')
    maxp_1a = MaxPooling2D((3, 3), strides=(2,2),name='convmax_1')
    norm_1a = crosschannelnormalization(name="convpool_1")
    zero_1a = ZeroPadding2D((2,2),name='convzero_1')
    conv_2a = Convolution2D(32,3,3,activation='relu',name='conv_2')
    maxp_2a = MaxPooling2D((3, 3), strides=(2,2),name='convmax_2')
    norm_2a = crosschannelnormalization(name="convpool_2")
    zero_2a = ZeroPadding2D((2,2),name='convzero_2')
    dense_1a = Lambda(global_average_pooling,output_shape=global_average_pooling_shape,name='dense_1')
    dense_2a = Dense(10, activation = 'softmax', init='uniform',name='dense_2')
    inputs = Input(shape=(1,image_size,image_size),name='input')
    #make actual model
    if inc_noise:
        inputs_noise = noise.GaussianNoise(2.5)(inputs)
        input_pad = ZeroPadding2D((1,1),input_shape=(1,image_size,image_size),name='input_pad')(inputs_noise)
    else:
        input_pad = ZeroPadding2D((1,1),input_shape=(1,image_size,image_size),name='input_pad')(inputs)
    # first conv stage
    conv_1 = conv_1a(input_pad)
    conv_1 = maxp_1a(conv_1)
    conv_1 = norm_1a(conv_1)
    conv_1 = zero_1a(conv_1)
    # second conv stage; conv_2_x (pre-pooling) is kept for the attention map
    conv_2_x = conv_2a(conv_1)
    conv_2 = maxp_2a(conv_2_x)
    conv_2 = norm_2a(conv_2)
    conv_2 = zero_2a(conv_2)
    conv_2 = Dropout(0.5)(conv_2)
    # first-pass prediction
    dense_1 = dense_1a(conv_2)
    dense_2 = dense_2a(dense_1)
    # derive an attention signal from the pre-pooling features
    conv_shape1 = Lambda(change_shape1,output_shape=(32,),name='chg_shape')(conv_2_x)
    find_att = dense_2a(conv_shape1)
    if attention:
        find_att = Lambda(attention_control,output_shape=att_shape,name='att_con')([find_att,dense_2])
    else:
        find_att = Lambda(no_attention_control,output_shape=att_shape,name='att_con')([find_att,dense_2])
    # apply attention multiplicatively to the stage-1 features
    zero_3a = ZeroPadding2D((1,1),name='convzero_3')(find_att)
    apply_attention = Merge(mode='mul',name='attend')([zero_3a,conv_1])
    # second pass through the SAME stage-2 layers (shared weights)
    conv_3 = conv_2a(apply_attention)
    conv_3 = maxp_2a(conv_3)
    conv_3 = norm_2a(conv_3)
    conv_3 = zero_2a(conv_3)
    dense_3 = dense_1a(conv_3)
    dense_4 = dense_2a(dense_3)
    model = Model(input=inputs,output=dense_4)
    return model
def get_model():
    """Build, compile, and load pre-trained weights for the deep all-conv
    classifier: one 96-filter 5x5 conv followed by eight identical padded
    192-filter 3x3 conv/relu blocks and a softmax head. Weights are loaded
    from ../run2/epoch_45_weights.h5. Returns the compiled model."""
    model = Sequential()
    model.add(ZeroPadding2D(padding=(3, 3), input_shape=(nb_input_layers, NB_ROWS, NB_COLS)))
    model.add(Convolution2D(96, 5, 5))
    model.add(Activation('relu'))
    # eight identical pad/conv/relu blocks
    for _ in range(8):
        model.add(ZeroPadding2D(padding=(1, 1)))
        model.add(Convolution2D(192, 3, 3))
        model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    print("Compiling model")
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    print("Compiled model")
    model.load_weights("../run2/epoch_45_weights.h5")
    return model
def VGG_16(height, width, weights_path=None):
    """
    VGG Model Keras specification
    args: weights_path (str) trained weights file path
    returns model (Keras model)
    """
    model = Sequential()
    # four pooled conv stages: (convs per stage, filters per conv)
    is_first = True
    for n_convs, n_filters in [(2, 64), (2, 128), (3, 256), (3, 512)]:
        for _ in range(n_convs):
            if is_first:
                model.add(ZeroPadding2D((1, 1), input_shape=(3, height, width)))
                is_first = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(n_filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # final 512-filter stage feeds global average pooling directly (no pool)
    for _ in range(3):
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(Lambda(global_average_pooling,
                     output_shape=global_average_pooling_shape))
    model.add(Dense(2, activation="softmax", init="uniform"))
    if weights_path:
        print("Loading weights...", end='\t')
        model.load_weights(weights_path)
        print("Finished.")
    return model
def wide_basic(incoming, nb_in_filters, nb_out_filters, dropout=None, subsample=(2, 2)):
    """Wide-ResNet basic residual unit (pre-activation, two 3x3 convs).

    Args:
        incoming: input tensor.
        nb_in_filters: channels of `incoming`.
        nb_out_filters: channels of the output.
        dropout: optional dropout rate between the two convs.
        subsample: stride of the first conv (and of the shortcut projection
                   when the channel count changes).
    Returns: residual sum tensor.
    """
    nb_bottleneck_filter = nb_out_filters
    if nb_in_filters == nb_out_filters:
        # identity shortcut: channels match, so `incoming` is added directly
        # conv3x3
        y = BatchNormalization(mode=0, axis=1)(incoming)
        y = Activation('relu')(y)
        y = ZeroPadding2D((1, 1))(y)
        y = Convolution2D(nb_bottleneck_filter, nb_row=3, nb_col=3,
                          subsample=subsample, init='he_normal', border_mode='valid')(y)
        # conv3x3
        y = BatchNormalization(mode=0, axis=1)(y)
        y = Activation('relu')(y)
        if dropout is not None:
            y = Dropout(dropout)(y)
        y = ZeroPadding2D((1, 1))(y)
        y = Convolution2D(nb_bottleneck_filter, nb_row=3, nb_col=3,
                          subsample=(1, 1), init='he_normal', border_mode='valid')(y)
        return merge([incoming, y], mode='sum')
    else:  # Residual Units for increasing dimensions
        # common BN, ReLU (shared by both the residual path and the shortcut)
        shortcut = BatchNormalization(mode=0, axis=1)(incoming)
        shortcut = Activation('relu')(shortcut)
        # conv3x3
        y = ZeroPadding2D((1, 1))(shortcut)
        y = Convolution2D(nb_bottleneck_filter, nb_row=3, nb_col=3,
                          subsample=subsample, init='he_normal', border_mode='valid')(y)
        # conv3x3
        y = BatchNormalization(mode=0, axis=1)(y)
        y = Activation('relu')(y)
        if dropout is not None:
            y = Dropout(dropout)(y)
        y = ZeroPadding2D((1, 1))(y)
        y = Convolution2D(nb_out_filters, nb_row=3, nb_col=3,
                          subsample=(1, 1), init='he_normal', border_mode='valid')(y)
        # shortcut: 1x1 projection to the new channel count / stride
        shortcut = Convolution2D(nb_out_filters, nb_row=1, nb_col=1,
                                 subsample=subsample, init='he_normal', border_mode='same')(shortcut)
        return merge([shortcut, y], mode='sum')
def VGG_16(weights_path=None):
    """VGG-16 (Keras-1 API) for channels-first 3x224x224 inputs with the
    ImageNet 1000-way softmax head; optionally loads `weights_path`."""
    # (number of padded 3x3 convs, filters) for each of the five stages
    architecture = ((2, 64), (2, 128), (3, 256), (3, 512), (3, 512))
    model = Sequential()
    model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
    pending_pad = False  # the first pad was added above, with input_shape
    for n_convs, n_filters in architecture:
        for _ in range(n_convs):
            if pending_pad:
                model.add(ZeroPadding2D((1,1)))
            pending_pad = True
            model.add(Convolution2D(n_filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2,2), strides=(2,2)))
    # fully-connected classifier head
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model
def VGG_16_graph():
    """VGG-16 built with the legacy Keras Graph API.

    Node names are plain strings wired via `input=`; layer order and names
    mirror the Sequential VGG-16 definitions elsewhere in this file.
    Returns the Graph model with one 'input' and one 'output' node.
    """
    model = Graph()
    model.add_input(name='input', input_shape=(3, 224, 224))
    # stage 1: two 64-filter convs + pool
    model.add_node(ZeroPadding2D((1,1)), name='pad1', input='input')
    model.add_node(Convolution2D(64, 3, 3, activation='relu'), name='relu1', input='pad1') # weights=sequence_model.layers[1].W.container
    model.add_node(ZeroPadding2D((1,1)), name='pad2', input='relu1')
    model.add_node(Convolution2D(64, 3, 3, activation='relu'), name='relu2', input='pad2')
    model.add_node(MaxPooling2D((2,2), strides=(2,2)), name='pool1', input='relu2')
    # stage 2: two 128-filter convs + pool
    model.add_node(ZeroPadding2D((1,1)), name='1', input='pool1')
    model.add_node(Convolution2D(128, 3, 3, activation='relu'), name='2', input='1')
    model.add_node(ZeroPadding2D((1,1)), name='3', input='2')
    model.add_node(Convolution2D(128, 3, 3, activation='relu'), name='4', input='3')
    model.add_node(MaxPooling2D((2,2), strides=(2,2)), name='5', input='4')
    # stage 3: three 256-filter convs + pool
    model.add_node(ZeroPadding2D((1,1)), name='6', input='5')
    model.add_node(Convolution2D(256, 3, 3, activation='relu'), name='7', input='6')
    model.add_node(ZeroPadding2D((1,1)), name='8', input='7')
    model.add_node(Convolution2D(256, 3, 3, activation='relu'), name='9', input='8')
    model.add_node(ZeroPadding2D((1,1)), name='10', input='9')
    model.add_node(Convolution2D(256, 3, 3, activation='relu'), name='11', input='10')
    model.add_node(MaxPooling2D((2,2), strides=(2,2)), name='12', input='11')
    # stage 4: three 512-filter convs + pool
    model.add_node(ZeroPadding2D((1,1)), name='13', input='12')
    model.add_node(Convolution2D(512, 3, 3, activation='relu'), name='14', input='13')
    model.add_node(ZeroPadding2D((1,1)), name='15', input='14')
    model.add_node(Convolution2D(512, 3, 3, activation='relu'), name='16', input='15')
    model.add_node(ZeroPadding2D((1,1)), name='17', input='16')
    model.add_node(Convolution2D(512, 3, 3, activation='relu'), name='18', input='17')
    model.add_node(MaxPooling2D((2,2), strides=(2,2)), name='19', input='18')
    # stage 5: three 512-filter convs + pool
    model.add_node(ZeroPadding2D((1,1)), name='20', input='19')
    model.add_node(Convolution2D(512, 3, 3, activation='relu'), name='21', input='20')
    model.add_node(ZeroPadding2D((1,1)), name='22', input='21')
    model.add_node(Convolution2D(512, 3, 3, activation='relu'), name='23', input='22')
    model.add_node(ZeroPadding2D((1,1)), name='24', input='23')
    model.add_node(Convolution2D(512, 3, 3, activation='relu'), name='25', input='24')
    model.add_node(MaxPooling2D((2,2), strides=(2,2)), name='26', input='25')
    # fully-connected classifier head
    model.add_node(Flatten(), name='27', input='26')
    model.add_node(Dense(4096, activation='relu'), name='28', input='27')
    model.add_node(Dropout(0.5), name='29', input='28')
    model.add_node(Dense(4096, activation='relu'), name='30', input='29')
    model.add_node(Dropout(0.5), name='31', input='30')
    model.add_node(Dense(1000, activation='softmax'), name='32', input='31')
    model.add_output(input='32', name='output')
    return model