def resnet_model(nb_blocks, bottleneck=True, l2_reg=1e-4):
    nb_channels = [16, 32, 64]
    inputs = Input((32, 32, 3))
    x = Convolution2D(16, 3, 3, border_mode='same', init='he_normal',
                      W_regularizer=l2(l2_reg), bias=False)(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    for n, f in zip(nb_channels, [True, False, False]):
        x = block_stack(x, n, nb_blocks, bottleneck=bottleneck, l2_reg=l2_reg,
                        first=f)
    # Last BN-ReLU
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(10)(x)
    x = Activation('softmax')(x)
    model = Model(input=inputs, output=x)
    return model
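# Hypothetical usage sketch (not part of the original snippet): build the
# ResNet defined above and compile it for 10-class CIFAR training. Assumes
# Keras 1.x and the block_stack helper from the original module are in scope.
model = resnet_model(nb_blocks=3, bottleneck=True)
model.compile(optimizer='sgd', loss='categorical_crossentropy',
              metrics=['accuracy'])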
def resnet_cifar10(repetitions, input_shape):
    x = Input(shape=input_shape)
    conv1 = Convolution2D(16, 3, 3, init='he_normal', border_mode='same',
                          W_regularizer=l2(1e-4))(x)
    # feature map size (32, 32, 16)
    # Build residual blocks..
    block_fn = _basic_block
    block1 = _residual_block(block_fn, 16, repetitions, (1, 1))(conv1)
    # feature map size (16, 16)
    block2 = _residual_block(block_fn, 32, repetitions, (2, 2))(block1)
    # feature map size (8, 8)
    block3 = _residual_block(block_fn, 64, repetitions, (2, 2))(block2)
    post_block_norm = BatchNormalization(mode=2, axis=3)(block3)
    post_block_relu = Activation("relu")(post_block_norm)
    # Classifier block
    pool2 = GlobalAveragePooling2D()(post_block_relu)
    dense = Dense(output_dim=10, init="he_normal",
                  W_regularizer=l2(1e-4), activation="softmax")(pool2)
    model = Model(input=x, output=dense)
    return model
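# Hypothetical usage sketch: with basic blocks, repetitions=3 gives roughly
# the 20-layer CIFAR-10 variant from the ResNet paper (assumes _basic_block
# and _residual_block from the original module are in scope).
model = resnet_cifar10(repetitions=3, input_shape=(32, 32, 3))
model.compile(optimizer='adam', loss='categorical_crossentropy')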
def test_globalpooling_2d():
    layer_test(pooling.GlobalMaxPooling2D,
               kwargs={'dim_ordering': 'th'},
               input_shape=(3, 4, 5, 6))
    layer_test(pooling.GlobalMaxPooling2D,
               kwargs={'dim_ordering': 'tf'},
               input_shape=(3, 5, 6, 4))
    layer_test(pooling.GlobalAveragePooling2D,
               kwargs={'dim_ordering': 'th'},
               input_shape=(3, 4, 5, 6))
    layer_test(pooling.GlobalAveragePooling2D,
               kwargs={'dim_ordering': 'tf'},
               input_shape=(3, 5, 6, 4))
def get_squeezenet(nb_classes, img_size=(64, 64)):
    input_img = Input(shape=(3, img_size[0], img_size[1]))
    x = Convolution2D(96, 7, 7, subsample=(2, 2), border_mode='valid')(input_img)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)
    x = fire_module(x, 16, 64)
    x = fire_module(x, 16, 64)
    x = fire_module(x, 32, 128)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)
    x = fire_module(x, 32, 192)
    x = fire_module(x, 48, 192)
    x = fire_module(x, 48, 192)
    x = fire_module(x, 64, 256)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)
    x = fire_module(x, 64, 256)
    x = Dropout(0.5)(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Convolution2D(nb_classes, 1, 1, border_mode='valid')(x)
    # Global average pooling over the class maps, then a softmax classifier
    x = GlobalAveragePooling2D()(x)
    out = Dense(nb_classes, activation='softmax')(x)
    model = Model(input=input_img, output=[out])
    return model
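# Hypothetical usage sketch: this variant expects channels-first input of
# shape (3, 64, 64) and needs the fire_module helper from the original code.
model = get_squeezenet(nb_classes=10, img_size=(64, 64))
model.compile(optimizer='adam', loss='categorical_crossentropy')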
def get_squeezenet(nb_classes, dim_ordering='th'):
    base_model = get_squeezenet_top()
    x = base_model.layers[-1].output
    x = Convolution2D(nb_classes, 1, 1, border_mode='valid', name='conv10')(x)
    x = Activation('relu', name='relu_conv10')(x)
    x = GlobalAveragePooling2D()(x)
    out = Activation('softmax', name='loss')(x)
    model = Model(input=base_model.input, output=[out])
    return model
def densenet_model(nb_blocks, nb_layers, growth_rate, dropout=0., l2_reg=1e-4,
                   init_channels=16):
    n_channels = init_channels
    inputs = Input(shape=(32, 32, 3))
    x = Convolution2D(init_channels, 3, 3, border_mode='same',
                      init='he_normal', W_regularizer=l2(l2_reg),
                      bias=False)(inputs)
    for i in range(nb_blocks - 1):
        # Create a dense block
        x = dense_block(x, nb_layers, growth_rate,
                        dropout=dropout, l2_reg=l2_reg)
        # Update the number of channels
        n_channels += nb_layers*growth_rate
        # Transition layer
        x = transition_block(x, n_channels, dropout=dropout, l2_reg=l2_reg)
    # Add last dense_block
    x = dense_block(x, nb_layers, growth_rate, dropout=dropout, l2_reg=l2_reg)
    # Add final BN-ReLU
    x = BatchNormalization(gamma_regularizer=l2(l2_reg),
                           beta_regularizer=l2(l2_reg))(x)
    x = Activation('relu')(x)
    # Global average pooling
    x = GlobalAveragePooling2D()(x)
    x = Dense(10, W_regularizer=l2(l2_reg))(x)
    x = Activation('softmax')(x)
    model = Model(input=inputs, output=x)
    return model
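# Hypothetical usage sketch: 3 blocks of 12 layers each corresponds to the
# DenseNet-40 configuration from the paper (assumes dense_block and
# transition_block from the original module are in scope).
model = densenet_model(nb_blocks=3, nb_layers=12, growth_rate=12)
model.compile(optimizer='sgd', loss='categorical_crossentropy',
              metrics=['accuracy'])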
# Apply preprocessing as described in the paper: normalize each channel
# individually. We use the values from fb.resnet.torch; computing them
# from the data directly gives very close values.
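# A minimal sketch of that per-channel normalization. The mean/std constants
# below are the CIFAR-10 statistics published in fb.resnet.torch; whether the
# original code used exactly these values is an assumption.
import numpy as np

CIFAR10_MEAN = np.array([125.3, 123.0, 113.9])
CIFAR10_STD = np.array([63.0, 62.1, 66.7])

def normalize_per_channel(images):
    # images: float array of shape (n, 32, 32, 3) with values in [0, 255]
    return (images - CIFAR10_MEAN) / CIFAR10_STD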
def get_squeezenet(nb_classes, dim_ordering='tf', include_top=True):
    if dim_ordering == 'th':
        input_img = Input(shape=(3, 227, 227))
    elif dim_ordering == 'tf':
        input_img = Input(shape=(227, 227, 3))
    else:
        raise NotImplementedError("Only 'th' (Theano) and 'tf' (TensorFlow) "
                                  "dim orderings are supported")
    x = Convolution2D(64, 3, 3, subsample=(2, 2), border_mode='valid', name='conv1')(input_img)
    x = Activation('relu', name='relu_conv1')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)
    x = fire_module(x, fire_id=2, squeeze=16, expand=64, dim_ordering=dim_ordering)
    x = fire_module(x, fire_id=3, squeeze=16, expand=64, dim_ordering=dim_ordering)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool3')(x)
    x = fire_module(x, fire_id=4, squeeze=32, expand=128, dim_ordering=dim_ordering)
    x = fire_module(x, fire_id=5, squeeze=32, expand=128, dim_ordering=dim_ordering)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool5')(x)
    x = fire_module(x, fire_id=6, squeeze=48, expand=192, dim_ordering=dim_ordering)
    x = fire_module(x, fire_id=7, squeeze=48, expand=192, dim_ordering=dim_ordering)
    x = fire_module(x, fire_id=8, squeeze=64, expand=256, dim_ordering=dim_ordering)
    x = fire_module(x, fire_id=9, squeeze=64, expand=256, dim_ordering=dim_ordering)
    x = Dropout(0.5, name='drop9')(x)
    x = Convolution2D(nb_classes, 1, 1, border_mode='valid', name='conv10')(x)
    x = Activation('relu', name='relu_conv10')(x)
    if include_top:
        x = GlobalAveragePooling2D()(x)
        out = Activation('softmax', name='loss')(x)
    else:
        out = x
    model = Model(input=input_img, output=[out])
    return model
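# Hypothetical usage sketch: build the 227x227 SqueezeNet for 1000 classes
# with TensorFlow dim ordering (assumes the fire_module helper is in scope).
model = get_squeezenet(nb_classes=1000, dim_ordering='tf', include_top=True)
model.compile(optimizer='sgd', loss='categorical_crossentropy')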
def create_dense_net(nb_classes, img_dim, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=16, dropout_rate=None,
                     weight_decay=1E-4, verbose=True):
    ''' Build the create_dense_net model
    Args:
        nb_classes: number of classes
        img_dim: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        depth: number of layers
        nb_dense_block: number of dense blocks to add to end
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras Model with nb_layers of conv_block appended
    '''
    model_input = Input(shape=img_dim)
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1
    assert (depth - 4) % 3 == 0, "Depth must be 3N + 4"
    # layers in each dense block
    nb_layers = int((depth - 4) / 3)
    # Initial convolution
    x = Convolution2D(nb_filter, 3, 3, init="he_uniform", border_mode="same", name="initial_conv2D", bias=False,
                      W_regularizer=l2(weight_decay))(model_input)
    x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=dropout_rate,
                                   weight_decay=weight_decay)
        # add transition_block
        x = transition_block(x, nb_filter, dropout_rate=dropout_rate, weight_decay=weight_decay)
    # The last dense_block does not have a transition_block
    x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=dropout_rate,
                               weight_decay=weight_decay)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(nb_classes, activation='softmax', W_regularizer=l2(weight_decay), b_regularizer=l2(weight_decay))(x)
    densenet = Model(input=model_input, output=x, name="create_dense_net")
    if verbose: print("DenseNet-%d-%d created." % (depth, growth_rate))
    return densenet
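# Hypothetical usage sketch: the img_dim tuple must match
# K.image_dim_ordering(); for TensorFlow ordering on CIFAR-10:
model = create_dense_net(nb_classes=10, img_dim=(32, 32, 3), depth=40,
                         nb_dense_block=3, growth_rate=12)
model.compile(optimizer='adam', loss='categorical_crossentropy')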
def DenseNet(inputs, n_classes, n_dense_block, n_layers, growth_rate,
             n_initial_filters, compression=1.0, use_bottleneck=False,
             dropout=None, weight_decay=1e-4, initial_conv=3):
    # Initial convolution
    if initial_conv == 3:
        l, n_filters = initial_conv_3x3(inputs, n_initial_filters,
                                        weight_decay)
    elif initial_conv == 7:
        l, n_filters = initial_conv_7x7(inputs, n_initial_filters,
                                        weight_decay)
    else:
        raise ValueError('Unknown initial convolution')
    # Channel axis: 1 for Theano ordering, -1 for TensorFlow ordering.
    # (Defined here so the function is self-contained; the original module
    # presumably defined channel_idx at module level.)
    channel_idx = 1 if K.image_dim_ordering() == 'th' else -1
    # Add dense blocks
    for block_idx in range(n_dense_block):
        # Add dense block
        l, n_filters = dense_block(l, n_layers[block_idx], n_filters,
                                   growth_rate, dropout, use_bottleneck,
                                   weight_decay, name='block' + str(block_idx))
        # Add transition down except for the last block
        if block_idx < (n_dense_block - 1):
            l, n_filters = transition_down(l, n_filters, compression, dropout,
                                           weight_decay,
                                           name='block' + str(block_idx))
    # Add classifier at the end
    l = BatchNormalization(mode=0, axis=channel_idx,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay),
                           name='classifier_bn')(l)
    l = Activation('relu', name='classifier_relu')(l)
    l = GlobalAveragePooling2D(name='classifier_pool')(l)
    l = Dense(n_classes,
              activation='softmax',
              W_regularizer=l2(weight_decay),
              b_regularizer=l2(weight_decay),
              name='classifier_dense')(l)
    model = Model(input=[inputs], output=[l], name="DenseNet")
    return model
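# Hypothetical usage sketch: unlike the builders above, DenseNet takes an
# input tensor rather than a shape, and n_layers is a per-block list
# (assumes the initial_conv_* and block helpers are in scope).
inputs = Input(shape=(32, 32, 3))
model = DenseNet(inputs, n_classes=10, n_dense_block=3,
                 n_layers=[12, 12, 12], growth_rate=12,
                 n_initial_filters=16)
model.compile(optimizer='sgd', loss='categorical_crossentropy')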
# Build the densenet model