# Imports for the residual-block snippets below (Keras 2-style API).
from keras.layers import (Conv1D, Conv2D, BatchNormalization, Activation,
                          Dropout, MaxPooling1D, MaxPooling2D, add)

def first_2d_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = Conv2D(k1, 1, padding='same', data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2, kernel_size, padding='same', data_format='channels_last')(out)
    # Shortcut branch; the deprecated merge([...], mode='sum') is now add().
    pooling = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(tensor_input)
    out = add([out, pooling])
    return out
def repeated_2d_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1, kernel_size, padding='same', data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2, kernel_size, padding='same', data_format='channels_last')(out)
    pooling = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(x)
    out = add([out, pooling])
    return out
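A minimal sketch of how these two 2D blocks might be chained; the input shape and filter counts below are illustrative assumptions, and k2 must equal the incoming channel count so that add() sees matching shapes on both branches.

from keras.layers import Input
from keras.models import Model

inputs = Input(shape=(32, 32, 16))      # assumed: 32x32 feature maps, 16 channels
x = first_2d_block(inputs, (16, 16))    # k2 = 16 matches the shortcut's channels
x = repeated_2d_block(x, (16, 16))
model = Model(inputs, x)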
def first_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = Conv1D(k1, 1, padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, strides=2, padding='same')(out)
    # Both branches halve the sequence length, so the residual add lines up.
    pooling = MaxPooling1D(pooling_size, strides=2, padding='same')(tensor_input)
    out = add([out, pooling])
    return out
def repeated_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1, kernel_size, padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, strides=2, padding='same')(out)
    pooling = MaxPooling1D(pooling_size, strides=2, padding='same')(x)
    out = add([out, pooling])
    return out
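The 1D variants downsample in both branches (strided conv and strided pooling), so each block halves the sequence length. A minimal sketch under assumed shapes, reusing the Input and Model imports from the sketch above:

seq_in = Input(shape=(128, 8))     # assumed: length-128 sequences, 8 channels
y = first_block(seq_in, (8, 8))    # length 128 -> 64
y = repeated_block(y, (8, 8))      # length 64 -> 32
model_1d = Model(seq_in, y)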
# A second 2D variant (from another source) that downsamples with a strided
# convolution; pooling_size defaults to 2, so the shortcut shrinks to match.
def first_2d_block(tensor_input, filters, kernel_size=3, pooling_size=2, dropout=0.5):
    k1, k2 = filters
    out = Conv2D(k1, 1, padding='same', data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2, kernel_size, strides=2, padding='same', data_format='channels_last')(out)
    pooling = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(tensor_input)
    out = add([out, pooling])
    return out
def repeated_2d_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1, kernel_size, padding='same', data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2, kernel_size, strides=2, padding='same', data_format='channels_last')(out)
    # Mirrors the 1D repeated_block: only the second conv is strided, and the
    # shortcut pools with strides=2 so both branches keep matching shapes
    # (the original had both convs strided and an unstrided pool, which breaks add()).
    pooling = MaxPooling2D(pooling_size, strides=2, padding='same', data_format='channels_last')(x)
    out = add([out, pooling])
    return out
def small_nn(self):
    model = Sequential()
    model.add(Conv2D(64, (self.stride, self.stride), name='conv1',
                     padding='same',
                     activation='relu',
                     input_shape=self.ip_shape[1:]))
    model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(32, activation='relu', name='dense1'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax', name='dense2'))
    adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    return model
def small_nn_soft(self, temp):
    model = Sequential()
    model.add(Conv2D(64, (self.stride, self.stride), name='conv1',
                     padding='same',
                     activation='relu',
                     input_shape=self.ip_shape[1:]))
    model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(32, activation='relu', name='dense1'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(10, name='dense2'))
    # Scale the logits by 1/temp before the softmax to produce softened
    # probabilities (the usual knowledge-distillation trick).
    model.add(Lambda(lambda x: x / temp))
    model.add(Activation('softmax'))
    adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    return model
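small_nn_soft differs from small_nn only in the Lambda layer, which divides the logits by temp before the softmax; larger temperatures flatten the output distribution. A standalone numpy sketch of the effect (the logits are made up):

import numpy as np

def softmax_with_temp(z, temp):
    z = np.asarray(z, dtype=np.float64) / temp
    e = np.exp(z - z.max())    # subtract the max for numerical stability
    return e / e.sum()

logits = [2.0, 1.0, 0.1]
print(softmax_with_temp(logits, 1.0))   # sharp, ordinary softmax
print(softmax_with_temp(logits, 5.0))   # flatter, 'softened' distribution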
def build_model(self):
    # img_channels, img_rows, img_cols, n, k, and nb_classes come from the
    # enclosing module in the original project (Keras 1-style API).
    img_input = Input(shape=(img_channels, img_rows, img_cols))
    # One conv at the beginning (spatial size: 32x32)
    x = ZeroPadding2D((1, 1))(img_input)
    x = Convolution2D(16, nb_row=3, nb_col=3)(x)
    # Stage 1 (spatial size: 32x32)
    x = bottleneck(x, n, 16, 16 * k, dropout=0.3, subsample=(1, 1))
    # Stage 2 (spatial size: 16x16)
    x = bottleneck(x, n, 16 * k, 32 * k, dropout=0.3, subsample=(2, 2))
    # Stage 3 (spatial size: 8x8)
    x = bottleneck(x, n, 32 * k, 64 * k, dropout=0.3, subsample=(2, 2))
    x = BatchNormalization(mode=0, axis=1)(x)
    x = Activation('relu')(x)
    x = AveragePooling2D((8, 8), strides=(1, 1))(x)
    x = Flatten()(x)
    preds = Dense(nb_classes, activation='softmax')(x)
    self.model = Model(input=img_input, output=preds)
    self.keras_get_params()
def double_conv_layer(x, size, dropout, batch_norm):
    from keras.models import Model
    from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers.core import Dropout, Activation
    conv = Convolution2D(size, 3, 3, border_mode='same')(x)
    if batch_norm:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    conv = Convolution2D(size, 3, 3, border_mode='same')(conv)
    if batch_norm:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    if dropout > 0:
        conv = Dropout(dropout)(conv)
    return conv
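The unused imports (UpSampling2D, merge) suggest double_conv_layer comes from a U-Net-style model. A hypothetical encoder step built from it; the channels-first shapes are an assumption implied by the axis=1 batch norm, and the filter counts are illustrative:

unet_in = Input(shape=(3, 64, 64))
c1 = double_conv_layer(unet_in, 32, dropout=0.1, batch_norm=True)
p1 = MaxPooling2D(pool_size=(2, 2))(c1)    # downsample before the next block
c2 = double_conv_layer(p1, 64, dropout=0.1, batch_norm=True)
encoder = Model(unet_in, c2)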
def make_dcgan_discriminator(Xk_d):
    x = Convolution2D(nb_filter=64, nb_row=5, nb_col=5, subsample=(2, 2),
                      activation=None, border_mode='same', init='glorot_uniform',
                      dim_ordering='th')(Xk_d)
    x = BatchNormalization(mode=2, axis=1)(x)
    x = LeakyReLU(0.2)(x)
    x = Convolution2D(nb_filter=128, nb_row=5, nb_col=5, subsample=(2, 2),
                      activation=None, border_mode='same', init='glorot_uniform',
                      dim_ordering='th')(x)
    x = BatchNormalization(mode=2, axis=1)(x)
    x = LeakyReLU(0.2)(x)
    x = Flatten()(x)
    x = Dense(1024)(x)
    x = BatchNormalization(mode=2)(x)
    x = LeakyReLU(0.2)(x)
    d = Dense(1, activation=None)(x)
    return d
def make_dcgan_generator(Xk_g, n_lat, n_chan=1):
    n_g_hid1 = 1024  # size of hidden layer in generator layer 1
    n_g_hid2 = 128   # size of hidden layer in generator layer 2
    x = Dense(n_g_hid1)(Xk_g)
    x = BatchNormalization(mode=2)(x)
    x = Activation('relu')(x)
    x = Dense(n_g_hid2 * 7 * 7)(x)
    x = BatchNormalization(mode=2)(x)
    x = Activation('relu')(x)
    x = Reshape((n_g_hid2, 7, 7))(x)
    x = Deconvolution2D(64, 5, 5, output_shape=(128, 64, 14, 14),
                        border_mode='same', activation=None, subsample=(2, 2),
                        init='orthogonal', dim_ordering='th')(x)
    x = BatchNormalization(mode=2, axis=1)(x)
    x = Activation('relu')(x)
    g = Deconvolution2D(n_chan, 5, 5, output_shape=(128, n_chan, 28, 28),
                        border_mode='same', activation='sigmoid', subsample=(2, 2),
                        init='orthogonal', dim_ordering='th')(x)
    return g
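A hypothetical way to wire this pair up, with Input and Model imported as in the earlier sketches; n_lat = 100 is an assumption, and note the batch size of 128 hard-coded into each Deconvolution2D output_shape, which the training batches would have to match:

n_lat = 100
Xk_g = Input(shape=(n_lat,))
Xk_d = Input(shape=(1, 28, 28))      # channels-first MNIST-sized images

generator = Model(Xk_g, make_dcgan_generator(Xk_g, n_lat))
discriminator = Model(Xk_d, make_dcgan_discriminator(Xk_d))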
def make_dcgan_discriminator(Xk_d):
    x = Convolution2D(nb_filter=64, nb_row=4, nb_col=4, subsample=(2, 2),
                      activation=None, border_mode='same', init=conv2D_init,
                      dim_ordering='th')(Xk_d)
    # x = BatchNormalization(mode=2, axis=1)(x)  # <- makes things much worse!
    x = LeakyReLU(0.2)(x)
    x = Convolution2D(nb_filter=128, nb_row=4, nb_col=4, subsample=(2, 2),
                      activation=None, border_mode='same', init=conv2D_init,
                      dim_ordering='th')(x)
    x = BatchNormalization(mode=2, axis=1)(x)
    x = LeakyReLU(0.2)(x)
    x = Flatten()(x)
    x = Dense(1024, init=conv2D_init)(x)
    x = BatchNormalization(mode=2)(x)
    x = LeakyReLU(0.2)(x)
    d = Dense(1, activation=None)(x)
    return d
def make_dcgan_generator(Xk_g, n_lat, n_chan=1):
    n_g_hid1 = 1024  # size of hidden layer in generator layer 1
    n_g_hid2 = 128   # size of hidden layer in generator layer 2
    x = Dense(n_g_hid1, init=conv2D_init)(Xk_g)
    x = BatchNormalization(mode=2)(x)
    x = Activation('relu')(x)
    x = Dense(n_g_hid2 * 7 * 7, init=conv2D_init)(x)
    x = Reshape((n_g_hid2, 7, 7))(x)
    x = BatchNormalization(mode=2, axis=1)(x)
    x = Activation('relu')(x)
    x = Deconvolution2D(64, 5, 5, output_shape=(128, 64, 14, 14),
                        border_mode='same', activation=None, subsample=(2, 2),
                        init=conv2D_init, dim_ordering='th')(x)
    x = BatchNormalization(mode=2, axis=1)(x)
    x = Activation('relu')(x)
    g = Deconvolution2D(n_chan, 5, 5, output_shape=(128, n_chan, 28, 28),
                        border_mode='same', activation='sigmoid', subsample=(2, 2),
                        init=conv2D_init, dim_ordering='th')(x)
    return g
def conv2d_bn(x, nb_filter, nb_row, nb_col, border_mode='same', subsample=(1, 1), name=None):
    """Utility function to apply conv + BN."""
    if name is not None:
        bn_name = name + '_bn'
        conv_name = name + '_conv'
    else:
        bn_name = None
        conv_name = None
    bn_axis = 3
    x = Convolution2D(nb_filter, nb_row, nb_col,
                      subsample=subsample,
                      activation='relu',
                      border_mode=border_mode,
                      name=conv_name)(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name)(x)
    return x
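A small hypothetical chain built with conv2d_bn (Input and Model as imported earlier); the channels-last input shape matches the hard-coded bn_axis = 3:

inp = Input(shape=(64, 64, 3))
h = conv2d_bn(inp, 32, 3, 3, name='block1')
h = conv2d_bn(h, 64, 3, 3, subsample=(2, 2), name='block2')
tower = Model(inp, h)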
def test_medium_conv_batchnorm_random(self):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 3)
    num_kernels = 3
    kernel_height = 5
    kernel_width = 5
    data_mean = 2
    data_var = 1
    # Define a model
    from keras.layers.normalization import BatchNormalization
    model = Sequential()
    model.add(Convolution2D(input_shape=input_shape,
                            nb_filter=num_kernels, nb_row=kernel_height,
                            nb_col=kernel_width))
    model.add(BatchNormalization(epsilon=1e-5))
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_keras_model(model)
def test_tiny_mcrnn_music_tagger(self):
    x_in = Input(shape=(4, 6, 1))
    x = ZeroPadding2D(padding=(0, 1))(x_in)
    x = BatchNormalization(axis=2, name='bn_0_freq')(x)
    # Conv block 1
    x = Convolution2D(2, 3, 3, border_mode='same', name='conv1')(x)
    x = BatchNormalization(axis=3, mode=0, name='bn1')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
    # Conv block 2
    x = Convolution2D(4, 3, 3, border_mode='same', name='conv2')(x)
    x = BatchNormalization(axis=3, mode=0, name='bn2')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(x)
    # Should get you (1, 1, 2, 4)
    x = Reshape((2, 4))(x)
    x = GRU(32, return_sequences=True, name='gru1')(x)
    x = GRU(32, return_sequences=False, name='gru2')(x)
    # Create model.
    model = Model(x_in, x)
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    self._test_keras_model(model, mode='random_zero_mean', delta=1e-2)
def test_batchnorm_mode_0_or_2():
    for mode in [0, 2]:
        model = Sequential()
        norm_m0 = normalization.BatchNormalization(mode=mode, input_shape=(10,), momentum=0.8)
        model.add(norm_m0)
        model.compile(loss='mse', optimizer='sgd')
        # Centered on 5.0, variance 10.0
        X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
        model.fit(X, X, nb_epoch=4, verbose=0)
        out = model.predict(X)
        out -= K.eval(norm_m0.beta)
        out /= K.eval(norm_m0.gamma)
        assert_allclose(out.mean(), 0.0, atol=1e-1)
        assert_allclose(out.std(), 1.0, atol=1e-1)
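The assertions hold because batch normalization computes gamma * (x - mean) / sqrt(var + eps) + beta, so subtracting beta and dividing by gamma recovers the normalized activations, which should be roughly zero-mean with unit standard deviation. A standalone numpy check of the same inversion (gamma and beta are made up):

import numpy as np

x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
gamma, beta = 1.3, 0.2                                             # made-up affine parameters
out = gamma * (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + 1e-5) + beta
recovered = (out - beta) / gamma                                   # undo the affine part
print(recovered.mean(), recovered.std())                           # ~0.0 and ~1.0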
def test_shared_batchnorm():
    '''Test that a BN layer can be shared
    across different data streams.
    '''
    # Test single layer reuse
    bn = normalization.BatchNormalization(input_shape=(10,), mode=0)
    x1 = Input(shape=(10,))
    bn(x1)
    x2 = Input(shape=(10,))
    y2 = bn(x2)
    x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
    model = Model(x2, y2)
    assert len(model.updates) == 2
    model.compile('sgd', 'mse')
    model.train_on_batch(x, x)
    # Test model-level reuse
    x3 = Input(shape=(10,))
    y3 = model(x3)
    new_model = Model(x3, y3)
    assert len(model.updates) == 2
    new_model.compile('sgd', 'mse')
    new_model.train_on_batch(x, x)
def nn_mlp(input_shape, params):
    model = Sequential()
    for i, layer_size in enumerate(params['layers']):
        reg = regularizer(params)
        if i == 0:
            model.add(Dense(layer_size, init='he_normal', W_regularizer=reg, input_shape=input_shape))
        else:
            model.add(Dense(layer_size, init='he_normal', W_regularizer=reg))
        if params.get('batch_norm', False):
            model.add(BatchNormalization())
        if 'dropouts' in params:
            model.add(Dropout(params['dropouts'][i]))
        model.add(PReLU())
    model.add(Dense(1, init='he_normal'))
    return model
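A hypothetical invocation of nn_mlp; regularizer(params) is defined elsewhere in the original project, so a permissive stand-in is assumed here, and the params dict is purely illustrative:

def regularizer(params):    # stand-in: no weight regularization
    return None

params = {'layers': [64, 32], 'batch_norm': True, 'dropouts': [0.2, 0.2]}
model = nn_mlp(input_shape=(100,), params=params)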