def get_config(self):
config = {'name': self.__class__.__name__,
'nb_filter': self.nb_filter,
'nb_row': self.nb_row,
'nb_col': self.nb_col,
'init': self.init.__name__,
'activation': self.activation.__name__,
'border_mode': self.border_mode,
'subsample': self.subsample,
'dim_ordering': self.dim_ordering,
'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
'activity_regularizer': self.activity_regularizer.get_config() if self.activity_regularizer else None,
'W_constraint': self.W_constraint.get_config() if self.W_constraint else None,
'b_constraint': self.b_constraint.get_config() if self.b_constraint else None}
base_config = super(Convolution2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
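# get_config() serializes the layer's constructor arguments so the layer can be
# reconstructed later. A minimal usage sketch, assuming the Keras 1.x API that
# this listing targets:
from keras.layers import Convolution2D

conv = Convolution2D(32, 3, 3, border_mode='same', subsample=(1, 1))
config = conv.get_config()
print(config['nb_filter'])    # 32
print(config['border_mode'])  # same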
# Python Convolution2D() usage examples
def build_model(nb_filters=32, nb_pool=2, nb_conv=3):
C_1 = 64
C_2 = 32
C_3 = 16
c = Convolution2D(C_1, nb_conv, nb_conv, border_mode='same', input_shape=(3, 32, 32))
mp = MaxPooling2D(pool_size=(nb_pool, nb_pool))
c2 = Convolution2D(C_2, nb_conv, nb_conv, border_mode='same')
mp2 = MaxPooling2D(pool_size=(nb_pool, nb_pool))
d = Dense(100)
encoder = get_encoder(c, c2, d, mp, mp2)
decoder = get_decoder(C_1, C_2, C_3, c, c2, d, mp, mp2, nb_pool)
graph = Graph()
graph.add_input(name='input', input_shape=(3, 32, 32))
graph.add_node(encoder, name='encoder', input='input')
graph.add_node(decoder, name='decoder', input='encoder')
graph.add_output(name='autoencoder_feedback', input='decoder')
graph.compile('rmsprop', {'autoencoder_feedback': 'mean_squared_error'})
return graph
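# A usage sketch for the Graph-based autoencoder above, assuming the legacy
# keras.models.Graph API (Keras 0.x/1.0) and a hypothetical CIFAR-10-shaped
# array X_train of shape (N, 3, 32, 32); the single output is trained to
# reproduce the input:
graph = build_model()
graph.fit({'input': X_train, 'autoencoder_feedback': X_train},
          nb_epoch=10, batch_size=128)
reconstructions = graph.predict({'input': X_train})['autoencoder_feedback']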
def build_model(nb_filters=32, nb_pool=2, nb_conv=3):
model = models.Sequential()
d = Dense(30)
c = Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='same', input_shape=(1, 28, 28))
mp = MaxPooling2D(pool_size=(nb_pool, nb_pool))
# ========= ENCODER ========================
model.add(c)
model.add(Activation('tanh'))
model.add(mp)
model.add(Dropout(0.25))
# ========= BOTTLENECK ======================
model.add(Flatten())
model.add(d)
model.add(Activation('tanh'))
# ========= BOTTLENECK^-1 =====================
model.add(DependentDense(nb_filters * 14 * 14, d))
model.add(Activation('tanh'))
model.add(Reshape((nb_filters, 14, 14)))
# ========= DECODER =========================
model.add(DePool2D(mp, size=(nb_pool, nb_pool)))
model.add(Deconvolution2D(c, border_mode='same'))
model.add(Activation('tanh'))
return model
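# Usage sketch for the tied-weight autoencoder above. DependentDense, DePool2D
# and Deconvolution2D are custom layers (their weights are tied to the encoder
# layers passed in), so this assumes they are importable from the same project;
# X_train is a hypothetical (N, 1, 28, 28) MNIST-like array:
model = build_model()
model.compile(optimizer='rmsprop', loss='mean_squared_error')
model.fit(X_train, X_train, nb_epoch=10, batch_size=128)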
def conv_block(ip, nb_filter, dropout_rate=None, weight_decay=1E-4):
''' Apply ReLU and a 3x3 Conv2D, with optional dropout
    Args:
        ip: input Keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: Keras tensor after applying relu, convolution2d and optional dropout
    '''
x = Activation('relu')(ip)
x = Convolution2D(nb_filter, 3, 3, init="he_uniform", border_mode="same", bias=False,
W_regularizer=l2(weight_decay))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
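# conv_block is the unit a DenseNet chains into a dense block, concatenating each
# block's output onto the running feature map. dense_block itself is not part of
# this listing; the following is a minimal sketch of that loop under the same
# Keras 1.x API (K and merge as imported elsewhere in the file):
def dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=None,
                weight_decay=1E-4):
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1
    feature_list = [x]
    for i in range(nb_layers):
        x = conv_block(x, growth_rate, dropout_rate, weight_decay)
        feature_list.append(x)
        x = merge(feature_list, mode='concat', concat_axis=concat_axis)
        nb_filter += growth_rate
    return x, nb_filter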
def transition_block(ip, nb_filter, dropout_rate=None, weight_decay=1E-4):
''' Apply a 1x1 Conv2D, optional dropout, AveragePooling2D and BatchNorm
    Args:
        ip: keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor, after applying conv, optional dropout, average pooling and batch norm
    '''
concat_axis = 1 if K.image_dim_ordering() == "th" else -1
x = Convolution2D(nb_filter, 1, 1, init="he_uniform", border_mode="same", bias=False,
W_regularizer=l2(weight_decay))(ip)
if dropout_rate:
x = Dropout(dropout_rate)(x)
x = AveragePooling2D((2, 2), strides=(2, 2))(x)
x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(x)
return x
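# Putting the two together: in a DenseNet, a dense block is followed by a
# transition block that compresses and downsamples between stages. A sketch,
# reusing the hypothetical dense_block above on some Keras tensor x:
x, nb_filter = dense_block(x, nb_layers=12, nb_filter=16, growth_rate=12,
                           dropout_rate=0.2)
x = transition_block(x, nb_filter, dropout_rate=0.2)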
def conv2d_bn(x, nb_filter, num_row, num_col,
padding='same', strides=(1, 1), use_bias=False):
"""
Utility function to apply conv + BN.
(Slightly modified from https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)
"""
if K.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
x = Convolution2D(nb_filter, (num_row, num_col),
strides=strides,
padding=padding,
use_bias=use_bias,
kernel_regularizer=regularizers.l2(0.00004),
kernel_initializer=initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal', seed=None))(x)
x = BatchNormalization(axis=channel_axis, momentum=0.9997, scale=False)(x)
x = Activation('relu')(x)
return x
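# Note that conv2d_bn uses the Keras 2 API (padding/strides/use_bias keywords and
# K.image_data_format), unlike most snippets here. A usage sketch building a small
# Inception-style input stem:
from keras.layers import Input

inputs = Input(shape=(299, 299, 3))
x = conv2d_bn(inputs, 32, 3, 3, strides=(2, 2), padding='valid')
x = conv2d_bn(x, 32, 3, 3, padding='valid')
x = conv2d_bn(x, 64, 3, 3)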
def create_model(learning_rate=0.1, momentum=0.9):
model = Sequential()
model.add(Convolution2D(20, 9, 9, border_mode='same', input_shape=(3, SIZE, SIZE)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Convolution2D(50, 5, 5, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(768, init='uniform', activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(384, init='uniform', activation='relu', W_constraint=maxnorm(3)))
model.add(Dense(4))
model.add(Activation("softmax"))
sgd = SGD(lr=learning_rate, momentum=momentum, nesterov=True, decay=1e-6)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=["accuracy"])
return model
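# Because create_model exposes learning_rate and momentum as arguments, it can be
# plugged into a scikit-learn hyperparameter search. A sketch, assuming the
# Keras 1.x scikit-learn wrapper and hypothetical arrays X of shape
# (N, 3, SIZE, SIZE) with one-hot labels y of shape (N, 4):
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

clf = KerasClassifier(build_fn=create_model, nb_epoch=20, batch_size=32, verbose=0)
param_grid = {'learning_rate': [0.05, 0.1, 0.2], 'momentum': [0.8, 0.9]}
grid = GridSearchCV(estimator=clf, param_grid=param_grid)
grid_result = grid.fit(X, y)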
def discriminator_model():
model = Sequential()
model.add(Convolution2D(
64, 5, 5,
border_mode='same',
input_shape=(1, 28, 28)))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(128, 5, 5))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1024))
model.add(Activation('tanh'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
return model
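# In a DCGAN training loop the discriminator is compiled on its own and then
# frozen inside the combined model so that only the generator updates. A sketch,
# assuming a generator_model() defined elsewhere in the same script and SGD from
# keras.optimizers:
discriminator = discriminator_model()
discriminator.compile(loss='binary_crossentropy',
                      optimizer=SGD(lr=0.0005, momentum=0.9, nesterov=True))

generator = generator_model()
discriminator.trainable = False
combined = Sequential()
combined.add(generator)
combined.add(discriminator)
combined.compile(loss='binary_crossentropy',
                 optimizer=SGD(lr=0.0005, momentum=0.9, nesterov=True))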
def two_conv_layer(x, nb_channels, kernel_size=3, stride=1, l2_reg=1e-4,
first=False):
if first: # Skip BN-Relu
out = x
else:
out = BatchNormalization()(x)
out = Activation('relu')(out)
out = Convolution2D(nb_channels, kernel_size, kernel_size,
subsample=(stride, stride),
border_mode='same', init='he_normal',
W_regularizer=l2(l2_reg), bias=False)(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Convolution2D(nb_channels, kernel_size, kernel_size,
border_mode='same', init='he_normal',
W_regularizer=l2(l2_reg), bias=False)(out)
return out
def downsample_block(x, nb_channels, kernel_size=3, bottleneck=True,
l2_reg=1e-4):
if bottleneck:
out = bottleneck_layer(x, nb_channels, kernel_size=kernel_size,
stride=2, l2_reg=l2_reg)
# The number of output channels is 4x larger in this case
nb_channels = nb_channels * 4
else:
out = two_conv_layer(x, nb_channels, kernel_size=kernel_size,
stride=2, l2_reg=l2_reg)
# Projection on the shortcut
proj = Convolution2D(nb_channels, 1, 1, subsample=(2, 2),
border_mode='valid', init='he_normal',
W_regularizer=l2(l2_reg), bias=False)(x)
# proj = AveragePooling2D((1, 1), (2, 2))(x)
out = merge([proj, out], mode='sum')
return out
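# resnet_model below calls block_stack (and, when bottleneck=True, bottleneck_layer),
# neither of which appears in this listing. A minimal sketch of block_stack for the
# non-bottleneck case, combining the helpers above with identity shortcuts:
def block_stack(x, nb_channels, nb_blocks, bottleneck=False, l2_reg=1e-4,
                first=False):
    if first:
        # First stack: resolution and width are unchanged, so the identity
        # shortcut can be summed directly with the residual branch
        out = two_conv_layer(x, nb_channels, l2_reg=l2_reg, first=True)
        x = merge([x, out], mode='sum')
    else:
        # Later stacks halve the resolution with a projection shortcut
        x = downsample_block(x, nb_channels, bottleneck=bottleneck, l2_reg=l2_reg)
    for _ in range(nb_blocks - 1):
        out = two_conv_layer(x, nb_channels, l2_reg=l2_reg)
        x = merge([x, out], mode='sum')
    return x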
def resnet_model(nb_blocks, bottleneck=True, l2_reg=1e-4):
nb_channels = [16, 32, 64]
inputs = Input((32, 32, 3))
x = Convolution2D(16, 3, 3, border_mode='same', init='he_normal',
W_regularizer=l2(l2_reg), bias=False)(inputs)
x = BatchNormalization()(x)
x = Activation('relu')(x)
for n, f in zip(nb_channels, [True, False, False]):
x = block_stack(x, n, nb_blocks, bottleneck=bottleneck, l2_reg=l2_reg,
first=f)
# Last BN-Relu
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = GlobalAveragePooling2D()(x)
x = Dense(10)(x)
x = Activation('softmax')(x)
model = Model(input=inputs, output=x)
return model
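# Usage sketch for the ResNet above (note the channels-last (32, 32, 3) input):
model = resnet_model(nb_blocks=3, bottleneck=False)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()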
# Source: models.py, from the project Super-Resolution-using-Generative-Adversarial-Networks by titu1994
def _upscale_block(self, ip, id):
'''
    Following the suggestion in http://distill.pub/2016/deconv-checkerboard/, this swaps
    out SubPixelConvolution for simple nearest-neighbour upsampling.
    '''
init = ip
x = Convolution2D(128, 3, 3, activation="linear", border_mode='same', name='sr_res_upconv1_%d' % id,
init=self.init)(init)
x = LeakyReLU(alpha=0.25, name='sr_res_up_lr_%d_1_1' % id)(x)
x = UpSampling2D(name='sr_res_upscale_%d' % id)(x)
#x = SubPixelUpscaling(r=2, channels=32)(x)
x = Convolution2D(128, 3, 3, activation="linear", border_mode='same', name='sr_res_filter1_%d' % id,
init=self.init)(x)
x = LeakyReLU(alpha=0.3, name='sr_res_up_lr_%d_1_2' % id)(x)
return x
def create_model(img_rows, img_cols):
model = Sequential() #initialize model
model.add(Convolution2D(4, 3, 3, border_mode='same', activation='relu', init='he_normal',
input_shape=(1, img_rows, img_cols)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(8, 3, 3, border_mode='same', activation='relu', init='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(2))
model.add(Activation('softmax'))
adm = Adamax()
#sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=adm, loss='categorical_crossentropy')
return model
def test_export_model(self):
input = Input(shape=(3, 4, 4), name='input', dtype='float32')
out = Convolution2D(3, 3, 3,
init='he_normal', subsample=(1, 1), border_mode='same', activation='linear')(input)
out = Activation('relu')(out)
model = Model(input=[input], output=[out])
data_model_folder = dir + "/../fixture/model_export"
if K._BACKEND == 'tensorflow':
import tensorflow as tf
saver = tf.train.Saver()
else:
saver = None
export_model(model, data_model_folder, saver=saver)
os.remove(data_model_folder + '/archi.json')
os.remove(data_model_folder + '/last_weights.hdf5')
if K._BACKEND == 'tensorflow':
os.remove(data_model_folder + '/checkpoint')
os.remove(data_model_folder + '/tf-last_weights')
os.remove(data_model_folder + '/tf-last_weights.meta')
os.remove(data_model_folder + '/tf-model_graph')
os.remove(data_model_folder + '/tf-frozen_model.pb')
os.rmdir(data_model_folder)
# Name fragments used below; these are module-level constants in the reference
# SqueezeNet implementation, reproduced here so the snippet is self-contained:
sq1x1 = "squeeze1x1"
exp1x1 = "expand1x1"
exp3x3 = "expand3x3"
relu = "relu_"

def fire_module(x, fire_id, squeeze=16, expand=64, dim_ordering='tf'):
    s_id = 'fire' + str(fire_id) + '/'
    if dim_ordering == 'tf':
        c_axis = 3
    else:
        c_axis = 1
x = Convolution2D(squeeze, 1, 1, border_mode='valid', name=s_id + sq1x1)(x)
x = Activation('relu', name=s_id + relu + sq1x1)(x)
left = Convolution2D(expand, 1, 1, border_mode='valid', name=s_id + exp1x1)(x)
left = Activation('relu', name=s_id + relu + exp1x1)(left)
right = Convolution2D(expand, 3, 3, border_mode='same', name=s_id + exp3x3)(x)
right = Activation('relu', name=s_id + relu + exp3x3)(right)
x = merge([left, right], mode='concat', concat_axis=c_axis, name=s_id + 'concat')
return x
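# fire_module is SqueezeNet's building block: a 1x1 "squeeze" convolution feeding
# parallel 1x1 and 3x3 "expand" paths that are concatenated. A sketch of the
# opening layers of the architecture under the same Keras 1.x functional API
# (Input, Activation and MaxPooling2D as imported elsewhere in the file):
inputs = Input(shape=(224, 224, 3))
x = Convolution2D(96, 7, 7, subsample=(2, 2), border_mode='same', name='conv1')(inputs)
x = Activation('relu', name='relu_conv1')(x)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)
x = fire_module(x, fire_id=2, squeeze=16, expand=64)
x = fire_module(x, fire_id=3, squeeze=16, expand=64)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool3')(x)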
def VGG_16(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(400, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model
# Source: densenet_fc.py, from the project Fully-Connected-DenseNets-Semantic-Segmentation by titu1994
def __transition_block(ip, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
''' Apply BatchNorm, ReLU, 1x1 Conv2D (with compression), optional dropout and AveragePooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
            in the transition block.
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv, optional dropout and average pooling
    '''
concat_axis = 1 if K.image_dim_ordering() == "th" else -1
x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(ip)
x = Activation('relu')(x)
x = Convolution2D(int(nb_filter * compression), 1, 1, init="he_uniform", border_mode="same", bias=False,
W_regularizer=l2(weight_decay))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
x = AveragePooling2D((2, 2), strides=(2, 2))(x)
return x
def inception_resnet_v2_B(input, scale_residual=True):
if K.image_dim_ordering() == "th":
channel_axis = 1
else:
channel_axis = -1
# The input is assumed to be the output of a ReLU activation
init = input
ir1 = Convolution2D(192, 1, 1, activation='relu', border_mode='same')(input)
ir2 = Convolution2D(128, 1, 1, activation='relu', border_mode='same')(input)
ir2 = Convolution2D(160, 1, 7, activation='relu', border_mode='same')(ir2)
ir2 = Convolution2D(192, 7, 1, activation='relu', border_mode='same')(ir2)
ir_merge = merge([ir1, ir2], mode='concat', concat_axis=channel_axis)
ir_conv = Convolution2D(1152, 1, 1, activation='linear', border_mode='same')(ir_merge)
if scale_residual:
    ir_conv = Lambda(lambda x: x * 0.1)(ir_conv)
out = merge([init, ir_conv], mode='sum')
out = BatchNormalization(axis=channel_axis)(out)
out = Activation("relu")(out)
return out
def inception_resnet_v2_C(input, scale_residual=True):
if K.image_dim_ordering() == "th":
channel_axis = 1
else:
channel_axis = -1
# The input is assumed to be the output of a ReLU activation
init = input
ir1 = Convolution2D(192, 1, 1, activation='relu', border_mode='same')(input)
ir2 = Convolution2D(192, 1, 1, activation='relu', border_mode='same')(input)
ir2 = Convolution2D(224, 1, 3, activation='relu', border_mode='same')(ir2)
ir2 = Convolution2D(256, 3, 1, activation='relu', border_mode='same')(ir2)
ir_merge = merge([ir1, ir2], mode='concat', concat_axis=channel_axis)
ir_conv = Convolution2D(2144, 1, 1, activation='linear', border_mode='same')(ir_merge)
if scale_residual:
    ir_conv = Lambda(lambda x: x * 0.1)(ir_conv)
out = merge([init, ir_conv], mode='sum')
out = BatchNormalization(axis=channel_axis)(out)
out = Activation("relu")(out)
return out
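# The B and C blocks above are residual units meant to be repeated. A sketch of
# stacking them on an existing Keras tensor x, roughly following the
# Inception-ResNet-v2 body (the reduction block between the two stages is
# omitted here):
for _ in range(20):
    x = inception_resnet_v2_B(x, scale_residual=True)
# ... reduction block B -> C would go here ...
for _ in range(10):
    x = inception_resnet_v2_C(x, scale_residual=True)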