def first_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = Conv1D(k1, 1, padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, padding='same')(out)
    # Shortcut branch; with pooling_size=1 this is effectively an identity.
    pooling = MaxPooling1D(pooling_size, padding='same')(tensor_input)
    # Residual merge (merge([out, pooling], mode='sum') in the Keras 1 API);
    # k2 must equal the channel count of tensor_input for the shapes to match.
    out = add([out, pooling])
    return out
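A usage sketch under assumed shapes (and assuming the usual keras.layers imports): since the pooling branch preserves the input's channel count, the residual add() only works when k2 equals the channel count of tensor_input.

inp = Input(shape=(256, 128))       # 256 timesteps, 128 channels
out = first_block(inp, (64, 128))   # k2 = 128 matches the input channels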
def _shortcut(input, residual):
    """Adds a shortcut between input and residual block and merges them with
    "sum": y = F(x) + x.
    """
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height).
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]
    shortcut = input
    # 1x1 conv if shape is different, else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_width, stride_height),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001))(input)
    return add([shortcut, residual])
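_shortcut relies on module-level axis constants that are not shown in this excerpt. A minimal sketch of what they would be, assuming a channels_last backend (the usual convention in Keras ResNet ports):

ROW_AXIS = 1      # image height axis in (batch, rows, cols, channels)
COL_AXIS = 2      # image width axis
CHANNEL_AXIS = 3  # feature-map axis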
def __initial_conv_block(input, k=1, dropout=0.0, initial=False):
    init = input
    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1
    # On the first block, project the shortcut to 16 * k channels so the
    # final add() is valid; the 1x1 projection is the same for both dim orderings.
    if initial:
        init = Conv2D(16 * k, (1, 1), kernel_initializer='he_normal', padding='same')(init)
    x = BatchNormalization(axis=channel_axis)(input)
    x = Activation('relu')(x)
    x = Conv2D(16 * k, (3, 3), padding='same', kernel_initializer='he_normal')(x)
    if dropout > 0.0:
        x = Dropout(dropout)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = Conv2D(16 * k, (3, 3), padding='same', kernel_initializer='he_normal')(x)
    m = add([init, x])
    return m
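A usage sketch under assumed shapes: with initial=True the 1x1 projection widens the shortcut to 16 * k channels, so the residual add is valid even when the incoming tensor is narrower.

inp = Input(shape=(32, 32, 3))
x = Conv2D(16, (3, 3), padding='same')(inp)     # stem: 16 channels
x = __initial_conv_block(x, k=2, initial=True)  # widens both branches to 32 channels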
def repeated_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1, kernel_size, strides=2, padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, strides=2, padding='same')(out)
    # Two stride-2 convs shrink the sequence 4x, so the shortcut pools with stride 4.
    pooling = MaxPooling1D(pooling_size, strides=4, padding='same')(x)
    out = add([out, pooling])
    return out
def repeated_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1, kernel_size, padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, padding='same')(out)
    pooling = MaxPooling1D(pooling_size, padding='same')(x)
    out = add([out, pooling])  # merge([out, pooling], mode='sum') in Keras 1
    return out
def repeated_2d_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1, kernel_size, padding='same', data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2, kernel_size, padding='same', data_format='channels_last')(out)
    pooling = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(x)
    out = add([out, pooling])  # merge([out, pooling], mode='sum') in Keras 1
    return out
def first_2d_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = Conv2D(k1, 1, padding='same', data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2, kernel_size, padding='same', data_format='channels_last')(out)
    pooling = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(tensor_input)
    out = add([out, pooling])  # merge([out, pooling], mode='sum') in Keras 1
    return out
def first_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = Conv1D(k1, 1, padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, strides=2, padding='same')(out)
    # The stride-2 conv halves the sequence, so the shortcut pools with stride 2.
    pooling = MaxPooling1D(pooling_size, strides=2, padding='same')(tensor_input)
    out = add([out, pooling])
    return out
def first_2d_block(tensor_input, filters, kernel_size=3, pooling_size=2, dropout=0.5):
    k1, k2 = filters
    out = Conv2D(k1, 1, padding='same', data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2, kernel_size, strides=2, padding='same', data_format='channels_last')(out)
    # Pooling with pool_size=2 (stride defaults to the pool size) matches the stride-2 conv.
    pooling = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(tensor_input)
    out = add([out, pooling])
    return out
def repeated_2d_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1, kernel_size, strides=2, padding='same', data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2, kernel_size, strides=2, padding='same', data_format='channels_last')(out)
    # Two stride-2 convs shrink the feature map 4x; the shortcut must pool with
    # stride 4 to match (mirroring the strided 1D repeated_block above).
    pooling = MaxPooling2D(pooling_size, strides=4, padding='same', data_format='channels_last')(x)
    out = add([out, pooling])
    return out
def create_model(self, epsilon):
    """Return a compiled model and the state and action input
    layers with the given epsilon for numerical stability.
    """
    inputs = Input(shape=(self.state_shape,))
    action_input = Input(shape=(self.action_shape,))
    x1 = Dense(self.neurons_per_layer[0], activation='relu')(inputs)
    x1 = Dense(self.neurons_per_layer[1], activation='relu')(x1)
    x2 = Dense(self.neurons_per_layer[1], activation='relu')(action_input)
    x = add([x1, x2])  # merge the state and action pathways
    for n in self.neurons_per_layer[2:]:
        x = Dense(n, activation='relu')(x)
    outputs = Dense(self.action_shape)(x)
    model = Model(inputs=[inputs, action_input], outputs=outputs)
    assert self.optimizer_choice in ['adam', 'rmsprop']
    if self.optimizer_choice == 'adam':
        opti = Adam(lr=self.alpha, epsilon=epsilon)
    else:
        opti = RMSprop(lr=self.alpha, epsilon=epsilon)
    model.compile(optimizer=opti, loss='mse')
    return model, inputs, action_input
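create_model is written as a method; a hedged usage sketch with a stand-in object (all attribute values here are hypothetical, not from the original project):

from types import SimpleNamespace

agent = SimpleNamespace(state_shape=8, action_shape=2,
                        neurons_per_layer=[64, 64, 32],
                        alpha=1e-3, optimizer_choice='adam')
model, state_in, action_in = create_model(agent, epsilon=1e-8)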
def create_model(img_height, img_width, img_channel):
    ip = Input(shape=(img_height, img_width, img_channel))
    L_1 = Conv2D(64, (9, 9), padding='same', activation='linear', kernel_initializer='glorot_uniform')(ip)
    L_1 = LeakyReLU(alpha=0.25)(L_1)
    L_2 = L_1
    for i in range(3):
        L_2 = residual_block(L_2, 64, 3)
    L_3 = Conv2D(64, (3, 3), padding='same', kernel_initializer='glorot_uniform')(L_2)
    L_3 = BatchNormalization(axis=-1)(L_3)
    L_3 = add([L_1, L_3])  # long skip connection around the residual stack
    L_4 = Conv2D(128, (1, 1), padding='same', kernel_initializer='glorot_uniform')(L_3)
    op = Conv2D(img_channel, (9, 9), padding='same', activation='tanh', kernel_initializer='glorot_uniform')(L_4)
    deblocking = Model(inputs=ip, outputs=op)
    optimizer = optimizers.Adam(lr=1e-4)
    deblocking.compile(optimizer=optimizer, loss='mean_squared_error', metrics=[psnr, ssim])
    return deblocking
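This snippet calls residual_block, psnr, and ssim without defining them. A minimal sketch of plausible stand-ins (assumptions, not the original project's code; ssim is omitted since it needs a windowed implementation):

def residual_block(x, filters, kernel_size):
    # conv -> BN -> ReLU -> conv -> BN, merged back onto the input
    y = Conv2D(filters, (kernel_size, kernel_size), padding='same',
               kernel_initializer='glorot_uniform')(x)
    y = BatchNormalization(axis=-1)(y)
    y = Activation('relu')(y)
    y = Conv2D(filters, (kernel_size, kernel_size), padding='same',
               kernel_initializer='glorot_uniform')(y)
    y = BatchNormalization(axis=-1)(y)
    return add([x, y])

def psnr(y_true, y_pred):
    # peak signal-to-noise ratio in dB, assuming values scaled to [0, 1]
    return -10.0 * K.log(K.mean(K.square(y_pred - y_true))) / K.log(10.0)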
def create_model(img_height, img_width, img_channel):
    ip = Input(shape=(img_height, img_width, img_channel))
    x_1 = Conv2D(64, (9, 9), padding='same', activation='linear', kernel_initializer='glorot_uniform')(ip)
    x_1 = LeakyReLU(alpha=0.25)(x_1)
    x = x_1
    for i in range(5):  # or 15
        x = residual_block(x, 64, 3)
    x = Conv2D(64, (3, 3), padding='same', kernel_initializer='glorot_uniform')(x)
    x = BatchNormalization(axis=-1)(x)
    x = add([x_1, x])  # long skip connection around the residual stack
    x = upscale(x)
    op = Conv2D(img_channel, (9, 9), padding='same', activation='tanh', kernel_initializer='glorot_uniform')(x)
    deblocking = Model(inputs=ip, outputs=op)
    optimizer = optimizers.Adam(lr=1e-4)
    deblocking.compile(optimizer=optimizer, loss='mean_squared_error', metrics=[psnr, ssim])
    return deblocking
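upscale is likewise undefined here. One plausible stand-in (an assumption; the original may use sub-pixel convolution or Conv2DTranspose instead):

def upscale(x):
    # 2x nearest-neighbour upsampling followed by a conv to smooth artifacts
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(64, (3, 3), padding='same', kernel_initializer='glorot_uniform')(x)
    return LeakyReLU(alpha=0.25)(x)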
def __init__(self, **kwargs):
    super(KerasLenetModel, self).__init__(**kwargs)
    norm_shape = self.norm_shape
    self.model = Sequential()
    self.model.add(Convolution2D(32, (3, 3), activation='relu',
                                 input_shape=(norm_shape[0], norm_shape[1], 1)))
    self.model.add(Convolution2D(32, (3, 3), activation='relu'))
    self.model.add(MaxPooling2D(pool_size=(2, 2)))
    self.model.add(Dropout(0.25))
    self.model.add(Flatten())
    self.model.add(Dense(128, activation='relu'))
    self.model.add(Dropout(0.5))
    self.model.add(Dense(self.max_n_label, activation='softmax'))
    # Compile model
    self.model.compile(loss='categorical_crossentropy',
                       optimizer='adam',
                       metrics=['accuracy'])
def unit_2(in_layer, n1=64, n2=64, n3=256, p2=1, d2=1):
    '''
    Shortcut Unit: a 1x1 -> 3x3 (optionally dilated) -> 1x1 bottleneck whose
    output is summed with in_layer, so in_layer must already have n3 channels.
    :param in_layer: input tensor
    :return: output tensor of the residual unit
    '''
    x = Conv2D(n1, (1, 1), strides=(1, 1), padding='valid', kernel_initializer=he_uniform(), use_bias=False)(in_layer)
    x = BatchNormalization(momentum=0.95)(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(p2, p2))(x)
    x = Conv2D(n2, (3, 3), strides=(1, 1), padding='valid', dilation_rate=(d2, d2), kernel_initializer=he_uniform(), use_bias=False)(x)
    x = BatchNormalization(momentum=0.95)(x)
    x = Activation('relu')(x)
    x = Conv2D(n3, (1, 1), strides=(1, 1), padding='valid', kernel_initializer=he_uniform(), use_bias=False)(x)
    x = BatchNormalization(momentum=0.95)(x)
    x = add([in_layer, x])
    x = Activation('relu')(x)
    return x
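A usage sketch under assumed shapes; spatial size is preserved as long as the zero padding matches the dilation (p2 == d2):

inp = Input(shape=(56, 56, 256))
x = unit_2(inp)              # identity bottleneck, n3=256 matches the input
x = unit_2(x, p2=2, d2=2)    # dilated 3x3 with matching padding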
def bottleneck(encoder, output, upsample=False, reverse_module=False):
    internal = output // 4
    x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)
    if not upsample:
        x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
    else:
        x = Conv2DTranspose(filters=internal, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)
    x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)
    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module is not False:
            other = UpSampling2D(size=(2, 2))(other)
    if upsample and reverse_module is False:
        decoder = x
    else:
        x = BatchNormalization(momentum=0.1)(x)
        decoder = add([x, other])
        decoder = Activation('relu')(decoder)
    return decoder
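A stacking sketch under assumed shapes, exercising both merge paths:

feat = Input(shape=(64, 64, 128))
x = bottleneck(feat, 128)                                  # identity shortcut, same shape
x = bottleneck(x, 64, upsample=True, reverse_module=True)  # 2x upsample on both branches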
def bottleneck(encoder, output, upsample=False, reverse_module=False):
    internal = output // 4
    x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    # x = Activation('relu')(x)
    x = PReLU(shared_axes=[1, 2])(x)
    if not upsample:
        x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
    else:
        x = Conv2DTranspose(filters=internal, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.1)(x)
    # x = Activation('relu')(x)
    x = PReLU(shared_axes=[1, 2])(x)
    x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)
    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module is not False:
            # MaxUnpooling2D is a custom layer (not stock Keras) that restores
            # resolution using the argmax indices saved by the paired pooling.
            other = MaxUnpooling2D()([other, reverse_module])
    if upsample and reverse_module is False:
        decoder = x
    else:
        x = BatchNormalization(momentum=0.1)(x)
        decoder = add([x, other])
        # decoder = Activation('relu')(decoder)
        decoder = PReLU(shared_axes=[1, 2])(decoder)
    return decoder
def _shortcut(input_feature, residual, conv_name_base=None, bn_name_base=None):
    """Adds a shortcut between input and residual block and merges them with "sum"
    """
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height).
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input_feature)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]
    shortcut = input_feature
    # 1x1 conv if shape is different, else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        print('reshaping via a convolution...')
        if conv_name_base is not None:
            conv_name_base = conv_name_base + '1'
        shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_width, stride_height),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001),
                          name=conv_name_base)(input_feature)
        if bn_name_base is not None:
            bn_name_base = bn_name_base + '1'
        shortcut = BatchNormalization(axis=CHANNEL_AXIS, name=bn_name_base)(shortcut)
    return add([shortcut, residual])
def __conv2_block(input, k=1, dropout=0.0):
    init = input
    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1
    # Check if input number of filters is same as 16 * k, else create convolution2d for this input
    if K.image_dim_ordering() == 'th':
        if init._keras_shape[1] != 16 * k:
            init = Conv2D(16 * k, (1, 1), activation='linear', padding='same')(init)
    else:
        if init._keras_shape[-1] != 16 * k:
            init = Conv2D(16 * k, (1, 1), activation='linear', padding='same')(init)
    x = Conv2D(16 * k, (3, 3), padding='same')(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    if dropout > 0.0:
        x = Dropout(dropout)(x)
    x = Conv2D(16 * k, (3, 3), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    m = add([init, x])
    return m
def __conv3_block(input, k=1, dropout=0.0):
    init = input
    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1
    # Check if input number of filters is same as 32 * k, else create convolution2d for this input
    if K.image_dim_ordering() == 'th':
        if init._keras_shape[1] != 32 * k:
            init = Conv2D(32 * k, (1, 1), activation='linear', padding='same')(init)
    else:
        if init._keras_shape[-1] != 32 * k:
            init = Conv2D(32 * k, (1, 1), activation='linear', padding='same')(init)
    x = Conv2D(32 * k, (3, 3), padding='same')(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    if dropout > 0.0:
        x = Dropout(dropout)(x)
    x = Conv2D(32 * k, (3, 3), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    m = add([init, x])
    return m
def ___conv4_block(input, k=1, dropout=0.0):
    init = input
    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1
    # Check if input number of filters is same as 64 * k, else create convolution2d for this input
    if K.image_dim_ordering() == 'th':
        if init._keras_shape[1] != 64 * k:
            init = Conv2D(64 * k, (1, 1), activation='linear', padding='same')(init)
    else:
        if init._keras_shape[-1] != 64 * k:
            init = Conv2D(64 * k, (1, 1), activation='linear', padding='same')(init)
    x = Conv2D(64 * k, (3, 3), padding='same')(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    if dropout > 0.0:
        x = Dropout(dropout)(x)
    x = Conv2D(64 * k, (3, 3), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    m = add([init, x])
    return m
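The three blocks above differ only in their width (16, 32, and 64 times k); a refactoring sketch that parameterizes them (not part of the original code):

def __conv_block(input, filters, k=1, dropout=0.0):
    init = input
    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1
    # Project the shortcut with a 1x1 conv when the channel counts differ
    if init._keras_shape[channel_axis] != filters * k:
        init = Conv2D(filters * k, (1, 1), activation='linear', padding='same')(init)
    x = Conv2D(filters * k, (3, 3), padding='same')(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    if dropout > 0.0:
        x = Dropout(dropout)(x)
    x = Conv2D(filters * k, (3, 3), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    return add([init, x])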
def build_main_residual_network(batch_size,
                                time_step,
                                input_dim,
                                output_dim,
                                loop_depth=15,
                                dropout=0.3):
    inp = Input(shape=(time_step, input_dim))
    # Mask all-zero timesteps to filter invalid data (note that Conv1D does
    # not consume masks, so some Keras versions may warn on or reject this).
    out = TimeDistributed(Masking(mask_value=0))(inp)
    out = Conv1D(128, 5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = first_block(out, (64, 128), dropout=dropout)
    for _ in range(loop_depth):
        out = repeated_block(out, (64, 128), dropout=dropout)
    # flatten and regress to the output dimension
    out = Flatten()(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)
    model = Model(inp, out)
    model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
    return model
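A usage sketch with assumed dimensions (note that batch_size is accepted but never used by the builder):

model = build_main_residual_network(batch_size=32, time_step=1000,
                                    input_dim=8, output_dim=1,
                                    loop_depth=15, dropout=0.3)
model.summary()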
def build_2d_main_residual_network(batch_size,
                                   width,
                                   height,
                                   channel_size,
                                   output_dim,
                                   loop_depth=15,
                                   dropout=0.3):
    inp = Input(shape=(width, height, channel_size))
    # Mask all-zero rows to filter invalid data (as above, the downstream
    # conv layers do not consume masks)
    out = TimeDistributed(Masking(mask_value=0))(inp)
    out = Conv2D(128, 5, data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = first_2d_block(out, (64, 128), dropout=dropout)
    for _ in range(loop_depth):
        out = repeated_2d_block(out, (64, 128), dropout=dropout)
    # flatten and regress to the output dimension
    out = Flatten()(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)
    model = Model(inp, out)
    model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
    return model
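And the 2D counterpart, again with assumed dimensions:

model = build_2d_main_residual_network(batch_size=32, width=64, height=64,
                                       channel_size=1, output_dim=10)
model.summary()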