def decode(y, relu_max):
assert len(y._keras_shape) == 2
latent_dim = y._keras_shape[-1]
x = Reshape((1, 1, latent_dim))(y)
# 1, 1, latent_dim
if relu_max:
x = Activation(utils.scale_up(relu_max))(x)
# not good? x = BN(mode=2, axis=3)(x)
batch_size = tf.shape(x)[0]
x = Deconv2D(40, 7, 7, output_shape=[batch_size, 7, 7, 40], activation='relu',
border_mode='same', subsample=(7,7))(x)
x = BN(mode=2, axis=3)(x)
# 7, 7, 40
x = Deconv2D(20, 3, 3, output_shape=[batch_size, 14, 14, 20], activation='relu',
border_mode='same', subsample=(2,2))(x)
x = BN(mode=2, axis=3)(x)
# 14, 14, 20
x = Deconv2D(1, 3, 3, output_shape=[batch_size, 28, 28, 1], activation='sigmoid',
border_mode='same', subsample=(2,2))(x)
# 28, 28, 1
return x
# Python Deconvolution2D() class: example source code
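# The snippets collected on this page assume roughly the imports below
# (Keras 1.x API with the TensorFlow backend; Deconv2D and BN are aliases).
# This preamble is a hedged reconstruction, not part of the original code.
# The author-local utils module, the LATENT_DIM constant, and the relu_n
# helper are never shown on this page, so the stand-ins below are
# hypothetical sketches of what they plausibly do.
import tensorflow as tf
import keras
from keras import backend as K
from keras.models import Model
from keras.layers import (Input, Activation, Reshape, Lambda, merge,
                          Conv2D, Deconv2D, Deconvolution2D, GaussianNoise)
from keras.layers.normalization import BatchNormalization
BN = BatchNormalization  # several snippets use this short alias

from keras.optimizers import adam

LATENT_DIM = 1024  # assumed latent width; the original constant is not shown


class utils(object):
    """Hypothetical stand-in for the author's utils module."""

    @staticmethod
    def scale_up(relu_max):
        # assumed: rescale squashed activations back up to [0, relu_max]
        return lambda x: x * relu_max

    @staticmethod
    def relu_n(n):
        # assumed: ReLU clipped at n (the Keras backend relu supports max_value)
        return lambda x: K.relu(x, max_value=n)


relu_n = utils.relu_n  # relu_deep_model1 below calls relu_n unqualified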
def model_EES(input_col, input_row):
_input = Input(shape=(input_col, input_row, 1), name='input')
EES = Conv2D(nb_filter=8, nb_row=3, nb_col=3, init='he_normal',
activation='relu', border_mode='same', bias=True)(_input)
EES = Deconvolution2D(nb_filter=16, nb_row=14, nb_col=14, output_shape=(None, input_col * 2, input_row * 2, 16),
subsample=(2, 2), border_mode='same', init='glorot_uniform', activation='relu')(EES)
out = Conv2D(nb_filter=1, nb_row=5, nb_col=5, init='glorot_uniform', activation='relu', border_mode='same')(EES)
model = Model(input=_input, output=out)
# sgd = SGD(lr=0.0001, decay=0.005, momentum=0.9, nesterov=True)
Adam = adam(lr=0.001)
model.compile(optimizer=Adam, loss='mean_squared_error', metrics=['mean_squared_error'])
return model
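# Hedged usage sketch for model_EES (assumed data: low-resolution patches X
# of shape (n, 28, 28, 1) and 2x high-resolution targets Y of shape
# (n, 56, 56, 1); the 2x upscale comes from the subsample=(2, 2) deconv):
#   model = model_EES(28, 28)
#   model.fit(X, Y, batch_size=64, nb_epoch=10, validation_split=0.1)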
def decode(y, relu_max):
print 'decoder input shape:', y._keras_shape
assert len(y._keras_shape) == 2
if relu_max:
x = GaussianNoise(0.2)(y)
# x = Activation(utils.relu_n(1))(x)
else:
x = y
x = Reshape((1, 1, LATENT_DIM))(x)
# 1, 1, LATENT_DIM
if relu_max:
print 'in decode: relu_max:', relu_max
x = Activation(utils.scale_up(relu_max))(x)
# x = BN(mode=2, axis=3)(x) # this bn seems not good? NOT VERIFIED
# why use 512 instead of 256 here?
batch_size = keras.backend.shape(x)[0]
x = Deconv2D(512, 4, 4, output_shape=[batch_size, 4, 4, 512],
activation='relu', border_mode='same', subsample=(4,4))(x)
x = BN(mode=2, axis=3)(x)
# 4, 4, 512
x = Deconv2D(256, 5, 5, output_shape=[batch_size, 8, 8, 256],
activation='relu', border_mode='same', subsample=(2,2))(x)
x = BN(mode=2, axis=3)(x)
# 8, 8, 256
x = Deconv2D(128, 5, 5, output_shape=(batch_size, 16, 16, 128),
activation='relu', border_mode='same', subsample=(2,2))(x)
x = BN(mode=2, axis=3)(x)
# 16, 16, 128
x = Deconv2D(64, 5, 5, output_shape=(batch_size, 32, 32, 64),
activation='relu', border_mode='same', subsample=(2,2))(x)
x = BN(mode=2, axis=3)(x)
# 32, 32, 64
x = Deconv2D(3, 5, 5, output_shape=(batch_size, 32, 32, 3),
activation='linear', border_mode='same', subsample=(1,1))(x)
# 32, 32, 3
x = BN(mode=2, axis=3)(x)
return x
def deep_decoder1(input_shape):
encoded = Input(shape=input_shape)
print 'decoder input shape:', encoded._keras_shape
batch_size = tf.shape(encoded)[0]
x = BatchNormalization(mode=2, axis=3)(encoded)
h, w, _ = encoded._keras_shape[1:]
x = Deconv2D(32, 1, 1, output_shape=[batch_size, h, w, 32],
activation='relu', border_mode='same')(x)
x = BatchNormalization(mode=2, axis=3)(x)
x = Deconv2D(32, 3, 3, output_shape=[batch_size, h, w, 32],
activation='relu', border_mode='same')(x)
x = BatchNormalization(mode=2, axis=3)(x)
h *= 2; w *= 2
x = Deconv2D(64, 3, 3, output_shape=(batch_size, h, w, 64),
activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
x = Deconv2D(64, 3, 3, output_shape=(batch_size, h, w, 64),
activation='relu', border_mode='same', subsample=(1, 1))(x)
x = BatchNormalization(mode=2, axis=3)(x)
h *= 2; w *= 2
x = Deconv2D(32, 3, 3, output_shape=(batch_size, h, w, 32),
activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
x = Deconv2D(32, 3, 3, output_shape=(batch_size, h, w, 32),
activation='relu', border_mode='same', subsample=(1, 1))(x)
x = BatchNormalization(mode=2, axis=3)(x)
x = Deconv2D(3, 3, 3, output_shape=(batch_size, 32, 32, 3),
activation='linear', border_mode='same', subsample=(1, 1))(x)
x = BatchNormalization(mode=2, axis=3)(x)
decoded = x
return Model(encoded, decoded)
def deep_decoder2(input_shape):
encoded = Input(shape=input_shape)
print 'encoded shape:', encoded.get_shape().as_list()
x = encoded
# x = BatchNormalization(mode=2, axis=3)(encoded)
# batch_size, h, w, _ = tf.shape(x)
batch_size = tf.shape(x)[0]
# dim: (1, 1, 512)
x = Deconv2D(512, 4, 4, output_shape=[batch_size, 4, 4, 512],
activation='relu', border_mode='same', subsample=(4, 4))(encoded)
x = BatchNormalization(mode=2, axis=3)(x)
# (4, 4, 512)
x = Deconv2D(256, 5, 5, output_shape=[batch_size, 8, 8, 256],
activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# dim: (8, 8, 256)
# h *= 2; w *= 2
x = Deconv2D(128, 5, 5, output_shape=(batch_size, 16, 16, 128),
activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# dim: (16, 16, 128)
x = Deconv2D(64, 5, 5, output_shape=(batch_size, 32, 32, 64),
activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# dim: (32, 32, 64)
x = Deconv2D(3, 5, 5, output_shape=(batch_size, 32, 32, 3),
activation='linear', border_mode='same', subsample=(1, 1))(x)
decoded = BatchNormalization(mode=2, axis=3)(x)
return Model(encoded, decoded)
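# Hedged sketch: a decoder Model built this way is applied to an encoder's
# output tensor, assuming the (1, 1, 512) latent shape the comments above
# suggest (the real latent shape is not shown on this page):
#   decoder = deep_decoder2((1, 1, 512))
#   reconstruction = decoder(encoded_tensor)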
def model_EED(input_col, input_row):
_input = Input(shape=(input_col, input_row, 1), name='input')
Feature = Conv2D(nb_filter=64, nb_row=3, nb_col=3, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(_input)
Feature = Conv2D(nb_filter=64, nb_row=3, nb_col=3, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(Feature)
Feature3 = Conv2D(nb_filter=64, nb_row=3, nb_col=3, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(Feature)
Feature_out = merge(inputs=[Feature, Feature3], mode='sum')
# Upsampling
Upsampling1 = Conv2D(nb_filter=8, nb_row=1, nb_col=1, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(Feature_out)
Upsampling2 = Deconvolution2D(nb_filter=8, nb_row=14, nb_col=14,
output_shape=(None, input_col * 2, input_row * 2, 8),
subsample=(2, 2), border_mode='same',
init='glorot_uniform', activation='relu')(Upsampling1)
Upsampling3 = Conv2D(nb_filter=64, nb_row=1, nb_col=1, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(Upsampling2)
# Multi-scale Reconstruction
Reslayer1 = Conv2D(nb_filter=64, nb_row=3, nb_col=3, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(Upsampling3)
Reslayer2 = Conv2D(nb_filter=64, nb_row=3, nb_col=3, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(Reslayer1)
Block1 = merge(inputs=[Reslayer1, Reslayer2], mode='sum')
Reslayer3 = Conv2D(nb_filter=64, nb_row=3, nb_col=3, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(Block1)
Reslayer4 = Conv2D(nb_filter=64, nb_row=3, nb_col=3, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(Reslayer3)
Block2 = merge(inputs=[Reslayer3, Reslayer4], mode='sum')
# ***************//
Multi_scale1 = Conv2D(nb_filter=16, nb_row=1, nb_col=1, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(Block2)
Multi_scale2a = Conv2D(nb_filter=16, nb_row=1, nb_col=1, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(Multi_scale1)
Multi_scale2b = Conv2D(nb_filter=16, nb_row=3, nb_col=3, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(Multi_scale1)
Multi_scale2c = Conv2D(nb_filter=16, nb_row=5, nb_col=5, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(Multi_scale1)
Multi_scale2d = Conv2D(nb_filter=16, nb_row=7, nb_col=7, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(Multi_scale1)
Multi_scale2 = merge(inputs=[Multi_scale2a, Multi_scale2b, Multi_scale2c, Multi_scale2d], mode='concat')
out = Conv2D(nb_filter=1, nb_row=1, nb_col=1, init='glorot_uniform',
activation='relu', border_mode='same', bias=True)(Multi_scale2)
model = Model(input=_input, output=out)
Adam = adam(lr=0.001)
model.compile(optimizer=Adam, loss='mean_squared_error', metrics=['mean_squared_error'])
return model
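# model_EED takes the same kind of data as model_EES above; hedged sketch:
#   model = model_EED(28, 28)
#   model.fit(X, Y, batch_size=64, nb_epoch=10)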
def deep_model2(input_shape):
input_img = Input(shape=input_shape)
print 'input shape:', input_img._keras_shape
# 32, 32
x = Conv2D(32, 3, 3, activation='relu', border_mode='same', subsample=(2, 2))(input_img)
x = BatchNormalization(mode=2, axis=3)(x)
# 16, 16
x = Conv2D(64, 3, 3, activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# 8, 8
x = Conv2D(128, 3, 3, activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# 4, 4
# latent_dim = (1, 1, 1024)
x = Conv2D(1024, 4, 4, activation='linear', border_mode='same', subsample=(4, 4))(x)
x = GaussianNoise(0.1)(x)
encoded = Activation('sigmoid')(x)
print 'encoded shape:', encoded.get_shape().as_list()
# x = BatchNormalization(mode=2, axis=3)(encoded)
# batch_size, h, w, _ = tf.shape(x)
batch_size = tf.shape(encoded)[0]
# dim: (1, 1, 1024)
x = Deconv2D(512, 4, 4, output_shape=[batch_size, 4, 4, 512],
activation='relu', border_mode='same', subsample=(4, 4))(encoded)
x = BatchNormalization(mode=2, axis=3)(x)
# (4, 4, 512)
x = Deconv2D(256, 5, 5, output_shape=[batch_size, 8, 8, 256],
activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# dim: (8, 8, 256)
x = Deconv2D(128, 5, 5, output_shape=(batch_size, 16, 16, 128),
activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# dim: (16, 16, 128)
x = Deconv2D(64, 5, 5, output_shape=(batch_size, 32, 32, 64),
activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# dim: (32, 32, 64)
x = Deconv2D(3, 5, 5, output_shape=(batch_size, 32, 32, 3),
activation='linear', border_mode='same', subsample=(1, 1))(x)
decoded = BatchNormalization(mode=2, axis=3)(x)
print 'decoded shape:', decoded.get_shape().as_list()
autoencoder = Model(input_img, decoded)
return autoencoder
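# Hedged sketch: deep_model2 is a plain autoencoder, so its targets are its
# own inputs (assumed CIFAR-like data X of shape (n, 32, 32, 3)):
#   ae = deep_model2((32, 32, 3))
#   ae.compile(optimizer='adam', loss='mse')
#   ae.fit(X, X, batch_size=128, nb_epoch=50)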
def relu_deep_model1(input_shape, relu_max):
input_img = Input(shape=input_shape)
print 'input shape:', input_img._keras_shape
# 32, 32
x = Conv2D(64, 3, 3, activation='relu', border_mode='same', subsample=(2, 2))(input_img)
x = BatchNormalization(mode=2, axis=3)(x)
# 16, 16
x = Conv2D(128, 3, 3, activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# 8, 8
x = Conv2D(256, 3, 3, activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# 4, 4
# latent_dim = (1, 1, 1024)
x = Conv2D(1024, 4, 4, activation='linear',
border_mode='same', subsample=(4, 4))(x)
x = GaussianNoise(0.2)(x)
# encoded = Activation('relu')(x)
encoded = Activation(relu_n(relu_max))(x)
print 'encoded shape:', encoded.get_shape().as_list()
# in the original design there is no BN as the first layer of the decoder, because of a bug
x = encoded
# x = BatchNormalization(mode=2, axis=3)(encoded)
# batch_size, h, w, _ = tf.shape(x)
batch_size = tf.shape(x)[0]
# dim: (1, 1, 1024)
x = Deconv2D(512, 4, 4, output_shape=[batch_size, 4, 4, 512],
activation='relu', border_mode='same', subsample=(4, 4))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# (4, 4, 512)
x = Deconv2D(256, 5, 5, output_shape=[batch_size, 8, 8, 256],
activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# dim: (8, 8, 256)
x = Deconv2D(128, 5, 5, output_shape=(batch_size, 16, 16, 128),
activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# dim: (16, 16, 128)
x = Deconv2D(64, 5, 5, output_shape=(batch_size, 32, 32, 64),
activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# dim: (32, 32, 64)
x = Deconv2D(3, 5, 5, output_shape=(batch_size, 32, 32, 3),
activation='linear', border_mode='same', subsample=(1, 1))(x)
decoded = BatchNormalization(mode=2, axis=3)(x)
print 'decoded shape:', decoded.get_shape().as_list()
autoencoder = Model(input_img, decoded)
return autoencoder
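# deep_model1 below calls sampling_gaussian, which is not shown on this page.
# What follows is a hedged reconstruction of the standard VAE
# reparameterization trick (z = mean + exp(0.5 * log_var) * eps with
# eps ~ N(0, I)), written against the Keras 1 backend API:
def sampling_gaussian(args):
    z_mean, z_log_var = args
    # draw noise with the same dynamic shape as the mean tensor
    eps = K.random_normal(shape=K.shape(z_mean), mean=0., std=1.)
    return z_mean + K.exp(0.5 * z_log_var) * eps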
def deep_model1(input_shape):
input_img = Input(shape=input_shape)
print 'input shape:', input_img._keras_shape
# 32, 32
x = Conv2D(32, 3, 3, activation='relu', border_mode='same', subsample=(2, 2))(input_img)
x = BatchNormalization(mode=2, axis=3)(x)
# 16, 16
x = Conv2D(64, 3, 3, activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# 8, 8
x = Conv2D(128, 3, 3, activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# 4, 4
latent_dim = (1, 1, 1024)
z_mean = Conv2D(1024, 4, 4, activation='linear',
border_mode='same', subsample=(4, 4))(x)
# z_mean = GaussianNoise(0.1)(z_mean)
# TODO: the next layer use 16K parameters, will it be a problem?
z_log_var = Conv2D(1024, 4, 4, activation='linear',
border_mode='same', subsample=(4, 4))(x)
z = Lambda(sampling_gaussian, output_shape=latent_dim)([z_mean, z_log_var])
print 'encoded shape:', z._keras_shape
# x = BatchNormalization(mode=2, axis=3)(z)
batch_size = tf.shape(z)[0]
h, w, _ = z._keras_shape[1:]
# dim: (1, 1, 1024)
x = Deconv2D(512, 4, 4, output_shape=[batch_size, 4, 4, 512],
activation='relu', border_mode='same', subsample=(4, 4))(z)
x = BatchNormalization(mode=2, axis=3)(x)
# (4, 4, 512)
x = Deconv2D(256, 5, 5, output_shape=[batch_size, 8, 8, 256],
activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# dim: (8, 8, 256)
x = Deconv2D(128, 5, 5, output_shape=(batch_size, 16, 16, 128),
activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# dim: (16, 16, 128)
x = Deconv2D(64, 5, 5, output_shape=(batch_size, 32, 32, 64),
activation='relu', border_mode='same', subsample=(2, 2))(x)
x = BatchNormalization(mode=2, axis=3)(x)
# dim: (32, 32, 64)
x = Deconv2D(3, 5, 5, output_shape=(batch_size, 32, 32, 3),
activation='linear', border_mode='same', subsample=(1, 1))(x)
decoded = BatchNormalization(mode=2, axis=3)(x)
print 'decoded shape:', decoded._keras_shape
autoencoder = Model(input_img, decoded)
# define vae loss
def vae_loss(y, y_pred):
# TODO: generalize this function
recon_loss = K.sum(K.square(y_pred - y), axis=[1, 2, 3])
kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
axis=[1, 2, 3])
print ('pre average loss shape:',
recon_loss.get_shape().as_list(),
kl_loss.get_shape().as_list())
return K.mean(recon_loss + kl_loss)
return autoencoder, vae_loss
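# Hedged sketch of wiring up the (model, loss) pair returned by deep_model1:
#   vae, vae_loss = deep_model1((32, 32, 3))
#   vae.compile(optimizer='adam', loss=vae_loss)
#   vae.fit(X, X, batch_size=128, nb_epoch=50)  # targets are the inputs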
def decode(y, relu_max):
print 'decoder input shape:', y._keras_shape
assert len(y._keras_shape) == 2
if relu_max:
x = GaussianNoise(0.2)(y)
x = Activation(utils.relu_n(1))(x)
else:
x = y
x = Reshape((1, 1, LATENT_DIM))(x)
# 1, 1, LATENT_DIM
if relu_max:
print 'in decode: relu_max:', relu_max
x = Activation(utils.scale_up(relu_max))(x)
# x = BN(mode=2, axis=3)(x) # this bn seems not good? NOT VERIFIED
# why use 512 instead of 256 here?
batch_size = keras.backend.shape(x)[0]
x = Deconv2D(512, 6, 6, output_shape=[batch_size, 6, 6, 512],
activation='relu', border_mode='same', subsample=(6,6))(x)
x = BN(mode=2, axis=3)(x)
# 6, 6, 512
x = Deconv2D(256, 5, 5, output_shape=[batch_size, 12, 12, 256],
activation='relu', border_mode='same', subsample=(2,2))(x)
x = BN(mode=2, axis=3)(x)
# 12, 12, 256
x = Deconv2D(128, 5, 5, output_shape=(batch_size, 24, 24, 128),
activation='relu', border_mode='same', subsample=(2,2))(x)
x = BN(mode=2, axis=3)(x)
# 24, 24, 128
x = Deconv2D(64, 5, 5, output_shape=(batch_size, 48, 48, 64),
activation='relu', border_mode='same', subsample=(2,2))(x)
x = BN(mode=2, axis=3)(x)
# 48, 48, 64
x = Deconv2D(32, 5, 5, output_shape=(batch_size, 96, 96, 32),
activation='relu', border_mode='same', subsample=(2,2))(x)
x = BN(mode=2, axis=3)(x)
# 96, 96, 32
x = Deconv2D(3, 5, 5, output_shape=(batch_size, 96, 96, 3),
activation='linear', border_mode='same', subsample=(1,1))(x)
# 96, 96, 3
x = BN(mode=2, axis=3)(x)
return x
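# Hedged sketch of wrapping a decode() function into a standalone decoder
# Model (assumes a flat latent vector of width LATENT_DIM, matching the
# 2-D shape asserted at the top of decode):
#   y = Input(shape=(LATENT_DIM,))
#   decoder = Model(y, decode(y, relu_max=1))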