def test_keras_import(self):
    # Upsample 1D
    model = Sequential()
    model.add(UpSampling1D(size=2, input_shape=(1, 16)))
    model.build()
    self.keras_param_test(model, 0, 2)
    # Upsample 2D
    model = Sequential()
    model.add(UpSampling2D(size=(2, 2), input_shape=(1, 16, 16)))
    model.build()
    self.keras_param_test(model, 0, 3)
    # Upsample 3D
    model = Sequential()
    model.add(UpSampling3D(size=(2, 2, 2), input_shape=(1, 16, 16, 16)))
    model.build()
    self.keras_param_test(model, 0, 4)
# ********** Pooling Layers **********
Python UpSampling2D() usage examples
def upsample(layer, layer_in, layerId):
    upsampleMap = {
        '1D': UpSampling1D,
        '2D': UpSampling2D,
        '3D': UpSampling3D
    }
    out = {}
    layer_type = layer['params']['layer_type']
    if (layer_type == '1D'):
        size = layer['params']['size_w']
    elif (layer_type == '2D'):
        size = (layer['params']['size_h'], layer['params']['size_w'])
    else:
        size = (layer['params']['size_h'], layer['params']['size_w'],
                layer['params']['size_d'])
    out[layerId] = upsampleMap[layer_type](size=size)(*layer_in)
    return out
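A minimal sketch of how this builder might be called; the spec dictionary below is inferred from the keys the function reads, and the layer id 'l3' is purely illustrative.

from keras.layers import Input

hypothetical_spec = {'params': {'layer_type': '2D', 'size_h': 2, 'size_w': 2}}
inp = Input(shape=(16, 16, 3))
out = upsample(hypothetical_spec, [inp], 'l3')
# out['l3'] is the UpSampling2D output tensor with height and width doubled: (None, 32, 32, 3)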
# ********** Pooling Layers **********
def double_conv_layer(x, size, dropout, batch_norm):
    from keras.models import Model
    from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers.core import Dropout, Activation

    conv = Convolution2D(size, 3, 3, border_mode='same')(x)
    if batch_norm == True:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    conv = Convolution2D(size, 3, 3, border_mode='same')(conv)
    if batch_norm == True:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    if dropout > 0:
        conv = Dropout(dropout)(conv)
    return conv
def keepsize_256(nx, ny, noise, depth, activation='relu', n_filters=64, l2_reg=1e-7):
    """
    Deep residual network that keeps the size of the input throughout the whole network
    """
    def residual(inputs, n_filters):
        x = ReflectionPadding2D()(inputs)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = Activation(activation)(x)
        x = ReflectionPadding2D()(x)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = add([x, inputs])
        return x

    inputs = Input(shape=(nx, ny, 1))
    x = GaussianNoise(noise)(inputs)
    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x0 = Activation(activation)(x)

    x = residual(x0, n_filters)
    for i in range(depth-1):
        x = residual(x, n_filters)

    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization()(x)
    x = add([x, x0])

    # Upsampling for superresolution
    x = UpSampling2D()(x)
    x = ReflectionPadding2D()(x)
    x = Conv2D(4*n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x = Activation(activation)(x)

    final = Conv2D(1, (1, 1), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    return Model(inputs=inputs, outputs=final)
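keepsize_256 relies on a ReflectionPadding2D layer that is not defined in these excerpts. A minimal sketch of such a layer, assuming a TensorFlow backend and channels_last data, with the default (1, 1) padding that the 'valid' 3x3 convolutions above need in order to preserve spatial size:

import tensorflow as tf
from keras.layers import Layer

class ReflectionPadding2D(Layer):
    def __init__(self, padding=(1, 1), **kwargs):
        super(ReflectionPadding2D, self).__init__(**kwargs)
        self.padding = padding

    def compute_output_shape(self, input_shape):
        # height and width grow by twice the padding; batch and channels are unchanged
        pad_h, pad_w = self.padding
        height = input_shape[1] + 2 * pad_h if input_shape[1] is not None else None
        width = input_shape[2] + 2 * pad_w if input_shape[2] is not None else None
        return (input_shape[0], height, width, input_shape[3])

    def call(self, x):
        pad_h, pad_w = self.padding
        return tf.pad(x, [[0, 0], [pad_h, pad_h], [pad_w, pad_w], [0, 0]], mode='REFLECT')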
def create_network():
    input_img = Input(shape=INPUT_SHAPE)
    x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    encoded = MaxPooling2D((2, 2), padding='same')(x)

    # at this point the representation is (4, 4, 8) i.e. 128-dimensional
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(16, (3, 3), activation='relu')(x)
    x = UpSampling2D((2, 2))(x)
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

    model = Model(input_img, decoded)
    model.compile(optimizer='adadelta', loss='binary_crossentropy')
    return KerasNetwork(model, 'weights_conv_autoencoder.hd5')
def test_tiny_conv_upsample_random(self):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 1)
    num_kernels = 3
    kernel_height = 5
    kernel_width = 5

    # Define a model
    model = Sequential()
    model.add(Conv2D(input_shape=input_shape,
                     filters=num_kernels, kernel_size=(kernel_height, kernel_width)))
    model.add(UpSampling2D(size=2))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_keras_model(model)
def mnist_generator(input_shape=(28, 28, 1), scale=1/4):
    x0 = Input(input_shape)
    x = Conv2D(int(128*scale), (3, 3), strides=(2, 2), padding='same')(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(int(64*scale), (3, 3), strides=(2, 2), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = residual_block(x, scale, num_id=2)
    x = residual_block(x, scale*2, num_id=3)

    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(int(1024*scale), (1, 1))(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(1, (1, 1), activation='sigmoid')(x)
    return Model(x0, x)
def _up_block(block, mrge, nb_filters):
    up = merge([Convolution2D(2*nb_filters, 2, 2, border_mode='same')(UpSampling2D(size=(2, 2))(block)), mrge], mode='concat', concat_axis=1)
    # conv = Convolution2D(4*nb_filters, 1, 1, activation='relu', border_mode='same')(up)
    conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(up)
    conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(4*nb_filters, 1, 1, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 1, 1, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(4*nb_filters, 1, 1, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 1, 1, activation='relu', border_mode='same')(conv)
    return conv
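The merge() and border_mode calls above are Keras 1 API and were removed in Keras 2. A hedged Keras 2 equivalent of the same block, using concatenate() and Conv2D (axis=1 assumes channels_first data, as concat_axis=1 does in the original):

from keras.layers import Conv2D, UpSampling2D, concatenate

def _up_block_v2(block, mrge, nb_filters):
    # upsample, apply the 2x2 convolution, then concatenate with the skip tensor
    up = UpSampling2D(size=(2, 2))(block)
    up = Conv2D(2 * nb_filters, (2, 2), padding='same')(up)
    up = concatenate([up, mrge], axis=1)
    conv = Conv2D(nb_filters, (3, 3), activation='relu', padding='same')(up)
    conv = Conv2D(nb_filters, (3, 3), activation='relu', padding='same')(conv)
    return conv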
# http://arxiv.org/pdf/1512.03385v1.pdf
# 50 Layer resnet
def build_generator(latent_size):
    cnn = Sequential()
    cnn.add(Dense(1024, input_dim=latent_size, activation='relu'))
    cnn.add(Dense(128 * 7 * 7, activation='relu'))
    cnn.add(Reshape((128, 7, 7)))

    # upsample to (..., 14, 14)
    cnn.add(UpSampling2D(size=(2, 2)))
    cnn.add(Conv2D(256, 5, padding='same',
                   activation='relu', kernel_initializer='glorot_normal'))

    # upsample to (..., 28, 28)
    cnn.add(UpSampling2D(size=(2, 2)))
    cnn.add(Conv2D(128, 5, padding='same',
                   activation='relu', kernel_initializer='glorot_normal'))

    # take a channel axis reduction
    cnn.add(Conv2D(1, 2, padding='same',
                   activation='tanh', kernel_initializer='glorot_normal'))

    # this is the z space commonly referred to in GAN papers
    latent = Input(shape=(latent_size,))
    fake_image = cnn(latent)
    return Model(inputs=latent, outputs=fake_image)
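An illustrative call; the Reshape((128, 7, 7)) above implies channels_first image data, so the generated samples come back as (batch, 1, 28, 28). The latent size of 100 is only an example.

import numpy as np

generator = build_generator(latent_size=100)
noise = np.random.normal(size=(16, 100))
fake_images = generator.predict(noise)   # shape (16, 1, 28, 28), values in [-1, 1] from the tanh output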
def build_decoder(self, input_shape):
    "this function did not converge well. sigh"
    data_dim = np.prod(input_shape)
    last_convolution = 1 + np.array(input_shape) // 4
    first_convolution = last_convolution * 4
    diff = tuple(first_convolution - input_shape)
    crop = [[0, 0], [0, 0]]
    for i in range(2):
        if diff[i] % 2 == 0:
            for j in range(2):
                crop[i][j] = diff[i] // 2
        else:
            crop[i][0] = diff[i] // 2
            crop[i][1] = diff[i] // 2 + 1
    crop = ((crop[0][0], crop[0][1]), (crop[1][0], crop[1][1]))
    print(last_convolution, first_convolution, diff, crop)

    return [*([Dropout(self.parameters['dropout'])] if self.parameters['dropout_z'] else []),
            *[Dense(self.parameters['layer'], activation='relu', use_bias=False),
              BN(),
              Dropout(self.parameters['dropout']),],
            *[Dense(np.prod(last_convolution) * self.parameters['clayer'], activation='relu', use_bias=False),
              BN(),
              Dropout(self.parameters['dropout']),],
            Reshape((*last_convolution, self.parameters['clayer'])),
            *[UpSampling2D((2, 2)),
              Deconvolution2D(self.parameters['clayer'], (3, 3), activation='relu', padding='same', use_bias=False),
              BN(),
              Dropout(self.parameters['dropout']),],
            *[UpSampling2D((2, 2)),
              Deconvolution2D(1, (3, 3), activation='sigmoid', padding='same'),],
            Cropping2D(crop),
            Reshape(input_shape),]
def conv_autoencoder(X):
    X = X.reshape(X.shape[0], 28, 28, 1)
    inputs = Input(shape=(28, 28, 1))
    h = Conv2D(4, 3, 3, activation='relu', border_mode='same')(inputs)
    encoded = MaxPooling2D((2, 2))(h)
    h = Conv2D(4, 3, 3, activation='relu', border_mode='same')(encoded)
    h = UpSampling2D((2, 2))(h)
    outputs = Conv2D(1, 3, 3, activation='relu', border_mode='same')(h)

    model = Model(input=inputs, output=outputs)
    model.compile(optimizer='adam', loss='mse')
    model.fit(X, X, batch_size=64, nb_epoch=5)

    return model, Model(input=inputs, output=encoded)
def test_keras_export(self):
    tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                              'keras_export_test.json'), 'r')
    response = json.load(tests)
    tests.close()
    net = yaml.safe_load(json.dumps(response['net']))
    net = {'l0': net['Input'], 'l1': net['Input2'], 'l2': net['Input4'], 'l3': net['Upsample']}

    # Upsample 1D
    net['l1']['connection']['output'].append('l3')
    net['l3']['connection']['input'] = ['l1']
    net['l3']['params']['layer_type'] = '1D'
    inp = data(net['l1'], '', 'l1')['l1']
    temp = upsample(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[1].__class__.__name__, 'UpSampling1D')

    # Upsample 2D
    net['l0']['connection']['output'].append('l0')
    net['l3']['connection']['input'] = ['l0']
    net['l3']['params']['layer_type'] = '2D'
    inp = data(net['l0'], '', 'l0')['l0']
    temp = upsample(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[1].__class__.__name__, 'UpSampling2D')

    # Upsample 3D
    net['l2']['connection']['output'].append('l3')
    net['l3']['connection']['input'] = ['l2']
    net['l3']['params']['layer_type'] = '3D'
    inp = data(net['l2'], '', 'l2')['l2']
    temp = upsample(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[1].__class__.__name__, 'UpSampling3D')
def generator(self):
    if self.G:
        return self.G
    self.G = Sequential()
    dropout = 0.4
    depth = 64+64+64+64
    dim = 7

    # In: 100
    # Out: dim x dim x depth
    self.G.add(Dense(dim*dim*depth, input_dim=100))
    self.G.add(BatchNormalization(momentum=0.9))
    self.G.add(Activation('relu'))
    self.G.add(Reshape((dim, dim, depth)))
    self.G.add(Dropout(dropout))

    # In: dim x dim x depth
    # Out: 2*dim x 2*dim x depth/2
    self.G.add(UpSampling2D())
    self.G.add(Conv2DTranspose(int(depth/2), 5, padding='same'))
    self.G.add(BatchNormalization(momentum=0.9))
    self.G.add(Activation('relu'))
    self.G.add(UpSampling2D())
    self.G.add(Conv2DTranspose(int(depth/4), 5, padding='same'))
    self.G.add(BatchNormalization(momentum=0.9))
    self.G.add(Activation('relu'))
    self.G.add(Conv2DTranspose(int(depth/8), 5, padding='same'))
    self.G.add(BatchNormalization(momentum=0.9))
    self.G.add(Activation('relu'))

    # Out: 28 x 28 x 1 grayscale image [0.0,1.0] per pix
    self.G.add(Conv2DTranspose(1, 5, padding='same'))
    self.G.add(Activation('sigmoid'))
    self.G.summary()
    return self.G
def build_models(self, input_shape):
    middle_neurons = 10

    self.encoder = Sequential()
    self.encoder.add(Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=input_shape))
    self.encoder.add(Activation(selu))
    self.encoder.add(Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    self.encoder.add(Activation(selu))
    self.encoder.add(Flatten())
    self.encoder.add(Dense(middle_neurons))
    self.encoder.add(Activation('sigmoid'))
    self.encoder.summary()

    self.decoder = Sequential()
    self.decoder.add(Dense(7*7*128, input_shape=(middle_neurons,)))
    self.decoder.add(Activation(selu))
    if keras.backend.image_data_format() == 'channels_first':
        self.decoder.add(Reshape([128, 7, 7]))
    else:
        self.decoder.add(Reshape([7, 7, 128]))
    self.decoder.add(UpSampling2D(size=(2, 2)))
    self.decoder.add(Conv2D(64, (5, 5), padding='same'))
    self.decoder.add(Activation(selu))
    self.decoder.add(UpSampling2D(size=(2, 2)))
    self.decoder.add(Conv2D(1, (5, 5), padding='same'))
    self.decoder.add(Activation('sigmoid'))
    self.decoder.summary()

    self.autoencoder = Sequential()
    self.autoencoder.add(self.encoder)
    self.autoencoder.add(self.decoder)
    self.autoencoder.compile(loss='mean_squared_error',
                             optimizer=Adam(lr=1e-4),
                             metrics=['accuracy'])
def keepsize(nx, ny, noise, depth, activation='relu', n_filters=64, l2_reg=1e-7):
    """
    Deep residual network that keeps the size of the input throughout the whole network
    """
    def residual(inputs, n_filters):
        x = ReflectionPadding2D()(inputs)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = Activation(activation)(x)
        x = ReflectionPadding2D()(x)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = add([x, inputs])
        return x

    inputs = Input(shape=(nx, ny, 1))
    x = GaussianNoise(noise)(inputs)
    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x0 = Activation(activation)(x)

    x = residual(x0, n_filters)
    for i in range(depth-1):
        x = residual(x, n_filters)

    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization()(x)
    x = add([x, x0])

    # Upsampling for superresolution
    x = UpSampling2D()(x)
    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x = Activation(activation)(x)

    final = Conv2D(1, (1, 1), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    return Model(inputs=inputs, outputs=final)
def test_delete_channels_upsampling2d(channel_index, data_format):
    layer = UpSampling2D([2, 3], data_format=data_format)
    layer_test_helper_flatten_2d(layer, channel_index, data_format)
def Unet(nClasses, optimizer=None, input_width=360, input_height=480, nChannels=1):
    inputs = Input((nChannels, input_height, input_width))

    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)

    up1 = merge([UpSampling2D(size=(2, 2))(conv3), conv2], mode='concat', concat_axis=1)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv4)

    up2 = merge([UpSampling2D(size=(2, 2))(conv4), conv1], mode='concat', concat_axis=1)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv5)

    conv6 = Convolution2D(nClasses, 1, 1, activation='relu', border_mode='same')(conv5)
    conv6 = core.Reshape((nClasses, input_height*input_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)

    conv7 = core.Activation('softmax')(conv6)

    model = Model(input=inputs, output=conv7)

    if optimizer is not None:
        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])

    return model
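An illustrative instantiation (Keras 1 API; the Input((nChannels, input_height, input_width)) layout above assumes channels_first data). The default 480x360 size divides evenly by the two pooling stages, which the skip connections require; the optimizer choice here is only an example.

from keras.optimizers import Adam

segmenter = Unet(nClasses=2, optimizer=Adam(lr=1e-4))
segmenter.summary()
# per-sample output shape after Reshape/Permute: (input_height * input_width, nClasses),
# ready for per-pixel categorical_crossentropy against similarly flattened masks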
def test_upsample(self):
    """
    Test the conversion of 2D convolutional layer + upsample
    """
    from keras.layers import Convolution2D, UpSampling2D

    # Create a simple Keras model
    model = Sequential()
    model.add(Convolution2D(input_shape=(64, 64, 3), nb_filter=32,
                            nb_row=5, nb_col=5))
    model.add(UpSampling2D(size=(2, 2)))

    input_names = ['input']
    output_names = ['output']
    spec = keras.convert(model, input_names, output_names).get_spec()
    self.assertIsNotNone(spec)

    # Test the model class
    self.assertIsNotNone(spec.description)
    self.assertTrue(spec.HasField('neuralNetwork'))

    # Test the inputs and outputs
    self.assertEquals(len(spec.description.input), len(input_names))
    self.assertItemsEqual(input_names,
                          map(lambda x: x.name, spec.description.input))
    self.assertEquals(len(spec.description.output), len(output_names))
    self.assertItemsEqual(output_names,
                          map(lambda x: x.name, spec.description.output))

    # Test the layer parameters.
    layers = spec.neuralNetwork.layers
    layer_0 = layers[0]
    self.assertIsNotNone(layer_0.convolution)
    layer_1 = layers[1]
    self.assertIsNotNone(layer_1.upsample)
def test_upsample(self):
    """
    Test the conversion of 2D convolutional layer + upsample
    """
    from keras.layers import Conv2D, UpSampling2D

    # Create a simple Keras model
    model = Sequential()
    model.add(Conv2D(input_shape=(64, 64, 3), filters=32,
                     kernel_size=(5, 5)))
    model.add(UpSampling2D(size=(2, 2)))

    input_names = ['input']
    output_names = ['output']
    spec = keras.convert(model, input_names, output_names).get_spec()
    self.assertIsNotNone(spec)

    # Test the model class
    self.assertIsNotNone(spec.description)
    self.assertTrue(spec.HasField('neuralNetwork'))

    # Test the inputs and outputs
    self.assertEquals(len(spec.description.input), len(input_names))
    self.assertEqual(sorted(input_names),
                     sorted(map(lambda x: x.name, spec.description.input)))
    self.assertEquals(len(spec.description.output), len(output_names))
    self.assertEqual(sorted(output_names),
                     sorted(map(lambda x: x.name, spec.description.output)))

    # Test the layer parameters.
    layers = spec.neuralNetwork.layers
    layer_0 = layers[0]
    self.assertIsNotNone(layer_0.convolution)
    layer_1 = layers[1]
    self.assertIsNotNone(layer_1.upsample)
def test_upsample_layer_params(self):
    options = dict(
        size=[(2, 2), (3, 3), (4, 4), (5, 5)]
    )
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 1)
    X = np.random.rand(1, *input_shape)

    # Define a function that tests a model
    def build_model(x):
        kwargs = dict(zip(options.keys(), x))
        model = Sequential()
        model.add(Conv2D(filters=5, kernel_size=(7, 7),
                         input_shape=input_shape))
        model.add(UpSampling2D(**kwargs))
        return x, model

    # Iterate through all combinations
    product = itertools.product(*options.values())
    args = [build_model(p) for p in product]

    # Test the cases
    print("Testing a total of %s cases. This could take a while" % len(args))
    for param, model in args:
        self._run_test(model, param)
def scaleup(input, ngf, kss, strides, padding):
    # x = Conv2DTranspose(ngf, kss, strides=strides, padding=padding)(input)
    # upsample + conv
    x = UpSampling2D(strides)(input)
    x = Conv2D(ngf, kss, padding=padding)(x)
    return x
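Illustrative usage of scaleup(); the input tensor shape and filter count below are assumptions, with channels_last data.

from keras.layers import Input

feature_map = Input(shape=(32, 32, 64))
upscaled = scaleup(feature_map, ngf=32, kss=(3, 3), strides=(2, 2), padding='same')
# upscaled has shape (None, 64, 64, 32): nearest-neighbour upsampling followed by a 3x3 convolution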
def build_conv_autoencoder(input_dim=(28, 28, 1)):
    input_img = Input(shape=input_dim)  # adapt this if using `channels_first` image data format
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    encoded = MaxPooling2D((2, 2), padding='same')(x)

    # at this point the representation is (4, 4, 8) i.e. 128-dimensional
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(encoded)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    if input_dim[0] == 28:
        x = Conv2D(64, (3, 3), activation='relu')(x)
    else:
        x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    decoded = Conv2D(input_dim[2], (3, 3), activation='sigmoid', padding='same')(x)

    autoencoder = Model(input_img, decoded)
    autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
    return autoencoder
# def build_lstm_autoencoder(timesteps, input_dim)
# inputs = Input(shape=(timesteps, input_dim))
# encoded = LSTM(latent_dim)(inputs)
# decoded = RepeatVector(timesteps)(encoded)
# decoded = LSTM(input_dim, return_sequences=True)(decoded)
# sequence_autoencoder = Model(inputs, decoded)
# encoder = Model(inputs, encoded)
# return encoder, sequence_autoencoder
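A runnable version of the commented-out sequence autoencoder above, with latent_dim (left implicit in the sketch) added as an explicit argument:

from keras.layers import Input, LSTM, RepeatVector
from keras.models import Model

def build_lstm_autoencoder(timesteps, input_dim, latent_dim=32):
    inputs = Input(shape=(timesteps, input_dim))
    encoded = LSTM(latent_dim)(inputs)                        # compress the sequence into one vector
    decoded = RepeatVector(timesteps)(encoded)                # repeat it for every timestep
    decoded = LSTM(input_dim, return_sequences=True)(decoded)
    sequence_autoencoder = Model(inputs, decoded)
    encoder = Model(inputs, encoded)
    return encoder, sequence_autoencoder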
def basic_gen(input_shape, img_shape, nf=128, scale=4, FC=[], use_upsample=False):
    dim, h, w = img_shape
    img = Input(input_shape)
    x = img
    for fc_dim in FC:
        x = Dense(fc_dim)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    # integer division keeps the Dense size and Reshape dims ints under Python 3
    x = Dense(nf*2**(scale-1) * (h // 2**scale) * (w // 2**scale))(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Reshape((nf*2**(scale-1), h // 2**scale, w // 2**scale))(x)
    for s in range(scale-2, -1, -1):
        # upsample + conv can eliminate the checkerboard artifact
        # http://distill.pub/2016/deconv-checkerboard/
        if use_upsample:
            x = UpSampling2D()(x)
            x = Conv2D(nf*2**s, (3, 3), padding='same')(x)
        else:
            x = Deconv2D(nf*2**s, (3, 3), strides=(2, 2), padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    if use_upsample:
        x = UpSampling2D()(x)
        x = Conv2D(dim, (3, 3), padding='same')(x)
    else:
        x = Deconv2D(dim, (3, 3), strides=(2, 2), padding='same')(x)
    x = Activation('tanh')(x)
    return Model(img, x)
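An illustrative call; the latent and image shapes below are assumptions, and the Reshape above implies channels_first tensors.

gen = basic_gen(input_shape=(100,), img_shape=(3, 64, 64), nf=64, scale=4, use_upsample=True)
gen.summary()   # upsamples 4x4 feature maps back to 3 x 64 x 64 images in the [-1, 1] tanh range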
def conv_layer(in_, nb_filter, filter_length, subsample=1, upsample=1, only_conv=False):
    if upsample != 1:
        out = UpSampling2D(size=(upsample, upsample))(in_)
    else:
        out = in_
    padding = int(np.floor(filter_length / 2))
    out = ReflectPadding2D((padding, padding))(out)
    out = Conv2D(nb_filter, filter_length, filter_length, subsample=(subsample, subsample), border_mode="valid")(out)
    if not only_conv:
        out = InstanceNormalization()(out)
        out = Activation("relu")(out)
    return out
# archs.py, from project kaggle-dstl-satellite-imagery-feature-detection by alno
def unet(input_shapes, n_classes):
    inputs = Input(input_shapes['in'], name='in')

    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Convolution2D(384, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Convolution2D(384, 3, 3, activation='relu', border_mode='same')(conv5)

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)

    conv10 = Convolution2D(n_classes, 1, 1, activation='sigmoid')(conv9)

    return Model(input=inputs, output=conv10)
def unet_ma(input_shapes, n_classes):
    in_M = Input(input_shapes['in_M'], name='in_M')
    in_A = Input(input_shapes['in_A'], name='in_A')

    inputs = merge([in_A, in_M], mode='concat', concat_axis=1)

    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)

    up7 = merge([UpSampling2D(size=(2, 2))(conv4), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)

    conv10 = Convolution2D(n_classes, 1, 1, activation='sigmoid')(conv9)

    return Model(input=[in_M, in_A], output=conv10)
def merge_block(conv, skip, mode='concat'):
    return merge([UpSampling2D(size=(2, 2))(conv), skip], mode=mode, concat_axis=1)
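merge() no longer exists in Keras 2; a hedged equivalent of merge_block using the functional concatenate() helper (concat_axis=1 above implies channels_first data):

from keras.layers import UpSampling2D, concatenate

def merge_block_v2(conv, skip):
    # upsample the deeper feature map and join it with the skip connection along the channel axis
    return concatenate([UpSampling2D(size=(2, 2))(conv), skip], axis=1)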
def autoencoder(channels=3):
    input_img = Input(shape=(channels, 256, 256))
    x = Conv2D(32, 3, 3, activation='relu', border_mode='same')(input_img)
    x = MaxPooling2D((2, 2), border_mode='same')(x)
    x = Conv2D(16, 3, 3, activation='relu', border_mode='same')(x)
    x = MaxPooling2D((2, 2), border_mode='same')(x)
    x = Conv2D(8, 3, 3, activation='relu', border_mode='same')(x)
    x = MaxPooling2D((2, 2), border_mode='same')(x)
    x = Conv2D(8, 3, 3, activation='relu', border_mode='same')(x)
    encoded = MaxPooling2D((2, 2), border_mode='same')(x)

    x = Conv2D(8, 3, 3, activation='relu', border_mode='same')(encoded)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, 3, 3, activation='relu', border_mode='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(16, 3, 3, activation='relu', border_mode='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(32, 3, 3, activation='relu', border_mode='same')(x)
    x = UpSampling2D((2, 2))(x)
    decoded = Conv2D(channels, 3, 3, activation='sigmoid', border_mode='same')(x)

    ae = Model(input_img, decoded)
    # sgd = SGD(lr=0.001, momentum=.9, decay=1e-3)
    ae.compile(optimizer='adadelta', loss='mse')
    return ae
def modeling(self, input_shape, nb_classes):
    nb_filters = 8
    # size of pooling area for max pooling
    pool_size_l = [(4, 4), (4, 4)]  # 160 --> 40, 40 --> 10
    # convolution kernel size
    kernel_size = (20, 20)

    model = Sequential()
    model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                            border_mode='valid',
                            input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size_l[0]))  # 160 --> 40
    model.add(Dropout(0.25))

    model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                            border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size_l[1]))  # 40 --> 10
    model.add(Dropout(0.25))

    model.add(UpSampling2D(pool_size_l[1]))  # 10 --> 40
    # model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
    #                         border_mode='valid'))
    # model.add(Activation('relu'))
    # model.add(UpSampling2D(pool_size_2))
    # model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(4))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    return model
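An illustrative call within the owning class; the 160x160 single-channel, channels_first input shape is only an assumption based on the "160 --> 40" comments, and the compile settings are examples.

clf = self.modeling(input_shape=(1, 160, 160), nb_classes=4)
clf.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])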