def modeling_1(self, input_shape, nb_classes):
print("Modeling_1")
nb_filters = 8
# size of pooling area for max pooling
pool_size_l = [(4, 4), (4,4)] # 160 --> 40, 40 --> 10
# convolution kernel size
kernel_size = (20, 20)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size_l[0])) # 160 --> 40
model.add(Dropout(0.25))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size_l[1])) # 40 --> 10
model.add(Dropout(0.25))
model.add(Convolution2D(nb_filters, 2, 2,
border_mode='valid'))
model.add(Activation('relu'))
model.add(UpSampling2D(pool_size_l[1])) # 10 --> 40
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(4))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
return model
def modeling_2(self, input_shape, nb_classes):
print("Modeling_2")
nb_filters = 8
# size of pooling area for max pooling
pool_size_l = [(4, 4), (4,4)] # 160 --> 40, 40 --> 10
# convolution kernel size
kernel_size = (20, 20)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size_l[0])) # 160 --> 40
model.add(Dropout(0.25))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size_l[1])) # 40 --> 10
model.add(Dropout(0.25))
model.add(Convolution2D(nb_filters, 2, 2,
border_mode='valid'))
model.add(Activation('relu'))
model.add(UpSampling2D(pool_size_l[1])) # 10 --> 40
model.add(Dropout(0.25))
model.add(Convolution2D(nb_filters, 2, 2,
border_mode='valid'))
model.add(Activation('relu'))
model.add(UpSampling2D(pool_size_l[0])) # 40 --> 160
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
return model
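The "160 --> 40 --> 10" comments in modeling_1/modeling_2 assume the pooling alone sets the feature-map size; with border_mode='valid' and a 20x20 kernel, each convolution also shrinks the map. A quick standalone check (the 1x160x160 input size is an assumption taken from those comments, not from the original file):

def valid_conv_then_pool(size, kernel, pool):
    # Spatial size after a 'valid' convolution followed by non-overlapping pooling.
    return (size - kernel + 1) // pool

s = valid_conv_then_pool(160, 20, 4)   # 35, not the 40 suggested by the comment
s = valid_conv_then_pool(s, 20, 4)     # 4, not 10
print(s)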
def modeling(self):
Lx, Ly = self.Lx, self.Ly
input_img = Input(shape=(1, Lx, Ly))
ks = 8
x = Convolution2D(16, ks*2, ks*2, activation='relu', border_mode='same')(input_img)
x = MaxPooling2D((2, 2), border_mode='same')(x) # 160 --> 80
x = Convolution2D(8, ks*2, ks*2, activation='relu', border_mode='same')(x)
x = MaxPooling2D((2, 2), border_mode='same')(x) # 80 --> 40
x = Convolution2D(8, ks*2, ks*2, activation='relu', border_mode='same')(x)
encoded = MaxPooling2D((2, 2), border_mode='same')(x) # 40 --> 20
# at this point the representation is (8, 20, 20)
x = Convolution2D(8, ks, ks, activation='relu', border_mode='same')(encoded)
x = UpSampling2D((2, 2))(x) # 20 --> 40
x = Convolution2D(8, ks, ks, activation='relu', border_mode='same')(x)
x = UpSampling2D((2, 2))(x) # 40 --> 80
x = Convolution2D(16, ks, ks, activation='relu', border_mode='same')(x)
x = UpSampling2D((2, 2))(x) # 80 --> 160
decoded = Convolution2D(1, ks, ks, activation='sigmoid', border_mode='same')(x)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
self.autoencoder = autoencoder
def modeling(self):
input_img = Input(shape=(1, 28, 28))
# set-1
x = Convolution2D(16, 3, 3, activation='relu',
border_mode='same')(input_img) # 16,28,28
x = MaxPooling2D((2, 2), border_mode='same')(x) # 16,14,14
x = Dropout(0.25)(x) # Use dropout after max pooling
# set-2
x = Convolution2D(8, 3, 3, activation='relu',
border_mode='same')(x) # 8,14,14
x = MaxPooling2D((2, 2), border_mode='same')(x) # 8,7,7
x = Dropout(0.25)(x) # Use dropout after max pooling
# set-3
x = Convolution2D(8, 3, 3, activation='relu',
border_mode='same')(x) # 8,7,7
encoded = x
x = Convolution2D(8, 3, 3, activation='relu',
border_mode='same')(encoded) # 8,7,7
# x = Dropout(0.25)(x) # Use dropout after max pooling
x = UpSampling2D((2, 2))(x) # 8,14,14
x = Convolution2D(8, 3, 3, activation='relu',
border_mode='same')(x) # 8,14,14
# x = Dropout(0.25)(x) # Use dropout after max pooling
x = UpSampling2D((2, 2))(x) # 8, 28, 28
x = Convolution2D(16, 3, 3, activation='relu',
border_mode='same')(x) # 16, 28, 28
# x = Dropout(0.25)(x) # Use dropout after max pooling
decoded = Convolution2D(
1, 3, 3, activation='sigmoid', border_mode='same')(x) # 1, 28, 28
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
self.autoencoder = autoencoder
def get_modified_vgg19():
model = vgg19.VGG19(weights = 'imagenet', include_top = True)
for x in range(20):
model.layers.pop()
x = UpSampling2D()(model.layers[-1].output)
x = Deconv2D(64, (3,3), padding = 'same', activation = 'relu')(x)
x = Deconv2D(3, (1,1), padding = 'same', activation = None)(x)
mod = keras.models.Model(input = model.input, output = x)
adam = Adam(lr=0.005, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=3e-06)
mod.compile(loss='mse', optimizer = adam)
return mod
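A hedged usage sketch (not from the original file): with 20 layers popped, the last remaining VGG19 layer is block2_conv2 at half resolution, so the upsampling head maps a 224x224x3 image back to a 224x224x3 output. The channels_last data format is an assumption.

import numpy as np

mod = get_modified_vgg19()                        # downloads the ImageNet weights on first use
x = np.random.rand(1, 224, 224, 3).astype('float32')
y = mod.predict(x)
print(y.shape)                                    # expected (1, 224, 224, 3) under these assumptions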
deep_learning_2DUnet.py (project: TC-Lung_nodules_detection, author: Shicoder)
def get_unet():
inputs = Input((1, img_rows, img_cols))
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
conv1 = Dropout(0.2)(conv1)
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
pool1 = BatchNormalization()(pool1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Dropout(0.2)(conv2)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
pool2 = BatchNormalization()(pool2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
conv3 = Dropout(0.2)(conv3)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
pool3 = BatchNormalization()(pool3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
conv4 = Dropout(0.2)(conv4)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
pool4 = BatchNormalization()(pool4)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
conv5 = Dropout(0.2)(conv5)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
conv6 = Dropout(0.2)(conv6)
conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)
conv6 = BatchNormalization()(conv6)
up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
conv7 = Dropout(0.2)(conv7)
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
conv7 = BatchNormalization()(conv7)
up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
conv8 = Dropout(0.2)(conv8)
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
conv8 = BatchNormalization()(conv8)
up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
conv9 = Dropout(0.2)(conv9)
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
conv9 = BatchNormalization()(conv9)
conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
model = Model(input=inputs, output=conv10)
model.compile(optimizer=Adam(lr=1.0e-4), loss=dice_coef_loss, metrics=[dice_coef])
return model
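dice_coef and dice_coef_loss are referenced above but not included in this excerpt. A typical definition, given here as an assumption about what the original file contains, is the smoothed soft Dice coefficient:

from keras import backend as K

smooth = 1.0

def dice_coef(y_true, y_pred):
    # Soft Dice coefficient over the flattened masks, smoothed to avoid division by zero.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    # Negated so that minimizing the loss maximizes the Dice overlap.
    return -dice_coef(y_true, y_pred)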
def build_fcn(X):
#
# DESCRIPTION
# KERAS FCN DEFINITION
# Using the shape of the input to setup the input layer we create a FCN with 2 skips
#
# INPUTS
# X [number_of_images, 400, 400, channels]
#
# OUTPUTS
# model uninstantiated Keras model
#
img_rows, img_cols = 400, 400
inputs = Input(shape=X.shape[1:])
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
conv1 = Convolution2D(32, 4, 4, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Convolution2D(64, 4, 4, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv3) # 50 50
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv4) # 25 25
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
pool5 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv5)
drop3 = Dropout(0.5)(pool5)
convpool3 = Convolution2D(60, 1, 1, activation='relu', border_mode='same')(pool3)
convpool4 = Convolution2D(60, 1, 1, activation='relu', border_mode='same')(pool4)
convdrop3 = Convolution2D(60, 1, 1, activation='relu', border_mode='same')(drop3)
drop3x5 = UpSampling2D(size=(5, 5))(convdrop3)
croppeddrop3x5 = Cropping2D(((5,5),(5,5)))(drop3x5) # 50 50
pool4x2 = UpSampling2D(size=(2, 2))(convpool4) # 50 50
fuse2 = merge([convpool3, pool4x2, croppeddrop3x5], mode='concat', concat_axis=-1) # 50 x 50, 3 x 60 = 180 channels
upscore3 = UpSampling2D(size=(8, 8))(fuse2) # F 8s
convscore3 = Convolution2D(1, 1, 1, activation='sigmoid')(upscore3)
# Instantiate Model object
model = Model(input=inputs, output=convscore3)
sgd = SGD(lr=1e-5, decay=2, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss=pixel_wise_loss, metrics=['mean_squared_error'])
#model.compile(loss='mean_squared_error', optimizer=sgd)
return model
## CUSTOM LOSS FUNCTION
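The header above announces a custom loss, but pixel_wise_loss itself is not included in this excerpt. The version below is only a plausible stand-in, not the original implementation: a plain per-pixel binary cross-entropy written against the Keras backend.

from keras import backend as K

def pixel_wise_loss(y_true, y_pred):
    # Hypothetical stand-in: mean binary cross-entropy over all pixels.
    y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
    return -K.mean(y_true * K.log(y_pred) + (1.0 - y_true) * K.log(1.0 - y_pred))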
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4, block_prefix=None):
'''Adds an upsampling block. The upsampling operation relies on the type parameter.
# Arguments
ip: input keras tensor
nb_filters: integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution)
type: can be 'upsampling', 'subpixel', 'deconv'. Determines
type of upsampling performed
weight_decay: weight decay factor
block_prefix: str, for block unique naming
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, nb_filter, rows * 2, cols * 2)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows * 2, cols * 2, nb_filter)` if data_format='channels_last'.
# Returns
a keras tensor
'''
with K.name_scope('TransitionUp'):
if type == 'upsampling':
x = UpSampling2D(name=name_or_none(block_prefix, '_upsampling'))(ip)
elif type == 'subpixel':
x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
use_bias=False, kernel_initializer='he_normal', name=name_or_none(block_prefix, '_conv2D'))(ip)
x = SubPixelUpscaling(scale_factor=2, name=name_or_none(block_prefix, '_subpixel'))(x)
x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
use_bias=False, kernel_initializer='he_normal', name=name_or_none(block_prefix, '_conv2D'))(x)
else:
x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same', strides=(2, 2),
kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay),
name=name_or_none(block_prefix, '_conv2DT'))(ip)
return x
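The block above calls name_or_none (and, on the 'subpixel' branch, a SubPixelUpscaling layer) without showing them. name_or_none is a small naming helper; a definition consistent with how it is used here (an assumption, not copied from the original file) is:

def name_or_none(block_prefix, name):
    # Return a prefixed layer name, or None so that Keras auto-names the layer.
    return block_prefix + name if (block_prefix is not None and name is not None) else None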
def get_unet():
inputs = Input((1, img_rows, img_cols))
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)
up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
model = Model(input=inputs, output=conv10)
model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
return model
def create_model(img_height, img_width, nb_channels, learning_rate):
if K.image_dim_ordering() == 'th':
channel_axis = 1
inputs = Input((nb_channels, img_height, img_width))
else:
channel_axis = 3
inputs = Input((img_height, img_width, nb_channels))
print('K.image_dim_ordering={} Channel axis={}'.format(K.image_dim_ordering(), channel_axis))
# inputs = Input((1, img_rows, img_cols))
conv1 = Conv2D(32, (3, 3), padding="same", activation="relu")(inputs)
conv1 = Conv2D(32, (3, 3), padding="same", activation="relu")(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(64, (3, 3), padding="same", activation="relu")(pool1)
conv2 = Conv2D(64, (3, 3), padding="same", activation="relu")(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(128, (3, 3), padding="same", activation="relu")(pool2)
conv3 = Conv2D(128, (3, 3), padding="same", activation="relu")(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(256, (3, 3), padding="same", activation="relu")(pool3)
conv4 = Conv2D(256, (3, 3), padding="same", activation="relu")(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Conv2D(512, (3, 3), padding="same", activation="relu")(pool4)
conv5 = Conv2D(512, (3, 3), padding="same", activation="relu")(conv5)
up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=channel_axis)
conv6 = Conv2D(256, (3, 3), padding="same", activation="relu")(up6)
conv6 = Conv2D(256, (3, 3), padding="same", activation="relu")(conv6)
up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=channel_axis)
conv7 = Conv2D(128, (3, 3), padding="same", activation="relu")(up7)
conv7 = Conv2D(128, (3, 3), padding="same", activation="relu")(conv7)
up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=channel_axis)
conv8 = Conv2D(64, (3, 3), padding="same", activation="relu")(up8)
conv8 = Conv2D(64, (3, 3), padding="same", activation="relu")(conv8)
up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1], axis=channel_axis)
conv9 = Conv2D(32, (3, 3), padding="same", activation="relu")(up9)
conv9 = Conv2D(32, (3, 3), padding="same", activation="relu")(conv9)
conv10 = Conv2D(nb_channels, (1, 1), activation='sigmoid')(conv9)
model = Model(inputs=inputs, outputs=conv10)
model.compile(optimizer=Adam(lr=learning_rate), loss=dice_coef_loss, metrics=[dice_coef])
return model
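A hypothetical smoke test (the image size is an assumption): build a 256x256 RGB model and inspect the output shape. Note that dice_coef and dice_coef_loss, for example as sketched earlier, must be in scope for the compile call inside create_model.

model = create_model(img_height=256, img_width=256, nb_channels=3, learning_rate=1e-4)
print(model.output_shape)   # (None, 256, 256, 3) with channels_last, (None, 3, 256, 256) with 'th' ordering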
def build_models(self, input_shape):
self.discriminator = Sequential()
self.discriminator.add(Conv2D(64, (5, 5), strides=(2, 2), padding = 'same', activation='relu', input_shape=input_shape))
self.discriminator.add(LeakyReLU(0.2))
self.discriminator.add(Dropout(0.5))
self.discriminator.add(Conv2D(128, (5, 5), strides=(2, 2), padding = 'same', activation='relu'))
self.discriminator.add(LeakyReLU(0.2))
self.discriminator.add(Dropout(0.5))
self.discriminator.add(Conv2D(256, (5, 5), strides=(2, 2), padding = 'same', activation='relu'))
self.discriminator.add(LeakyReLU(0.2))
self.discriminator.add(Dropout(0.5))
# 7x7 for MNIST
#H = Conv2D(512, (5, 5), strides=(2, 2), padding = 'same', activation='relu')(H)
#H = LeakyReLU(0.2)(H)
#H = Dropout(0.5)(H)
self.discriminator.add(Flatten())
self.discriminator.add(Dense(1+self.num_classes,activation='softmax'))
self.discriminator.summary()
self.generator = Sequential()
self.generator.add(Dense(7*7*256, input_shape=(100,)))
#self.generator.add(BatchNormalization())
self.generator.add(Activation('relu'))
if keras.backend.image_data_format() == 'channels_first':
self.generator.add(Reshape([256, 7, 7]))
else:
self.generator.add(Reshape([7, 7, 256]))
self.generator.add(Dropout(0.5))
self.generator.add(UpSampling2D(size=(2, 2)))
self.generator.add(Conv2D(128, (5, 5), padding='same'))
self.generator.add(BatchNormalization())
self.generator.add(Activation('relu'))
self.generator.add(Dropout(0.5))
self.generator.add(UpSampling2D(size=(2, 2)))
self.generator.add(Conv2D(64, (5, 5), padding='same'))
self.generator.add(BatchNormalization())
self.generator.add(Activation('relu'))
# we're ignoring input shape - just assuming it's 7,7,1
self.generator.add(Conv2D(1, (5, 5), padding='same'))
self.generator.add(Activation('sigmoid'))
self.generator.summary()
self.real_image_model = Sequential()
self.real_image_model.add(self.discriminator)
self.real_image_model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=1e-4),
metrics=['accuracy'])
self.fake_image_model = Sequential()
self.fake_image_model.add(self.generator)
self.discriminator.trainable = False
self.fake_image_model.add(self.discriminator)
self.fake_image_model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=1e-4),
metrics=['accuracy'])
def ZF_UNET_224(dropout_val=0.05, batch_norm=True):
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Dropout, Activation
inputs = Input((3, 224, 224))
conv1 = double_conv_layer(inputs, 32, dropout_val, batch_norm)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = double_conv_layer(pool1, 64, dropout_val, batch_norm)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = double_conv_layer(pool2, 128, dropout_val, batch_norm)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = double_conv_layer(pool3, 256, dropout_val, batch_norm)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = double_conv_layer(pool4, 512, dropout_val, batch_norm)
pool5 = MaxPooling2D(pool_size=(2, 2))(conv5)
conv6 = double_conv_layer(pool5, 1024, dropout_val, batch_norm)
up6 = merge([UpSampling2D(size=(2, 2))(conv6), conv5], mode='concat', concat_axis=1)
conv7 = double_conv_layer(up6, 512, dropout_val, batch_norm)
up7 = merge([UpSampling2D(size=(2, 2))(conv7), conv4], mode='concat', concat_axis=1)
conv8 = double_conv_layer(up7, 256, dropout_val, batch_norm)
up8 = merge([UpSampling2D(size=(2, 2))(conv8), conv3], mode='concat', concat_axis=1)
conv9 = double_conv_layer(up8, 128, dropout_val, batch_norm)
up9 = merge([UpSampling2D(size=(2, 2))(conv9), conv2], mode='concat', concat_axis=1)
conv10 = double_conv_layer(up9, 64, dropout_val, batch_norm)
up10 = merge([UpSampling2D(size=(2, 2))(conv10), conv1], mode='concat', concat_axis=1)
conv11 = double_conv_layer(up10, 32, 0, batch_norm)
conv12 = Convolution2D(1, 1, 1)(conv11)
conv12 = BatchNormalization(mode=0, axis=1)(conv12)
conv12 = Activation('sigmoid')(conv12)
model = Model(input=inputs, output=conv12)
return model
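double_conv_layer is not shown in this excerpt. The sketch below is an assumed reconstruction that matches how it is called above: two 3x3 'same' convolutions with optional batch normalization and dropout, using the same Keras 1, channels-first API as the rest of the snippet.

from keras.layers import Convolution2D
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Dropout, Activation

def double_conv_layer(x, size, dropout, batch_norm):
    conv = Convolution2D(size, 3, 3, border_mode='same')(x)
    if batch_norm:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    conv = Convolution2D(size, 3, 3, border_mode='same')(conv)
    if batch_norm:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    if dropout > 0:
        conv = Dropout(dropout)(conv)
    return conv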
def build_model():
model = Sequential()
# First convolution: 4 filters with 5x5 kernels on the single-channel 20x20 input
model.add(Convolution2D(4, 5, 5, border_mode='valid', dim_ordering='th', input_shape=(1, 20, 20)))
model.add(ZeroPadding2D((1, 1)))
model.add(BatchNormalization())
model.add(Activation('tanh'))
# Second block: add Gaussian noise, upsample 2x, then a 16-filter 3x3 atrous convolution
model.add(GaussianNoise(0.001))
model.add(UpSampling2D(size=(2, 2), dim_ordering='th'))
model.add(AtrousConvolution2D(16, 3, 3, border_mode='valid', dim_ordering='th'))
model.add(ZeroPadding2D((1, 1)))
model.add(BatchNormalization())
model.add(Activation('tanh'))
# model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
model.add(AveragePooling2D(pool_size=(2, 2), dim_ordering='th'))
model.add(Activation('tanh'))
# Third block: 8-filter 3x3 atrous convolution
model.add(AtrousConvolution2D(8, 3, 3, border_mode='valid', dim_ordering='th'))
model.add(ZeroPadding2D((1, 1)))
model.add(BatchNormalization())
model.add(Activation('linear'))
# Fourth block: Gaussian noise followed by a 4-filter 3x3 atrous convolution
model.add(GaussianNoise(0.002))
model.add(AtrousConvolution2D(4, 3, 3, border_mode='valid', dim_ordering='th'))
model.add(ZeroPadding2D((1, 1)))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(Dropout(0.2))
# model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
model.add(AveragePooling2D(pool_size=(2, 2), dim_ordering='th'))
model.add(Activation('tanh'))
# Flatten the feature maps and pass them through fully connected layers
model.add(Flatten())
model.add(Dense(8))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(Dense(1))
model.add(Activation('linear'))
start = time.time()
# Alternative: SGD with momentum (see the commented-out lines below)
# model.compile sets the loss function (mean squared error here)
# sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(loss="mse", optimizer=sgd)
model.compile(loss="mse", optimizer="rmsprop") # mse kld # Nadam rmsprop
print "Compilation Time : ", time.time() - start
return model
def build_model():
model = Sequential()
# First convolutional block: 8 filters with 5x5 kernels
model.add(Convolution2D(8, 5, 5, border_mode='valid', dim_ordering='th', input_shape=(1, 20, 20)))
model.add(ZeroPadding2D((1, 1)))
model.add(GaussianNoise(0.001))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
model.add(Activation('tanh'))
# Second convolutional block: 16-filter 3x3 atrous convolution
# model.add(GaussianNoise(0.001))
# model.add(UpSampling2D(size=(2, 2), dim_ordering='th'))
model.add(AtrousConvolution2D(16, 3, 3, border_mode='valid', dim_ordering='th'))
# model.add(ZeroPadding2D((1, 1)))
model.add(Activation('tanh'))
# model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
# model.add(Activation('tanh'))
# Flatten the feature maps and pass them through a fully connected layer
model.add(Flatten())
model.add(Dense(20))
model.add(Activation('tanh'))
# LSTM layers
model.add(Reshape((20, 1)))
model.add(LSTM(input_dim=1, output_dim=32, activation='tanh', inner_activation='tanh', return_sequences=True))
model.add(GaussianNoise(0.01))
model.add(LSTM(64, activation='tanh', inner_activation='tanh', return_sequences=False))
model.add(Dropout(0.2)) # Dropout overfitting
model.add(Dense(1))
model.add(Activation('linear'))
start = time.time()
# Alternative: SGD with momentum (see the commented-out lines below)
# model.compile sets the loss function (mean squared error here)
# sgd = SGD(lr=0.08, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(loss="mse", optimizer=sgd)
model.compile(loss="mse", optimizer="Nadam") # Nadam # rmsprop
print "Compilation Time : ", time.time() - start
return model
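A hypothetical smoke test that fits either build_model() variant above on random data; Theano dim ordering and the Keras 1.x nb_epoch argument are assumptions carried over from the snippets themselves.

import numpy as np

model = build_model()
X = np.random.rand(16, 1, 20, 20).astype('float32')   # 16 single-channel 20x20 samples
y = np.random.rand(16, 1).astype('float32')            # scalar regression target
model.fit(X, y, batch_size=4, nb_epoch=1, verbose=1)
print(model.predict(X[:2]))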
def __init__(self, h_in, w_in, dims):
# Each MaxPooling2D (2,2) layer halves the image size
resize_factor = len(dims)
# Number of filters on last layer correspond to
# the number of filters of the last conv layer
filters_encoded = dims[-1][0]
input_img = Input(shape=(1,h_in, w_in), name='EncoderIn')
decoder_input = Input(shape=(filters_encoded, h_in // (2 ** resize_factor), w_in // (2 ** resize_factor)), name='DecoderIn')
# Construct encoder layers
encoded = input_img
for i, (filters, rows, cols) in enumerate(dims):
name = 'Conv{0}'.format(i)
encoded = Convolution2D(filters, rows, cols, activation='relu', border_mode='same', name=name)(encoded)
encoded = MaxPooling2D((2, 2), border_mode='same', name= 'MaxPool{0}'.format(i))(encoded)
# Construct decoder layers
# The decoded tensor is connected to the encoder layers, whereas the standalone decoder path is not
decoded = encoded
decoder = decoder_input
for i, (filters, rows, cols) in enumerate(reversed(dims)):
convlayer = Convolution2D(filters, rows, cols, activation='relu', border_mode='same', name='Deconv{0}'.format(i))
decoded = convlayer(decoded)
decoder = convlayer(decoder)
upsample = UpSampling2D((2, 2), name='UpSampling{0}'.format(i))
decoded = upsample(decoded)
decoder = upsample(decoder)
# Reduce from X filters to 1 in the output layer. Make sure it's sigmoid for the [0..1] range
convlayer = Convolution2D(1, dims[0][0], dims[0][1], activation='sigmoid', border_mode='same')
decoded = convlayer(decoded)
decoder = convlayer(decoder)
self.autoencoder = Model(input=input_img, output=decoded)
self.encoder = Model(input=input_img, output=encoded)
self.decoder = Model(input=decoder_input, output=decoder)
self.autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
archs.py (project: kaggle-dstl-satellite-imagery-feature-detection, author: alno)
def rnet1(input_shapes, n_classes):
def conv(size, x):
x = Convolution2D(size, 3, 3, border_mode='same', init='he_normal', bias=False)(x)
x = BatchNormalization(axis=1, mode=0)(x)
x = PReLU(shared_axes=[2, 3])(x)
return x
def unet_block(sizes, inp):
x = inp
skips = []
for sz in sizes[:-1]:
x = conv(sz, x)
skips.append(x)
x = MaxPooling2D((2, 2))(x)
x = conv(sizes[-1], x)
for sz in reversed(sizes[:-1]):
x = conv(sz, merge([UpSampling2D((2, 2))(x), skips.pop()], mode='concat', concat_axis=1))
return x
def fcn_block(sizes, inp):
x = inp
for sz in sizes:
x = conv(sz, x)
return Dropout(0.2)(x)
# Build pyramid of inputs
inp0 = Input(input_shapes['in'], name='in')
inp1 = AveragePooling2D((2, 2))(inp0)
inp2 = AveragePooling2D((2, 2))(inp1)
# Build outputs in resnet fashion
out2 = unet_block([32, 48], inp2)
#out2 = merge([unet_block([32, 48, 32], merge([inp2, out2], mode='concat', concat_axis=1)), out2], mode='sum')
out1 = UpSampling2D((2, 2))(out2)
#out1 = merge([unet_block([32, 32, 48], merge([inp1, out1], mode='concat', concat_axis=1)), out1], mode='sum')
out1 = merge([unet_block([32, 48, 64], merge([inp1, out1], mode='concat', concat_axis=1)), out1], mode='sum')
out0 = UpSampling2D((2, 2))(out1)
out0 = merge([unet_block([32, 48, 64], merge([inp0, out0], mode='concat', concat_axis=1)), out0], mode='sum')
out0 = merge([unet_block([32, 48, 64], merge([inp0, out0], mode='concat', concat_axis=1)), out0], mode='sum')
# Final convolution
out = Convolution2D(n_classes, 1, 1, activation='sigmoid')(out0)
return Model(input=inp0, output=out)
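A hypothetical call (band count and tile size are assumptions); channels-first ordering is implied by concat_axis=1 and BatchNormalization(axis=1) in the blocks above, and the tile size must be divisible by 8 so the pooling/upsampling pyramid lines up.

model = rnet1({'in': (12, 80, 80)}, n_classes=10)
model.compile(optimizer='adam', loss='binary_crossentropy')
print(model.output_shape)   # expected (None, 10, 80, 80) with channels_first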
archs.py (project: kaggle-dstl-satellite-imagery-feature-detection, author: alno)
def rnet1_mi(input_shapes, n_classes):
def conv(size, x):
x = Convolution2D(size, 3, 3, border_mode='same', init='he_normal', bias=False)(x)
x = BatchNormalization(axis=1, mode=0)(x)
x = PReLU(shared_axes=[2, 3])(x)
return x
def unet_block(sizes, inp):
x = inp
skips = []
for sz in sizes[:-1]:
x = conv(sz, x)
skips.append(x)
x = MaxPooling2D((2, 2))(x)
x = conv(sizes[-1], x)
for sz in reversed(sizes[:-1]):
x = conv(sz, merge([UpSampling2D((2, 2))(x), skips.pop()], mode='concat', concat_axis=1))
return x
def radd(out, inp, block):
block_in = merge([inp, out], mode='concat', concat_axis=1)
block_out = block(block_in)
return merge([block_out, out], mode='sum')
in_I = Input(input_shapes['in_I'], name='in_I')
in_M = Input(input_shapes['in_M'], name='in_M')
# Build pyramid of inputs
inp0 = in_I
inp1 = AveragePooling2D((2, 2))(inp0)
inp2 = merge([AveragePooling2D((2, 2))(inp1), in_M], mode='concat', concat_axis=1)
inp3 = AveragePooling2D((2, 2))(inp2)
# Build outputs in resnet fashion
out3 = unet_block([32, 48], inp3)
out2 = UpSampling2D((2, 2))(out3)
out2 = radd(out2, inp2, lambda x: unet_block([32, 48], x))
out1 = UpSampling2D((2, 2))(out2)
out1 = radd(out1, inp1, lambda x: unet_block([32, 48], x))
out1 = radd(out1, inp1, lambda x: unet_block([32, 48, 64], x))
out0 = UpSampling2D((2, 2))(out1)
out0 = radd(out0, inp0, lambda x: unet_block([32, 48], x))
out0 = radd(out0, inp0, lambda x: unet_block([32, 48, 64], x))
# Final convolution
out = Convolution2D(n_classes, 1, 1, activation='sigmoid')(out0)
return Model(input=[in_I, in_M], output=out)
archs.py (project: kaggle-dstl-satellite-imagery-feature-detection, author: alno)
def unet_mi_2(input_shapes, n_classes):
in_I = Input(input_shapes['in_I'], name='in_I')
in_M = Input(input_shapes['in_M'], name='in_M')
conv1 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(in_I)
conv1 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(96, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Convolution2D(96, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(merge([pool2, in_M], mode='concat', concat_axis=1))
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool4)
conv5 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv5)
up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)
up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
conv8 = Convolution2D(96, 3, 3, activation='relu', border_mode='same')(up8)
conv8 = Convolution2D(96, 3, 3, activation='relu', border_mode='same')(conv8)
up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
conv9 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up9)
conv9 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv9)
conv10 = Convolution2D(n_classes, 1, 1, activation='sigmoid')(conv9)
return Model(input=[in_I, in_M], output=conv10)
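in_M is merged in after two 2x2 poolings, so it has to arrive at one quarter of in_I's spatial resolution. A hypothetical shape example (band counts and sizes are assumptions, channels-first ordering):

model = unet_mi_2({'in_I': (3, 80, 80), 'in_M': (8, 20, 20)}, n_classes=10)
print(model.output_shape)   # expected (None, 10, 80, 80)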
archs.py (project: kaggle-dstl-satellite-imagery-feature-detection, author: alno)
def dnet1_mi(input_shapes, n_classes):
def concat(xs):
if len(xs) == 1:
return xs[0]
return merge(xs, mode='concat', concat_axis=1)
def conv(k, s, x):
return Convolution2D(k, s, s, border_mode='same', init='he_normal')(x)
def dense_block(k, n, inp, append=False):
outputs = [inp] if append else []
for i in xrange(n):
x = Convolution2D(k, 3, 3, border_mode='same', init='he_normal')(inp)
x = BatchNormalization(axis=1, mode=0)(x)
x = PReLU(shared_axes=[2, 3])(x)
outputs.append(x)
inp = concat([inp, x])
return concat(outputs)
def down_block(x):
return MaxPooling2D((2, 2))(x)
def up_block(x):
return UpSampling2D(size=(2, 2))(x)
inputs = dict([(name, Input(shape, name=name)) for name, shape in input_shapes.items()])
# Downpath
d0 = conv(32, 1, concat([inputs['in_I'], inputs['in_IF']]))
c1 = dense_block(16, 2, d0, append=True)
d1 = down_block(c1)
c2 = dense_block(16, 3, d1, append=True)
d2 = down_block(c2)
c3 = dense_block(16, 4, concat([d2, inputs['in_M'], inputs['in_MI']]), append=True)
d3 = down_block(c3)
c4 = dense_block(16, 5, d3, append=True)
d4 = down_block(c4)
# Bottleneck
c5 = dense_block(16, 6, d4, append=True)
# Uppath
u4 = dense_block(16, 10, concat([c4, up_block(c5)]))
u3 = dense_block(16, 8, concat([c3, up_block(u4)]))
u2 = dense_block(16, 6, concat([c2, up_block(u3)]))
u1 = dense_block(16, 4, concat([c1, up_block(u2)]))
out = Activation('sigmoid')(conv(n_classes, 1, u1))
return Model(input=inputs.values(), output=out)
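A hypothetical call for this dense-block variant: in_M and in_MI join the network after two down blocks, so they sit at one quarter of the in_I / in_IF resolution. All shapes below are assumptions (channels-first; Python 2 is implied by the xrange above).

input_shapes = {
    'in_I': (3, 80, 80),
    'in_IF': (5, 80, 80),
    'in_M': (8, 20, 20),
    'in_MI': (8, 20, 20),
}
model = dnet1_mi(input_shapes, n_classes=10)
print(model.output_shape)   # expected (None, 10, 80, 80)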