def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    subsample = (subsample_factor, subsample_factor, subsample_factor)

    x = BatchNormalization(axis=4)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=(1, 1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution3D(nb_filters, 1, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
Example source code using the Python class Convolution3D()
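The snippets on this page target the Keras 1.x API (Convolution3D with separate kernel dimensions, merge, border_mode, subsample, dim_ordering). None of them include their imports; a minimal set that covers the layers used below is sketched here as an assumption, since exact module paths vary between Keras 1.x releases:

# Assumed Keras 1.x imports for the examples on this page (not part of the original snippets).
from keras.models import Model, Sequential
from keras.layers import (Input, Activation, Dense, Dropout, Flatten, merge,
                          Convolution3D, MaxPooling3D, AveragePooling3D, UpSampling3D,
                          BatchNormalization)
from keras.optimizers import Adam, SGD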
def create_cnn_network(input_dim, no_conv_filt, dense_n):
    '''Base network to be shared (eq. to feature extraction).
    '''
    seq = Sequential()
    kern_size = 3
    # conv layers
    seq.add(Convolution3D(no_conv_filt, kern_size, kern_size, kern_size, input_shape=input_dim,
                          border_mode='valid', dim_ordering='th', activation='relu'))
    seq.add(Dropout(.1))
    seq.add(BatchNormalization(mode=2))
    # dense layers
    seq.add(Flatten())
    seq.add(Dense(dense_n, activation='relu'))
    seq.add(BatchNormalization(mode=2))
    return seq
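The docstring describes this Sequential stack as a base network to be shared, i.e. the same weights applied to more than one input (a siamese-style setup). A hedged usage sketch under that assumption; the patch shape, layer sizes, and pairing scheme here are illustrative and not taken from the source:

from keras.layers import Input, Dense, merge
from keras.models import Model

# Hypothetical pairing model: one shared base applied to two 3D patches (channels-first, 'th' ordering).
base = create_cnn_network(input_dim=(1, 15, 15, 15), no_conv_filt=16, dense_n=64)  # shapes assumed
input_a = Input(shape=(1, 15, 15, 15))
input_b = Input(shape=(1, 15, 15, 15))
feat_a = base(input_a)   # the same weights process both patches
feat_b = base(input_b)
merged = merge([feat_a, feat_b], mode='concat')
pred = Dense(1, activation='sigmoid')(merged)
pair_model = Model(input=[input_a, input_b], output=pred)
pair_model.compile(optimizer='adam', loss='binary_crossentropy')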
# a network with a couple dense layers
def create_cnn_network(input_dim, no_conv_filt, dense_n):
    '''Base network to be shared (eq. to feature extraction).
    '''
    seq = Sequential()
    kern_size = 3
    # conv layer
    seq.add(Convolution3D(no_conv_filt, kern_size, kern_size, kern_size, input_shape=input_dim,
                          border_mode='valid', dim_ordering='th', activation='relu'))
    # seq.add(Dropout(.1))
    seq.add(BatchNormalization(mode=2))
    # dense layer
    seq.add(Flatten())
    seq.add(Dense(dense_n, activation='relu'))
    seq.add(BatchNormalization(mode=2))
    return seq
# a CNN layer for intensity inputs
def create_cnn_network_small(input_dim, no_conv_filt, dense_n):
    '''Base network to be shared (eq. to feature extraction).
    '''
    seq = Sequential()
    kern_size = 3
    # conv layer
    seq.add(Convolution3D(no_conv_filt, kern_size, kern_size, kern_size, input_shape=input_dim,
                          border_mode='valid', dim_ordering='th', activation='relu'))
    seq.add(Dropout(.2))
    seq.add(BatchNormalization(mode=2))
    # dense layer
    seq.add(Flatten())
    seq.add(Dense(dense_n, activation='relu'))
    seq.add(Dropout(.2))
    seq.add(BatchNormalization(mode=2))
    return seq
# train model given x_train and y_train
def unet_model_xd3_2_6l_grid(nb_filter=48, dim=5, clen=3, img_rows=224, img_cols=224):  # NOTE: this procedure is/should be used with img_rows & img_cols as None
    # Aiming for an architecture similar to http://cs231n.stanford.edu/reports2016/317_Report.pdf:
    # "Our model is six layers deep, consisting of a series of three CONV-RELU-POOL layers (with 32, 32, and 64 3x3 filters),
    #  a CONV-RELU layer (with 128 3x3 filters), three UPSCALE-CONV-RELU layers (with 64, 32, and 32 3x3 filters), and a
    #  final 1x1 CONV-SIGMOID layer to output pixel-level predictions. Its structure resembles Figure 2, though with the
    #  number of pixels, filters, and levels as described here."
    ## 3D CNN version of a previously developed unet_model_xd_6j
    zconv = clen

    inputs = Input((1, dim, img_rows, img_cols))

    conv1 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)

    conv2 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)

    conv4 = Convolution3D(4*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(pool2)
    conv4 = Convolution3D(4*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv4)

    up6 = merge([UpSampling3D(size=(2, 2, 2))(conv4), conv2], mode='concat', concat_axis=1)
    conv6 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(up6)
    conv6 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv6)

    up7 = merge([UpSampling3D(size=(2, 2, 2))(conv6), conv1], mode='concat', concat_axis=1)  # original - only works for even dim
    conv7 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(up7)
    conv7 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv7)

    pool11 = MaxPooling3D(pool_size=(2, 1, 1))(conv7)
    conv12 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(pool11)
    conv12 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(conv12)
    pool12 = MaxPooling3D(pool_size=(2, 1, 1))(conv12)

    conv13 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(pool12)
    conv13 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(conv13)
    pool13 = MaxPooling3D(pool_size=(2, 1, 1))(conv13)

    if (dim < 16):
        conv8 = Convolution3D(1, 1, 1, 1, activation='sigmoid')(pool13)
    else:  # need one extra layer to get to 1D x 2D mask ...
        conv14 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(pool13)
        conv14 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(conv14)
        pool14 = MaxPooling3D(pool_size=(2, 1, 1))(conv14)
        conv8 = Convolution3D(1, 1, 1, 1, activation='sigmoid')(pool14)

    model = Model(input=inputs, output=conv8)
    model.compile(optimizer=Adam(lr=1e-4), loss=dice_coef_loss, metrics=[dice_coef])
    # model.compile(optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0), loss=dice_coef_loss, metrics=[dice_coef])

    return model
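The compile call above refers to dice_coef_loss and dice_coef, which are not included in this excerpt. A common soft-Dice definition that matches how they are used is sketched below; this is an assumption about the original helpers, not code from the source:

from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Soft Dice coefficient computed over the flattened mask volumes.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    # Negated Dice so that minimizing the loss maximizes overlap.
    return -dice_coef(y_true, y_pred)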
Source file: step5_train_nodule_detector.py (project: TC-Lung_nodules_detection, author: Shicoder)
def get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1), load_weight_path=None, features=False, mal=False) -> Model:
    inputs = Input(shape=input_shape, name="input_1")
    x = inputs
    # x = AveragePooling3D(pool_size=(2, 1, 1), strides=(2, 1, 1), border_mode="same")(x)
    x = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same', name='conv1', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), border_mode='valid', name='pool1')(x)

    # 2nd layer group
    x = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same', name='conv2', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool2')(x)
    # if USE_DROPOUT:
    #     x = Dropout(p=0.3)(x)

    # 3rd layer group
    x = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same', name='conv3a', subsample=(1, 1, 1))(x)
    x = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same', name='conv3b', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool3')(x)
    # if USE_DROPOUT:
    #     x = Dropout(p=0.4)(x)

    # 4th layer group
    x = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same', name='conv4a', subsample=(1, 1, 1))(x)
    x = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same', name='conv4b', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool4')(x)
    # if USE_DROPOUT:
    #     x = Dropout(p=0.5)(x)

    last64 = Convolution3D(64, 2, 2, 2, activation="relu", name="last_64")(x)
    out_class = Convolution3D(1, 1, 1, 1, activation="sigmoid", name="out_class_last")(last64)
    out_class = Flatten(name="out_class")(out_class)
    out_malignancy = Convolution3D(1, 1, 1, 1, activation=None, name="out_malignancy_last")(last64)
    out_malignancy = Flatten(name="out_malignancy")(out_malignancy)

    model = Model(input=inputs, output=[out_class, out_malignancy])
    if load_weight_path is not None:
        model.load_weights(load_weight_path, by_name=False)
    # model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True), loss={"out_class": "binary_crossentropy", "out_malignancy": mean_absolute_error}, metrics={"out_class": [binary_accuracy, binary_crossentropy], "out_malignancy": mean_absolute_error})
    model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True), loss={"out_class": "binary_crossentropy"}, metrics={"out_class": [binary_accuracy, binary_crossentropy]})

    if features:
        model = Model(input=inputs, output=[last64])
    model.summary(line_length=140)

    return model
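get_net also depends on several module-level names that sit outside this excerpt (CUBE_SIZE, LEARN_RATE, USE_DROPOUT, and the metric functions). Plausible definitions, consistent with how they are used but assumed rather than copied from the project, would be:

# Assumed module-level setup for get_net (values are illustrative).
from keras.metrics import binary_accuracy, binary_crossentropy, mean_absolute_error
from keras.optimizers import SGD

CUBE_SIZE = 32       # edge length of the cubic input patch
LEARN_RATE = 0.001   # SGD learning rate
USE_DROPOUT = False  # enables the (here commented-out) Dropout layers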
def define_model(image_shape):
    img_input = Input(shape=image_shape)
    x = Convolution3D(16, 5, 5, 5, subsample=(1, 1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=1)

    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = AveragePooling3D(pool_size=(4, 4, 8))(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', 'precision', 'recall', 'fmeasure'])
    model.summary()
    return model
def define_model(image_shape):
    img_input = Input(shape=image_shape)
    x = Convolution3D(16, 3, 3, 3, subsample=(1, 1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=4, subsample_factor=2)
    x = res_block(x, nb_filters=256, block=4, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=4, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=4, subsample_factor=1)

    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = AveragePooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2), border_mode='valid')(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', 'precision', 'recall', 'fmeasure'])
    model.summary()
    return model
def define_model():
    img_input = Input(shape=(32, 32, 64, 1))
    x = Convolution3D(16, 5, 5, 5, subsample=(1, 1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=1)

    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = AveragePooling3D(pool_size=(4, 4, 8))(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy')
    return model
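A hedged example of training the fixed-shape classifier above on pre-extracted 3D patches; the array names, shapes, and batch settings are illustrative assumptions:

import numpy as np

model = define_model()  # expects (32, 32, 64, 1) patches, channels last
x_train = np.random.rand(8, 32, 32, 64, 1).astype('float32')  # placeholder volumes
y_train = np.random.randint(0, 2, size=(8, 1))                # placeholder labels
model.fit(x_train, y_train, batch_size=4, nb_epoch=1)          # Keras 1.x uses nb_epoch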
Source file: step2_train_nodule_detector.py (project: kaggle_ndsb2017, author: juliandewit)
def get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1), load_weight_path=None, features=False, mal=False) -> Model:
    inputs = Input(shape=input_shape, name="input_1")
    x = inputs
    x = AveragePooling3D(pool_size=(2, 1, 1), strides=(2, 1, 1), border_mode="same")(x)
    x = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same', name='conv1', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), border_mode='valid', name='pool1')(x)

    # 2nd layer group
    x = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same', name='conv2', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool2')(x)
    if USE_DROPOUT:
        x = Dropout(p=0.3)(x)

    # 3rd layer group
    x = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same', name='conv3a', subsample=(1, 1, 1))(x)
    x = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same', name='conv3b', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool3')(x)
    if USE_DROPOUT:
        x = Dropout(p=0.4)(x)

    # 4th layer group
    x = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same', name='conv4a', subsample=(1, 1, 1))(x)
    x = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same', name='conv4b', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool4')(x)
    if USE_DROPOUT:
        x = Dropout(p=0.5)(x)

    last64 = Convolution3D(64, 2, 2, 2, activation="relu", name="last_64")(x)
    out_class = Convolution3D(1, 1, 1, 1, activation="sigmoid", name="out_class_last")(last64)
    out_class = Flatten(name="out_class")(out_class)
    out_malignancy = Convolution3D(1, 1, 1, 1, activation=None, name="out_malignancy_last")(last64)
    out_malignancy = Flatten(name="out_malignancy")(out_malignancy)

    model = Model(input=inputs, output=[out_class, out_malignancy])
    if load_weight_path is not None:
        model.load_weights(load_weight_path, by_name=False)
    model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True), loss={"out_class": "binary_crossentropy", "out_malignancy": mean_absolute_error}, metrics={"out_class": [binary_accuracy, binary_crossentropy], "out_malignancy": mean_absolute_error})

    if features:
        model = Model(input=inputs, output=[last64])
    model.summary(line_length=140)

    return model
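When features=True, this version of get_net swaps the two output heads for the last_64 feature volume after the weights are loaded. A hedged sketch of using it for feature extraction; the weight path and input array are assumptions:

import numpy as np

feature_model = get_net(load_weight_path="trained_nodule_model.hd5", features=True)  # path is hypothetical
cube = np.zeros((1, 32, 32, 32, 1), dtype='float32')  # assumes CUBE_SIZE == 32
feature_volume = feature_model.predict(cube)          # 5D activation map from the 'last_64' layer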
def create_cnn_network(input_dim):
    '''Base network to be shared (eq. to feature extraction).
    '''
    seq = Sequential()
    nb_filter = [12, 6]
    kern_size = 3
    # conv layers
    seq.add(Convolution3D(nb_filter[0], kern_size, kern_size, kern_size, input_shape=input_dim,
                          border_mode='valid', dim_ordering='th', activation='relu'))
    # seq.add(MaxPooling3D(pool_size=(2, 2, 2)))  # downsample
    seq.add(Dropout(.25))
    # conv layer 2
    # seq.add(Convolution3D(nb_filter[1], kern_size, kern_size, kern_size, border_mode='valid', dim_ordering='th',
    #                       activation='relu'))
    # # seq.add(MaxPooling3D(pool_size=(2, 2, 2), dim_ordering='th'))  # downsample
    # seq.add(Dropout(.25))
    # dense layers
    seq.add(Flatten())
    seq.add(Dense(100, activation='relu'))
    seq.add(Dropout(0.2))
    seq.add(Dense(50, activation='relu'))
    return seq
# load data
def create_cnn_network(input_dim):
    '''Base network to be shared (eq. to feature extraction).
    '''
    seq = Sequential()
    nb_filter = [12, 6]
    kern_size = 3
    # conv layers
    seq.add(Convolution3D(nb_filter[0], kern_size, kern_size, kern_size, input_shape=input_dim,
                          border_mode='valid', dim_ordering='th', activation='relu'))
    # seq.add(MaxPooling3D(pool_size=(2, 2, 2)))  # downsample
    seq.add(Dropout(.25))
    # conv layer 2
    seq.add(Convolution3D(nb_filter[1], kern_size, kern_size, kern_size, border_mode='same', dim_ordering='th',
                          activation='relu'))
    # seq.add(MaxPooling3D(pool_size=(2, 2, 2), dim_ordering='th'))  # downsample
    seq.add(Dropout(.25))
    # dense layers
    seq.add(Flatten())
    seq.add(Dense(100, activation='relu'))
    seq.add(Dropout(0.1))
    seq.add(Dense(50, activation='relu'))
    return seq
# load data
def create_cnn_network(input_dim):
    '''Base network to be shared (eq. to feature extraction).
    '''
    seq = Sequential()
    # conv layers
    kern_size = 3
    seq.add(Convolution3D(5, kern_size, kern_size, kern_size, input_shape=input_dim,
                          border_mode='valid', dim_ordering='th', activation='relu'))
    seq.add(Dropout(.25))
    seq.add(BatchNormalization(mode=2))
    kern_size = 3
    seq.add(Convolution3D(15, kern_size, kern_size, kern_size,
                          border_mode='valid', dim_ordering='th', activation='relu'))
    seq.add(Dropout(.25))
    seq.add(BatchNormalization(mode=2))
    # dense layers
    seq.add(Flatten())
    seq.add(Dense(50, activation='relu'))
    seq.add(Dropout(.25))
    seq.add(BatchNormalization(mode=2))
    return seq
# load data