def tsinalis(input_shape, n_classes):
"""
Input size should be [batch, 1d, 2d, ch] = (None, 1, 15000, 1)
"""
model = Sequential(name='Tsinalis')
    model.add(Conv1D(kernel_size=200, filters=20, input_shape=input_shape, activation='relu'))
    print(model.input_shape)
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size=20, strides=10))
    print(model.output_shape)
    model.add(keras.layers.core.Reshape([20, -1, 1]))
    print(model.output_shape)
    model.add(Conv2D(kernel_size=(20, 30), filters=400, activation='relu'))
    print(model.output_shape)
    model.add(MaxPooling2D(pool_size=(1, 10), strides=(1, 2)))
    print(model.output_shape)
    model.add(Flatten())
    print(model.output_shape)
    model.add(Dense(500, activation='relu'))
    model.add(Dense(500, activation='relu'))
    model.add(Dense(n_classes, activation='softmax', activity_regularizer=keras.regularizers.l2()))
    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.SGD(),
                  metrics=[keras.metrics.categorical_accuracy])
return model
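# A minimal usage sketch (an assumption, not from the original source): build the
# network with the 3-D input shape the Conv1D stack expects; n_classes=5 matches
# typical five-stage sleep scoring but is only a placeholder here.
def _demo_tsinalis():
    model = tsinalis(input_shape=(15000, 1), n_classes=5)
    model.summary()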
def create_Kao_Onet(weight_path='model48.h5'):
    input = Input(shape=[48, 48, 3])
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu4')(x)
    x = Permute((3, 2, 1))(x)
    x = Flatten()(x)
    x = Dense(256, name='conv5')(x)
    x = PReLU(name='prelu5')(x)
    classifier = Dense(2, activation='softmax', name='conv6-1')(x)
    bbox_regress = Dense(4, name='conv6-2')(x)
    landmark_regress = Dense(10, name='conv6-3')(x)
model = Model([input], [classifier, bbox_regress, landmark_regress])
model.load_weights(weight_path, by_name=True)
return model
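# A usage sketch (assumptions: the 'model48.h5' weights exist locally and crops
# are float RGB in [0, 1]): ONet scores a 48x48 face crop and regresses a
# bounding box plus five landmark points.
def _demo_onet():
    import numpy as np
    onet = create_Kao_Onet('model48.h5')
    crop = np.random.rand(1, 48, 48, 3).astype('float32')  # stand-in for a real face crop
    cls_prob, bbox, landmarks = onet.predict(crop)
    print(cls_prob.shape, bbox.shape, landmarks.shape)  # (1, 2) (1, 4) (1, 10)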
def make_teacher_model(train_data, validation_data, nb_epoch=3):
'''Train a simple CNN as teacher model.
'''
model = Sequential()
    model.add(Conv2D(64, (3, 3), input_shape=input_shape,
                     padding='same', name='conv1'))  # input_shape and nb_class are module-level globals
    model.add(MaxPooling2D(name='pool1'))
    model.add(Conv2D(64, (3, 3), padding='same', name='conv2'))
model.add(MaxPooling2D(name='pool2'))
model.add(Flatten(name='flatten'))
model.add(Dense(64, activation='relu', name='fc1'))
model.add(Dense(nb_class, activation='softmax', name='fc2'))
model.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=0.01, momentum=0.9),
metrics=['accuracy'])
train_x, train_y = train_data
    history = model.fit(train_x, train_y, epochs=nb_epoch,
                        validation_data=validation_data)
return model, history
def test_keras_import(self):
# Pad 1D
model = Sequential()
model.add(ZeroPadding1D(2, input_shape=(224, 3)))
model.add(Conv1D(32, 7, strides=2))
model.build()
self.pad_test(model, 'pad_w', 2)
# Pad 2D
model = Sequential()
model.add(ZeroPadding2D(2, input_shape=(224, 224, 3)))
model.add(Conv2D(32, 7, strides=2))
model.build()
self.pad_test(model, 'pad_w', 2)
# Pad 3D
model = Sequential()
model.add(ZeroPadding3D(2, input_shape=(224, 224, 224, 3)))
model.add(Conv3D(32, 7, strides=2))
model.build()
self.pad_test(model, 'pad_w', 2)
# ********** Export json tests **********
# ********** Data Layers Test **********
def get_model():
inputs = Input(shape=(64, 64, 3))
conv_1 = Conv2D(1, (3, 3), strides=(1, 1), padding='same')(inputs)
act_1 = Activation('relu')(conv_1)
conv_2 = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(act_1)
act_2 = Activation('relu')(conv_2)
deconv_1 = Conv2DTranspose(64, (3, 3), strides=(1, 1), padding='same')(act_2)
act_3 = Activation('relu')(deconv_1)
merge_1 = concatenate([act_3, act_1], axis=3)
deconv_2 = Conv2DTranspose(1, (3, 3), strides=(1, 1), padding='same')(merge_1)
act_4 = Activation('relu')(deconv_2)
model = Model(inputs=[inputs], outputs=[act_4])
model.compile(optimizer='adadelta', loss=dice_coef_loss, metrics=[dice_coef])
return model
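# dice_coef and dice_coef_loss are referenced above but not defined in this
# snippet; a minimal sketch of the usual definitions (an assumption about the
# original implementation):
from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Dice overlap between flattened masks; `smooth` guards against division by zero
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)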
def create_Kao_Rnet(weight_path='model24.h5'):
    input = Input(shape=[24, 24, 3])  # change this shape to [None, None, 3] to enable arbitrary shape input
x = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(input)
x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
x = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(x)
x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
x = MaxPool2D(pool_size=3, strides=2)(x)
x = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(x)
x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
x = Permute((3, 2, 1))(x)
x = Flatten()(x)
x = Dense(128, name='conv4')(x)
    x = PReLU(name='prelu4')(x)
classifier = Dense(2, activation='softmax', name='conv5-1')(x)
bbox_regress = Dense(4, name='conv5-2')(x)
model = Model([input], [classifier, bbox_regress])
model.load_weights(weight_path, by_name=True)
return model
def create_actor_network(self, state_size, action_dim):
"""Create actor network."""
print ("[MESSAGE] Build actor network.""")
S = Input(shape=state_size)
h_0 = Conv2D(32, (3, 3), padding="same",
kernel_regularizer=l2(0.0001),
activation="relu")(S)
h_1 = Conv2D(32, (3, 3), padding="same",
kernel_regularizer=l2(0.0001),
activation="relu")(h_0)
h_1 = AveragePooling2D(2, 2)(h_1)
h_1 = Flatten()(h_1)
h_1 = Dense(600, activation="relu")(h_1)
A = Dense(action_dim, activation="softmax")(h_1)
model = Model(inputs=S, outputs=A)
return model, model.trainable_weights, S
def _conv_bn_relu(**conv_params):
"""Helper to build a conv -> BN -> relu residual unit activation function.
This is the original ResNet v1 scheme in https://arxiv.org/abs/1512.03385
"""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
dilation_rate = conv_params.setdefault("dilation_rate", (1, 1))
conv_name = conv_params.setdefault("conv_name", None)
bn_name = conv_params.setdefault("bn_name", None)
relu_name = conv_params.setdefault("relu_name", None)
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(x):
x = Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
dilation_rate=dilation_rate,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
name=conv_name)(x)
return _bn_relu(x, bn_name=bn_name, relu_name=relu_name)
return f
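# _bn_relu is assumed by _conv_bn_relu above and _bn_relu_conv below; a minimal
# sketch consistent with how it is called (batch norm on the channel axis, then
# ReLU; channels-last ordering is an assumption):
def _bn_relu(x, bn_name=None, relu_name=None):
    x = BatchNormalization(axis=-1, name=bn_name)(x)
    return Activation('relu', name=relu_name)(x)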
def _bn_relu_conv(**conv_params):
"""Helper to build a BN -> relu -> conv residual unit with full pre-activation function.
This is the ResNet v2 scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
"""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
dilation_rate = conv_params.setdefault("dilation_rate", (1, 1))
conv_name = conv_params.setdefault("conv_name", None)
bn_name = conv_params.setdefault("bn_name", None)
relu_name = conv_params.setdefault("relu_name", None)
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(x):
activation = _bn_relu(x, bn_name=bn_name, relu_name=relu_name)
return Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
dilation_rate=dilation_rate,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
name=conv_name)(activation)
return f
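# A composition sketch (an assumption about intended use; keras.layers.add is
# assumed imported): a full pre-activation residual unit built from the helper
# above, valid when x already has `filters` channels.
def _demo_preact_unit(x, filters):
    residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(x)
    residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(residual)
    return add([x, residual])  # stride 1 and 'same' padding keep shapes aligned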
def __initial_conv_block(input, k=1, dropout=0.0, initial=False):
init = input
channel_axis = 1 if K.image_dim_ordering() == 'th' else -1
    # If this is the first block, project the input to 16 * k channels with a
    # 1x1 convolution so the residual addition below has matching shapes.
    if initial:
        init = Conv2D(16 * k, (1, 1), kernel_initializer='he_normal', padding='same')(init)
x = BatchNormalization(axis=channel_axis)(input)
x = Activation('relu')(x)
x = Conv2D(16 * k, (3, 3), padding='same', kernel_initializer='he_normal')(x)
if dropout > 0.0:
x = Dropout(dropout)(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
x = Conv2D(16 * k, (3, 3), padding='same', kernel_initializer='he_normal')(x)
m = add([init, x])
return m
def make_model(dense_layer_sizes, filters, kernel_size, pool_size):
'''Creates model comprised of 2 convolutional layers followed by dense layers
dense_layer_sizes: List of layer sizes.
This list has one number for each layer
filters: Number of convolutional filters in each convolutional layer
kernel_size: Convolutional kernel size
pool_size: Size of pooling area for max pooling
'''
model = Sequential()
model.add(Conv2D(filters, kernel_size,
padding='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(Conv2D(filters, kernel_size))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
for layer_size in dense_layer_sizes:
model.add(Dense(layer_size))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
return model
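# A usage sketch (an assumption, mirroring the scikit-learn wrapper pattern this
# builder is usually paired with): grid-search the dense topology and the conv
# hyperparameters; train_x/train_y are placeholders.
def _demo_grid_search(train_x, train_y):
    from keras.wrappers.scikit_learn import KerasClassifier
    from sklearn.model_selection import GridSearchCV
    clf = KerasClassifier(build_fn=make_model)
    validator = GridSearchCV(clf,
                             param_grid={'dense_layer_sizes': [[32], [64, 64]],
                                         'filters': [8],
                                         'kernel_size': [3],
                                         'pool_size': [2]},
                             scoring='neg_log_loss', n_jobs=1)
    validator.fit(train_x, train_y)
    return validator.best_params_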
def make_teacher_model(train_data, validation_data, epochs=3):
'''Train a simple CNN as teacher model.
'''
model = Sequential()
model.add(Conv2D(64, 3, input_shape=input_shape,
padding='same', name='conv1'))
model.add(MaxPooling2D(2, name='pool1'))
model.add(Conv2D(64, 3, padding='same', name='conv2'))
model.add(MaxPooling2D(2, name='pool2'))
model.add(Flatten(name='flatten'))
model.add(Dense(64, activation='relu', name='fc1'))
model.add(Dense(num_class, activation='softmax', name='fc2'))
model.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=0.01, momentum=0.9),
metrics=['accuracy'])
train_x, train_y = train_data
history = model.fit(train_x, train_y,
epochs=epochs,
validation_data=validation_data)
return model, history
def get_model():
input_shape = (image_size, image_size, 3)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), padding='same',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(n_classes, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(GlobalAveragePooling2D())
    model.summary()  # summary() prints itself; wrapping it in print() would emit an extra 'None'
model.compile(loss=keras.losses.mean_squared_error,
optimizer= keras.optimizers.Adadelta())
return model
def plot_network(image, model, label=None):
    layer_names = [l.name for l in model.layers if isinstance(l, Conv2D)]
    n_conv = len(layer_names)
    n_axes = n_conv
    prediction = model.predict(np.expand_dims(image, 0))
    mng = plt.get_current_fig_manager()
    mng.full_screen_toggle()
    fig, [axlist1, axlist2] = plt.subplots(2, n_conv)
    diagnosis = ["negative", "positive"]
    for j in range(n_conv):
        plot_heatmap(image, model, layer_names[j], "abnormal", axlist1[j])
        # axlist1[j].set_xlabel(layer_names[j] + "ab")
    for j in range(n_conv):
        plot_heatmap(image, model, layer_names[j], "normal", axlist2[j], cmap=plt.cm.inferno)
    fig.suptitle("Prediction: {}, {}".format(prediction, label))
    fig.show()
def _conv_block(layer, num_conv_layers, num_filters):
"""Build a conv block on top of inputs
:param inputs: Keras Layer object representing the VGG net up to this
point
:param num_conv_layers: int for the number of convolutional layers to
include in this block
:param num_filters: int for the number of filters per convolutional
layer
"""
for _ in range(num_conv_layers - 1):
layer = Conv2D(
filters=num_filters, kernel_size=(3, 3), padding='same',
activation='relu'
)(layer)
layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(layer)
return layer
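# A usage sketch (an assumption about intended use, since the loop above adds
# num_conv_layers - 1 convolutions: the caller supplies the first convolution of
# each block; Input/Model are assumed imported):
def _demo_vgg_stack():
    inputs = Input(shape=(224, 224, 3))
    x = Conv2D(64, (3, 3), padding='same', activation='relu')(inputs)  # first conv of block 1
    x = _conv_block(x, num_conv_layers=2, num_filters=64)              # 1 more conv + pool
    x = Conv2D(128, (3, 3), padding='same', activation='relu')(x)      # first conv of block 2
    x = _conv_block(x, num_conv_layers=2, num_filters=128)
    return Model(inputs, x)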
def cnn(height, width):
question_input = Input(shape=(height, width, 1), name='question_input')
conv1_Q = Conv2D(512, (2, 320), activation='sigmoid', padding='valid',
kernel_regularizer=regularizers.l2(0.01),
kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.02))(question_input)
Max1_Q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(conv1_Q)
F1_Q = Flatten()(Max1_Q)
Drop1_Q = Dropout(0.25)(F1_Q)
predictQ = Dense(32, activation='relu',
kernel_regularizer=regularizers.l2(0.01),
kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.02))(Drop1_Q)
prediction2 = Dropout(0.25)(predictQ)
predictions = Dense(1, activation='relu')(prediction2)
model = Model(inputs=[question_input],
outputs=predictions)
model.compile(loss='mean_squared_error',
optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0))
# model.compile(loss='mean_squared_error',
# optimizer='nadam')
return model
def block(self, num_filters, num_layers, kernel_size, strides, input_tensor):
        x = Conv2D(num_layers, (1, 1), strides=strides)(input_tensor)  # note: num_layers (not num_filters) sets this 1x1 conv's filter count
x = Activation(selu)(x)
x = Conv2D(num_filters, kernel_size, padding='same')(x)
x = Activation(selu)(x)
x = Conv2D(num_filters*4, (1, 1))(x)
        shortcut = Conv2D(num_filters * 4, (1, 1), strides=strides)(input_tensor)
x = layers.add([x, shortcut])
x = Activation(selu)(x)
return x
def keepsize_256(nx, ny, noise, depth, activation='relu', n_filters=64, l2_reg=1e-7):
"""
Deep residual network that keeps the size of the input throughout the whole network
"""
def residual(inputs, n_filters):
x = ReflectionPadding2D()(inputs)
x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
x = BatchNormalization()(x)
x = Activation(activation)(x)
x = ReflectionPadding2D()(x)
x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
x = BatchNormalization()(x)
x = add([x, inputs])
return x
inputs = Input(shape=(nx, ny, 1))
x = GaussianNoise(noise)(inputs)
x = ReflectionPadding2D()(x)
x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
x0 = Activation(activation)(x)
x = residual(x0, n_filters)
for i in range(depth-1):
x = residual(x, n_filters)
x = ReflectionPadding2D()(x)
x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
x = BatchNormalization()(x)
x = add([x, x0])
# Upsampling for superresolution
x = UpSampling2D()(x)
x = ReflectionPadding2D()(x)
x = Conv2D(4*n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
x = Activation(activation)(x)
final = Conv2D(1, (1, 1), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
return Model(inputs=inputs, outputs=final)
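# ReflectionPadding2D is a custom layer, not stock Keras; a minimal channels-last
# sketch (an assumption consistent with the 'valid' 3x3 convolutions it wraps:
# one pixel of reflection padding per border keeps the spatial size unchanged):
import tensorflow as tf
from keras.engine.topology import Layer

class ReflectionPadding2D(Layer):
    def __init__(self, padding=(1, 1), **kwargs):
        self.padding = tuple(padding)
        super(ReflectionPadding2D, self).__init__(**kwargs)

    def compute_output_shape(self, input_shape):
        h_pad, w_pad = self.padding
        rows = input_shape[1] + 2 * h_pad if input_shape[1] is not None else None
        cols = input_shape[2] + 2 * w_pad if input_shape[2] is not None else None
        return (input_shape[0], rows, cols, input_shape[3])

    def call(self, x):
        h_pad, w_pad = self.padding
        return tf.pad(x, [[0, 0], [h_pad, h_pad], [w_pad, w_pad], [0, 0]], mode='REFLECT')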
def create_network():
input_img = Input(shape=INPUT_SHAPE)
x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)
# at this point the representation is (4, 4, 8) i.e. 128-dimensional
x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, (3, 3), activation='relu')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
model = Model(input_img, decoded)
model.compile(optimizer='adadelta', loss='binary_crossentropy')
return KerasNetwork(model, 'weights_conv_autoencoder.hd5')
def create_network():
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=INPUT_SHAPE))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return KerasNetwork(model, 'cnn_weights.hd5')
def net_input(env):
"""
Create input part of the network with optional prescaling.
:return: input_tensor, output_tensor
"""
in_t = Input(shape=env.observation_space.shape, name='input')
    out_t = Conv2D(32, (5, 5), activation='relu', padding='same')(in_t)
    out_t = MaxPooling2D((2, 2))(out_t)
    out_t = Conv2D(32, (5, 5), activation='relu', padding='same')(out_t)
    out_t = MaxPooling2D((2, 2))(out_t)
    out_t = Conv2D(64, (4, 4), activation='relu', padding='same')(out_t)
    out_t = MaxPooling2D((2, 2))(out_t)
    out_t = Conv2D(64, (3, 3), activation='relu', padding='same')(out_t)
out_t = Flatten(name='flat')(out_t)
out_t = Dense(512, name='l1', activation='relu')(out_t)
return in_t, out_t
def make_init_model():
input_data = Input(shape=(32, 32, 3))
    init_model_index = random.randint(1, 4)
    init_model_index = 2  # the random draw above is immediately pinned to variant 2
    if init_model_index == 1:    # one conv layer with kernel num = 64
        x = Conv2D(64, (1, 1), padding='same')(input_data)
    elif init_model_index == 2:  # two conv layers with kernel num = 64
        x = Conv2D(64, (1, 1), padding='same')(input_data)
        x = Conv2D(64, (1, 1), padding='same')(x)
    elif init_model_index == 3:  # one conv layer with a wider kernel num = 128
        x = Conv2D(128, (1, 1), padding='same')(input_data)
    elif init_model_index == 4:  # two conv layers with a wider kernel num = 128
        x = Conv2D(128, (1, 1), padding='same')(input_data)
        x = Conv2D(128, (1, 1), padding='same')(x)
    stem_global_pooling_1 = GlobalMaxPooling2D()(x)  # pool the deepest conv so two-layer variants actually use both convs
stem_softmax_1 = Activation('softmax')(stem_global_pooling_1)
model = Model(inputs=input_data, outputs=stem_softmax_1)
return model
def Build(model_list):
    print(model_list)
for idx, layer in enumerate(model_list):
type = layer[0]
if type == 'InputLayer':
input = Input(shape=layer[1])
x = input
elif type == 'Conv2D':
x = Conv2D(filters=layer[2], kernel_size=layer[1], padding='same')(x)
elif type == 'InceptionBlock':
x = inception_block(x, idx)
elif type == 'ResidualBlock':
x = residual_block(x, layer[1], idx)
elif type == "GlobalMaxPooling2D":
x = GlobalMaxPooling2D()(x)
elif type == "Activation":
x = Activation('softmax')(x)
model = Model(inputs=input, outputs=x)
return model
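# A usage sketch (an assumption: the per-layer list encoding is inferred from the
# branches above, i.e. ['Conv2D', kernel_size, filters]):
def _demo_build():
    return Build([['InputLayer', (32, 32, 3)],
                  ['Conv2D', (3, 3), 64],
                  ['GlobalMaxPooling2D'],
                  ['Activation']])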
def conv_wider(model):
model_list = get_model_list(model)
for idx, layer in enumerate(model_list):
if layer[0] == 'Conv2D':
wider_layer = layer
insert_idx = idx + 1
# wider operation: filters * 2
wider_layer[2] *= 2
# if next layer is residual layer, we need to change residual layer's input shape
            while insert_idx < len(model_list) and model_list[insert_idx][0] == 'ResidualBlock':
                model_list[insert_idx][1] = wider_layer[2]
                insert_idx += 1
new_model = Build(model_list)
return new_model
def add_skipping(model):
model_list = get_model_list(model)
insert_idx = -1
# TODO: need to get the output shape from the last layer, use it as a parameter
for idx, layer in enumerate(model_list):
        if layer[0] in ('Conv2D', 'InceptionBlock', 'ResidualBlock'):
insert_idx = idx + 1
if layer[0] == 'Conv2D':
pre_output_shape = layer[2]
else:
pre_output_shape = layer[1]
if insert_idx != -1:
model_list.insert(insert_idx, ['ResidualBlock', pre_output_shape])
new_model = Build(model_list)
return new_model
def update(self):
        import re
        self.type2ind = {}
        for node in self.nodes():
            ind = int(re.findall(r'^\w+?(\d+)$', node.name)[0])
            self.type2ind[node.type] = self.type2ind.get(node.type, []) + [ind]
for node in nx.topological_sort(self):
if node.type in ['Conv2D', 'Group', 'Conv2D_Pooling']:
plus = 1
else:
plus = 0
if len(self.predecessors(node)) == 0:
node.depth = 0
else:
pre_depth = [_node.depth for _node in self.predecessors(node)]
pre_depth = max(pre_depth)
node.depth = self.max_depth = pre_depth + plus
def get_model_list(self, model):
model_list = []
model_dict = json.loads(model.to_json())
model_layer = model_dict['config']['layers']
for layer in model_layer:
layer_name = layer['config']['name']
layer_output_shape = model.get_layer(layer_name).output_shape
if layer['class_name'] == 'Conv2D' and layer['config']['name'].lower().startswith('conv'):
model_list.append([layer['class_name'], layer['config']['name'],
{'kernel_size': layer['config']['kernel_size'],
'filters': layer['config']['filters']}])
elif layer['class_name'] == 'GlobalMaxPooling2D':
model_list.append([layer['class_name'],
layer['config']['name'],
{}])
elif layer['class_name'] == 'Activation':
model_list.append([layer['class_name'],
layer['config']['name'],
{'activation_type': 'softmax'}])
return model_list
def build_model(self):
initializer = initializers.random_normal(stddev=0.02)
model = Sequential()
if self.padding:
model.add(ZeroPadding2D(padding=(1, 0), data_format="channels_first", input_shape=(self.layers, self.rows, self.columns)))
model.add(Conv2D(32, (8, 8), activation="relu", data_format="channels_first",
strides=(4, 4), kernel_initializer=initializer, padding='same',
input_shape=(self.layers, self.rows, self.columns)))
model.add(Conv2D(64, (4, 4), activation="relu", data_format="channels_first", strides=(2, 2),
kernel_initializer=initializer, padding='same'))
model.add(Conv2D(64, (3, 3), activation="relu", data_format="channels_first", strides=(1, 1),
kernel_initializer=initializer, padding='same'))
model.add(Flatten())
model.add(Dense(512, activation="relu", kernel_initializer=initializer))
model.add(Dense(self.actions_num, kernel_initializer=initializer))
adam = Adam(lr=1e-6)
model.compile(loss='mse', optimizer=adam)
return model
def deflating_convolution(inputs, n_deflation_layers, n_filters_init=32, noise=None, name_prefix=None):
def add_linear_noise(x, eps, ind):
flattened_deflated = Reshape((-1,), name=name_prefix + '_conv_flatten_{}'.format(ind))(x)
deflated_shape = ker.int_shape(x)
deflated_size = deflated_shape[1] * deflated_shape[2] * deflated_shape[3]
noise_transformed = Dense(deflated_size, activation=None,
name=name_prefix + '_conv_noise_dense_{}'.format(ind))(eps)
added_noise = Add(name=name_prefix + '_conv_add_noise_{}'.format(ind))([noise_transformed, flattened_deflated])
x = Reshape((deflated_shape[1], deflated_shape[2], deflated_shape[3]),
name=name_prefix + '_conv_backreshape_{}'.format(ind))(added_noise)
return x
deflated = Conv2D(filters=n_filters_init, kernel_size=(5, 5), strides=(2, 2),
padding='same', activation='relu', name=name_prefix + '_conv_0')(inputs)
if noise is not None:
deflated = add_linear_noise(deflated, noise, 0)
for i in range(1, n_deflation_layers):
deflated = Conv2D(filters=n_filters_init * (2**i), kernel_size=(5, 5), strides=(2, 2),
padding='same', activation='relu', name=name_prefix + '_conv_{}'.format(i))(deflated)
# if noise is not None:
# deflated = add_linear_noise(deflated, noise, i)
return deflated
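# A usage sketch (shapes are assumptions; Input/Model from keras are assumed
# imported): halve 64x64 feature maps twice while doubling the filter count,
# injecting a learned projection of the noise after the first convolution.
def _demo_deflate():
    images = Input(shape=(64, 64, 3))
    eps = Input(shape=(8,))
    out = deflating_convolution(images, n_deflation_layers=2,
                                n_filters_init=32, noise=eps, name_prefix='enc')
    return Model([images, eps], out)  # output shape: (None, 16, 16, 64)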