def tsinalis(input_shape, n_classes):
    """
    Input size should be [batch, steps, channels] = (None, 15000, 1)
    for the Conv1D stack below.
    """
    model = Sequential(name='Tsinalis')
    model.add(Conv1D(kernel_size=200, filters=20, input_shape=input_shape, activation='relu'))
print(model.input_shape)
print(model.output_shape)
    model.add(MaxPooling1D(pool_size=20, strides=10))
    print(model.output_shape)
    model.add(keras.layers.Reshape((20, -1, 1)))
    print(model.output_shape)
    model.add(Conv2D(kernel_size=(20, 30), filters=400, activation='relu'))
    print(model.output_shape)
    model.add(MaxPooling2D(pool_size=(1, 10), strides=(1, 2)))
    print(model.output_shape)
    model.add(Flatten())
    print(model.output_shape)
    model.add(Dense(500, activation='relu'))
    model.add(Dense(500, activation='relu'))
    model.add(Dense(n_classes, activation='softmax', activity_regularizer=keras.regularizers.l2()))
    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.SGD(),
                  metrics=[keras.metrics.categorical_accuracy])
return model
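# Usage sketch: the 15000-sample single-channel input follows the docstring
# above; the five-class output is an assumption (e.g. five sleep stages):
# model = tsinalis(input_shape=(15000, 1), n_classes=5)
# model.summary()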
def largeann(input_shape, n_classes, layers=3, neurons=2000, dropout=0.35):
    """
    for working with extracted features
    """
    # gpu = switch_gpu()
    # with K.tf.device('/gpu:{}'.format(gpu)):
    #     K.set_session(K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)))
    model = Sequential(name='ann')
    # model.gpu = gpu
    for l in range(layers):
        model.add(Dense(neurons, input_shape=input_shape, activation='elu', kernel_initializer='he_normal'))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=[keras.metrics.categorical_accuracy])
    return model
#%% everything recurrent for ANN
def set_params(mo, bparams):
    i = 0
    for la in mo.layers:
        we = bparams[i:i + 2]  # one (weights, biases) pair per layer
        print(len(we))
        la.set_weights(we)
        i += 2
    return mo
#with open("best_model_keras.pkl", 'r') as f:
# b_params = pkl.load(f)
#
#model = set_params(model, b_params)
#out = model.predict(xvl, batch_size=xvl.shape[0], verbose=0)
#error = np.mean(np.mean(np.power(out - yvl, 2), axis=1))
#print "Error vl", error
#sys.exit()
#init_p = get_params(model)
#with open("init_keras_param.pkl", 'w') as f:
# pkl.dump(init_p, f)
def make_model(dense_layer_sizes, filters, kernel_size, pool_size):
    '''Creates a model comprised of 2 convolutional layers followed by dense layers.

    dense_layer_sizes: list of dense layer sizes, one number per layer
    filters: number of convolutional filters in each convolutional layer
    kernel_size: convolutional kernel size
    pool_size: size of pooling area for max pooling

    Note: `input_shape` and `num_classes` are module-level globals here.
    '''
model = Sequential()
model.add(Conv2D(filters, kernel_size,
padding='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(Conv2D(filters, kernel_size))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
for layer_size in dense_layer_sizes:
model.add(Dense(layer_size))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
return model
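# Usage sketch in the scikit-learn wrapper style this builder targets (the grid
# values are assumptions; make_model reads the module-level globals noted above):
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

my_classifier = KerasClassifier(make_model, batch_size=32)
validator = GridSearchCV(my_classifier,
                         param_grid={'dense_layer_sizes': [[32], [64, 64]],
                                     'filters': [8],
                                     'kernel_size': [3],
                                     'pool_size': [2],
                                     'epochs': [3]},
                         scoring='neg_log_loss',
                         n_jobs=1)
# validator.fit(X_train, y_train)  # X_train / y_train: your own data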
def block(self, num_filters, num_layers, kernel_size, strides, input_tensor):
    x = Conv2D(num_layers, (1, 1), strides=strides)(input_tensor)  # note: num_layers sets the 1x1 bottleneck width here
x = Activation(selu)(x)
x = Conv2D(num_filters, kernel_size, padding='same')(x)
x = Activation(selu)(x)
x = Conv2D(num_filters*4, (1, 1))(x)
shortcut = Conv2D(num_filters*4, (1, 1), strides=strides,
)(input_tensor)
x = layers.add([x, shortcut])
x = Activation(selu)(x)
return x
def BiDi(input_shape, vocabSize, veclen, wordWeights, nLayers, nHidden, lr):
    assert len(nHidden) == nLayers, '#Neurons for each layer does not match #Layers'
    r_flag = True
    _Input = Input(shape=(input_shape,), dtype='int32')
    E = keras.layers.Embedding(vocabSize, veclen, weights=[wordWeights], mask_zero=True)(_Input)
    for ind in range(nLayers):
        if ind == (nLayers - 1):
            r_flag = False
        fwd_layer = keras.layers.GRU(nHidden[ind], kernel_initializer='glorot_uniform',
                                     recurrent_initializer='orthogonal', activation='tanh',
                                     recurrent_activation='hard_sigmoid',
                                     return_sequences=r_flag)(E)
        bkwd_layer = keras.layers.GRU(nHidden[ind], kernel_initializer='glorot_uniform',
                                      recurrent_initializer='orthogonal', activation='tanh',
                                      recurrent_activation='hard_sigmoid',
                                      return_sequences=r_flag, go_backwards=True)(E)
        E = keras.layers.average([fwd_layer, bkwd_layer])
        # nHidden /= 2
    Output = Dense(1, activation='sigmoid')(Dropout(0.5)(E))
    model = Model(inputs=_Input, outputs=Output)
    opt = keras.optimizers.Adam(lr)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,  # use the Adam instance so the `lr` argument takes effect
                  metrics=['accuracy'])
    return model
def get_layers(self, name, next_layer=False, last_layer=False, type=None):
if type is None:
name2layer = {layer.name: layer for layer in self.model.layers}
else:
name2layer = {}
for layer in self.model.layers:
for t in type:
if t.lower() in layer.name.lower():
name2layer[layer.name] = layer
break
# name2layer = {layer.name: layer for layer in self.model.layers if type.lower() in layer.name.lower()}
def _get_layer(name):
return name2layer[name]
nodes = self.graph.get_nodes(name, next_layer, last_layer, type=type)
if not isinstance(nodes, list):
nodes = [nodes]
'''
for node in nodes:
if node.name not in name2layer:
embed()
'''
    return [_get_layer(node.name) for node in nodes]
def get_model_list(self, model):
model_list = []
model_dict = json.loads(model.to_json())
model_layer = model_dict['config']['layers']
for layer in model_layer:
layer_name = layer['config']['name']
layer_output_shape = model.get_layer(layer_name).output_shape
if layer['class_name'] == 'Conv2D' and layer['config']['name'].lower().startswith('conv'):
model_list.append([layer['class_name'], layer['config']['name'],
{'kernel_size': layer['config']['kernel_size'],
'filters': layer['config']['filters']}])
elif layer['class_name'] == 'GlobalMaxPooling2D':
model_list.append([layer['class_name'],
layer['config']['name'],
{}])
elif layer['class_name'] == 'Activation':
model_list.append([layer['class_name'],
layer['config']['name'],
{'activation_type': 'softmax'}])
return model_list
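# Illustrative return value of get_model_list() for a small net (hypothetical
# layer names; the exact names depend on how the model was built):
# [['Conv2D', 'conv2d_1', {'kernel_size': [3, 3], 'filters': 32}],
#  ['GlobalMaxPooling2D', 'global_max_pooling2d_1', {}],
#  ['Activation', 'activation_1', {'activation_type': 'softmax'}]]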
def buildModelLSTM_3(self):
    model = Sequential()
    layers = [self.inOutVecDim, 57, 57 * 2, 32, self.inOutVecDim]
    model.add(LSTM(layers[1], input_shape=(None, layers[0]),
                   return_sequences=False))
    model.add(Dense(layers[4]))
    model.add(Activation(self.activation))
    optimizer = keras.optimizers.RMSprop(lr=0.001)
    model.compile(loss="mae", optimizer=optimizer)
    return model
def buildModelLSTM_4(self):
    model = Sequential()
    layers = [self.inOutVecDim, 57, 57 * 2, 57, self.inOutVecDim]
    model.add(LSTM(layers[1], input_shape=(None, layers[0]),
                   return_sequences=True))
    model.add(LSTM(layers[2], return_sequences=False))
    model.add(Dense(layers[4]))
    model.add(Activation(self.activation))
    optimizer = keras.optimizers.RMSprop(lr=0.001)
    model.compile(loss="mae", optimizer=optimizer)
    return model
def test_initial_state_GRU(self):
data = np.random.rand(1, 1, 2)
model = keras.models.Sequential()
model.add(keras.layers.GRU(5, input_shape=(1, 2), batch_input_shape=[1, 1, 2], stateful=True))
model.get_layer(index=1).reset_states()
coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
keras_output_1 = model.predict(data)
coreml_full_output_1 = coreml_model.predict({'data': data})
coreml_output_1 = coreml_full_output_1['output']
coreml_output_1 = np.expand_dims(coreml_output_1, 1)
np.testing.assert_array_almost_equal(coreml_output_1.T, keras_output_1)
hidden_state = (np.random.rand(1, 5))
model.get_layer(index=1).reset_states(states=hidden_state)
coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
spec = coreml_model.get_spec()
keras_output_2 = model.predict(data)
coreml_full_output_2 = coreml_model.predict({'data': data, spec.description.input[1].name: hidden_state[0]})
coreml_output_2 = coreml_full_output_2['output']
coreml_output_2 = np.expand_dims(coreml_output_2, 1)
np.testing.assert_array_almost_equal(coreml_output_2.T, keras_output_2)
def test_initial_state_SimpleRNN(self):
data = np.random.rand(1, 1, 2)
model = keras.models.Sequential()
model.add(keras.layers.SimpleRNN(5, input_shape=(1, 2), batch_input_shape=[1, 1, 2], stateful=True))
model.get_layer(index=1).reset_states()
coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
keras_output_1 = model.predict(data)
coreml_full_output_1 = coreml_model.predict({'data': data})
coreml_output_1 = coreml_full_output_1['output']
coreml_output_1 = np.expand_dims(coreml_output_1, 1)
np.testing.assert_array_almost_equal(coreml_output_1.T, keras_output_1)
hidden_state = np.random.rand(1, 5)
model.get_layer(index=1).reset_states(states=hidden_state)
coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
spec = coreml_model.get_spec()
keras_output_2 = model.predict(data)
coreml_full_output_2 = coreml_model.predict({'data': data, spec.description.input[1].name: hidden_state[0]})
coreml_output_2 = coreml_full_output_2['output']
coreml_output_2 = np.expand_dims(coreml_output_2, 1)
np.testing.assert_array_almost_equal(coreml_output_2.T, keras_output_2)
def buildConvolution(self, name):
    filters = self.params.get('filters')
    nb_filter = self.params.get('nb_filter')
    assert filters
    assert nb_filter
    convs = []
    for fsz in filters:
        layer_name = '%s-conv-%d' % (name, fsz)
        conv = Conv2D(
            filters=nb_filter,
            kernel_size=(fsz, self.wdim),
            padding='valid',
            kernel_initializer='glorot_uniform',
            kernel_constraint=maxnorm(self.params.get('w_maxnorm')),
            bias_constraint=maxnorm(self.params.get('b_maxnorm')),
            name=layer_name
        )
        convs.append(conv)
    self.layers['%s-convolution' % name] = convs
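# Hypothetical parameter set for the Kim-style multi-width text convolution
# above (keys follow the self.params lookups in buildConvolution):
# self.params = {'filters': [3, 4, 5], 'nb_filter': 100,
#                'w_maxnorm': 3, 'b_maxnorm': 3}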
def build_model():
    input_tensor = Input(shape=(150, 150, 3))
    vgg16_model = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)
    x = BN()(vgg16_model.layers[-1].output)
    x = Dense(2048, activation='relu')(x)
    dense = Flatten()(x)
    result = Activation('sigmoid')(Dense(4096)(dense))
    model = Model(inputs=vgg16_model.input, outputs=result)
    for i, layer in enumerate(model.layers):
        print(i, layer)
    for layer in model.layers[:12]:  # default 15
        layer.trainable = False
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model
#build_model()
def create_resnet50(input_img):
net = ResNet50(weights='imagenet', include_top=False,
input_tensor=input_img)
for layer in net.layers[1:]:
layer.trainable = False
net = Reshape((-1,))(net.outputs[0])
return net
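# Attaching a classification head to the frozen ResNet50 feature extractor
# above. A sketch: the input size and class count are assumptions.
from keras.layers import Dense, Input
from keras.models import Model

n_classes = 10                                # hypothetical
img = Input(shape=(224, 224, 3))
features = create_resnet50(img)
predictions = Dense(n_classes, activation='softmax')(features)
head_model = Model(inputs=img, outputs=predictions)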
def on_epoch_begin(self, epoch, logs=None):
super(MyLearningRateScheduler, self).on_epoch_begin(epoch, logs=logs)
if epoch > self.epoch_unfreeze:
for i, layer in enumerate(self.model.layers[1:]):
layer.trainable = i >= self.num_layers_to_freeze
else:
for layer in self.model.layers[1:-1]:
layer.trainable = False
self.model.layers[-1].trainable = True
    if not self.recompiled_first or (not self.recompiled and epoch > self.epoch_unfreeze):
        adam = keras.optimizers.Adam(lr=self.step_decay(epoch))
        self.model.compile(optimizer=adam, loss='binary_crossentropy',
                           metrics=['accuracy', custom_metrics.f2score_samples])
        self.model.summary()
        if not self.recompiled_first:
            self.recompiled_first = True
        else:
            self.recompiled = True
def basic_block(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
"""Basic 3 X 3 convolution blocks for use on resnets with layers <= 34.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
"""
def f(input):
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
conv1 = Conv2D(filters=filters, kernel_size=(3, 3),
strides=init_strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(input)
else:
conv1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3),
strides=init_strides)(input)
residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv1)
return _shortcut(input, residual)
return f
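# basic_block() calls two helpers that are not shown on this page. A minimal
# sketch of what they typically look like in this pre-activation ResNet idiom
# (channels_last assumed; the exact originals may differ):
from keras import backend as K
from keras.layers import Activation, BatchNormalization, Conv2D, add
from keras.regularizers import l2

def _bn_relu_conv(filters, kernel_size, strides=(1, 1)):
    def f(input):
        norm = BatchNormalization()(input)
        act = Activation("relu")(norm)
        return Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                      padding="same", kernel_initializer="he_normal",
                      kernel_regularizer=l2(1e-4))(act)
    return f

def _shortcut(input, residual):
    # Projects the input with a 1x1 convolution when its shape disagrees
    # with the residual, then adds the two branches.
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    stride_w = int(round(input_shape[1] / residual_shape[1]))
    stride_h = int(round(input_shape[2] / residual_shape[2]))
    shortcut = input
    if stride_w > 1 or stride_h > 1 or input_shape[3] != residual_shape[3]:
        shortcut = Conv2D(filters=residual_shape[3], kernel_size=(1, 1),
                          strides=(stride_w, stride_h), padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(1e-4))(input)
    return add([shortcut, residual])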
def resnet(repetition=2, k=1):
    '''Wide Residual Network (with a slight modification)
    depth == repetition*6 + 2
    '''
    from keras.models import Model
    from keras.layers import Input, Dense, Flatten, AveragePooling2D
    from keras.regularizers import l2
    input_shape = (1, _img_len, _img_len)
    output_dim = len(_columns)
    x = Input(shape=input_shape)
    z = conv2d(nb_filter=8, k_size=5, downsample=True)(x)         # out_shape == 8, _img_len/ 2, _img_len/ 2
    z = bn_lrelu(0.01)(z)
    z = residual_block(nb_filter=k*16, repetition=repetition)(z)  # out_shape == k*16, _img_len/ 4, _img_len/ 4
    z = residual_block(nb_filter=k*32, repetition=repetition)(z)  # out_shape == k*32, _img_len/ 8, _img_len/ 8
    z = residual_block(nb_filter=k*64, repetition=repetition)(z)  # out_shape == k*64, _img_len/16, _img_len/16
    z = AveragePooling2D((_img_len // 16, _img_len // 16))(z)
    z = Flatten()(z)
    z = Dense(output_dim, activation='sigmoid',
              kernel_regularizer=l2(_Wreg_l2), kernel_initializer='zeros')(z)
    return Model(inputs=x, outputs=z)
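# Worked example of the depth formula above: each of the three residual_block
# stages contributes `repetition` blocks of 2 convolutions (6*repetition layers),
# plus the stem convolution and the final Dense layer, so with repetition=2
# the depth is 2*6 + 2 == 14.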
def residual_block(nb_filter, repetition):
    '''(downsample ->) residual blocks ....... -> BatchNormalization -> LeakyReLU'''
    from keras.layers import add
    def f(x):
        for i in range(repetition):
            if i == 0:
                y = conv2d(nb_filter, downsample=True, k_size=1)(x)
                z = conv2d(nb_filter, downsample=True)(x)
            else:
                y = x
                z = bn_lrelu(0.01)(x)
                z = conv2d(nb_filter)(z)
            z = bn_lrelu(0.01)(z)
            z = conv2d(nb_filter)(z)
            x = add([y, z])
        return bn_lrelu(0.01)(x)
    return f
# deep_food.py, from the project keras-resnet-food-reverse-engineering (author: GINK03)
def build_model():
    input_tensor = Input(shape=(224, 224, 3))
    resnet_model = ResNet50(include_top=False, weights='imagenet', input_tensor=input_tensor)
    x = BN()(resnet_model.layers[-1].output)
    x = Dense(2048, activation='relu')(x)
    dense = Flatten()(x)
    result = Activation('sigmoid')(Dense(2048, activation='linear')(dense))
    model = Model(inputs=resnet_model.input, outputs=result)
    for layer in model.layers[:139]:  # default 179
        # print(layer)
        if 'BatchNormalization' in str(layer):
            pass  # leave BatchNormalization layers trainable
        else:
            layer.trainable = False
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model
def define_model(weights_path):
    '''
    Define the model structure and load weights.
    '''
    from resnet50 import ResNet50
    from keras.models import Model
    from keras.layers import Dense, GlobalAveragePooling2D
    resnet50_model = ResNet50()
    fc1000 = resnet50_model.get_layer('fc1000').output
    final_softmax = Dense(2, activation='softmax')(fc1000)
    resnet50_finetune_1skip = Model(inputs=resnet50_model.input, outputs=final_softmax)
    resnet50_finetune_1skip.load_weights(weights_path)
    resnet50_finetune_1skip.compile(loss="categorical_crossentropy",
                                    optimizer='nadam',
                                    metrics=['accuracy'])
    return resnet50_finetune_1skip
def make_trainable(net, val):
net.trainable = val
for l in net.layers:
l.trainable = val
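# Typical use of make_trainable() when alternating GAN updates: freeze the
# discriminator while the stacked model trains the generator, then unfreeze.
# (`discriminator`, `gan`, `noise`, and `y_fake` below are assumed names.)
# make_trainable(discriminator, False)
# gan.train_on_batch(noise, y_fake)
# make_trainable(discriminator, True)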
def ann(input_shape, n_classes, layers=2, neurons=80, dropout=0.35):
    """
    for working with extracted features
    """
    model = Sequential(name='ann')
    for l in range(layers):
        model.add(Dense(neurons, input_shape=input_shape, activation='elu', kernel_initializer='he_normal'))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=[keras.metrics.categorical_accuracy])
    return model
def pure_rnn_do(input_shape, n_classes, layers=2, neurons=80, dropout=0.3):
    """
    just replace ANN by RNNs
    """
    model = Sequential(name='pure_rnn')
    model.add(LSTM(neurons, return_sequences=False if layers == 1 else True,
                   input_shape=input_shape, dropout=dropout, recurrent_dropout=dropout))
    for i in range(layers - 1):
        model.add(LSTM(neurons, return_sequences=False if i == layers - 2 else True,
                       dropout=dropout, recurrent_dropout=dropout))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
def bi_lstm(input_shape, n_classes, layers=2, neurons=80, dropout=0.3):
    """
    same as pure_rnn_do, but with bidirectional LSTMs
    """
    model = Sequential(name='bi_lstm')
    model.add(Bidirectional(LSTM(neurons, return_sequences=False if layers == 1 else True,
                                 dropout=dropout, recurrent_dropout=dropout),
                            input_shape=input_shape))
    for i in range(layers - 1):
        model.add(Bidirectional(LSTM(neurons, return_sequences=False if i == layers - 2 else True,
                                     dropout=dropout, recurrent_dropout=dropout)))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
def cnn_model():
    '''
    Construct the CNN model:
    2*(Conv1D + relu + MaxPooling1D) + Flatten + 2*(Dense + relu) + Dense + softmax
    Tissue inputs will be inserted after the first Dense/relu layer, if activated.
    '''
    print('Construct CNN model')
main_inputs = Input(shape=X_DATA[0].shape, name='sequence_inputs')
hidden = Conv1D(128, kernel_size=2, padding='same', activation='relu')(main_inputs)
hidden = MaxPooling1D(pool_size=10)(hidden)
hidden = Conv1D(128, kernel_size=2, padding='same', activation='relu')(hidden)
hidden = MaxPooling1D(pool_size=10)(hidden)
if ARGS.d:
hidden = Dropout(ARGS.d/100)(hidden)
hidden = Flatten()(hidden)
hidden = Dense(625)(hidden)
hidden = Activation('relu')(hidden)
if ARGS.T:
auxiliary_inputs = Input(shape=TISSUE_DATA[0].shape, name='tissue_inputs')
hidden = keras.layers.concatenate([hidden, auxiliary_inputs])
hidden = Dense(125)(hidden)
hidden = Activation('relu')(hidden)
outputs = Dense(CLASSES, activation='softmax')(hidden)
if ARGS.T:
model = Model(inputs=[main_inputs, auxiliary_inputs], outputs=outputs)
else:
model = Model(inputs=main_inputs, outputs=outputs)
model.summary()
model.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=['accuracy'])
model.save_weights('{}model.h5~'.format(ARGS.o))
return model
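# The trailing save_weights() call snapshots the freshly initialized weights;
# a sketch of the matching reset between repeated runs or CV folds (this
# intent is inferred from the code, not stated in the source):
# model = cnn_model()
# model.fit(...)                                     # train one fold
# model.load_weights('{}model.h5~'.format(ARGS.o))   # restore initial weights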
def dnn_model():
    '''
    Construct the DNN model:
    Flatten + 2*(Dense + relu) + Dense + softmax
    Tissue inputs will be inserted after the first Dense/relu layer, if activated.
    '''
    print('Construct DNN model')
main_inputs = Input(shape=X_DATA[0].shape, name='sequence_inputs')
hidden = Flatten()(main_inputs)
hidden = Dense(128)(hidden)
hidden = Activation('relu')(hidden)
if ARGS.T:
auxiliary_inputs = Input(shape=TISSUE_DATA[0].shape, name='tissue_inputs')
hidden = keras.layers.concatenate([hidden, auxiliary_inputs])
hidden = Dense(128)(hidden)
hidden = Activation('relu')(hidden)
outputs = Dense(CLASSES, activation='softmax')(hidden)
if ARGS.T:
model = Model(inputs=[main_inputs, auxiliary_inputs], outputs=outputs)
else:
model = Model(inputs=main_inputs, outputs=outputs)
model.summary()
model.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=['accuracy'])
model.save_weights('{}model.h5~'.format(ARGS.o))
return model
def get_params(mo):
para = []
for la in mo.layers:
par = la.get_weights()
para += par
return para
def _get_softmax_name(self):
"""
Looks for the name of the softmax layer.
:return: Softmax layer name
"""
    for layer in self.model.layers:
cfg = layer.get_config()
if 'activation' in cfg and cfg['activation'] == 'softmax':
return layer.name
raise Exception("No softmax layers found")