def test_tiny_mcrnn_music_tagger(self):
x_in = Input(shape=(4,6,1))
x = ZeroPadding2D(padding=(0, 1))(x_in)
x = BatchNormalization(axis=2, name='bn_0_freq')(x)
# Conv block 1
x = Convolution2D(2, 3, 3, border_mode='same', name='conv1')(x)
x = BatchNormalization(axis=3, mode=0, name='bn1')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
# Conv block 2
x = Convolution2D(4, 3, 3, border_mode='same', name='conv2')(x)
x = BatchNormalization(axis=3, mode=0, name='bn2')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(x)
    # Output shape here is (None, 1, 2, 4), so the (2, 4) reshape below is valid
x = Reshape((2, 4))(x)
x = GRU(32, return_sequences=True, name='gru1')(x)
x = GRU(32, return_sequences=False, name='gru2')(x)
# Create model.
model = Model(x_in, x)
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_keras_model(model, mode='random_zero_mean', delta=1e-2)
Python ELU class usage examples (source code)
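For orientation before the snippets: ELU (Exponential Linear Unit) computes f(x) = x for x > 0 and f(x) = alpha * (exp(x) - 1) otherwise, so negative inputs saturate smoothly toward -alpha. A minimal NumPy sketch of the function itself, independent of any snippet below:

import numpy as np

def elu(x, alpha=1.0):
    # Identity for positive inputs; alpha * (exp(x) - 1) for negative ones.
    return np.where(x > 0, x, alpha * (np.exp(x) - 1))

print(elu(np.array([-2.0, -0.5, 0.0, 1.5])))  # approx [-0.8647, -0.3935, 0.0, 1.5]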
def __init__(self, widths, vocab_size=5000):
from keras.models import Sequential
from keras.layers import Embedding, Dense, TimeDistributedMerge
from keras.layers.advanced_activations import ELU
from keras.preprocessing.sequence import pad_sequences
from keras.optimizers import SGD
self.n_classes = widths[-1]
self.vocab_size = vocab_size
self.word_to_int = {}
self.int_to_word = np.ndarray(shape=(vocab_size+1,), dtype='int64')
self.model = Sequential()
self.model.add(Embedding(vocab_size, widths[0]))
self.model.add(TimeDistributedMerge(mode='ave'))
for width in widths[1:-1]:
        layer = Dense(output_dim=width, init='he_normal', activation=ELU(1.0))
self.model.add(layer)
self.model.add(
Dense(
            self.n_classes,
init='zero',
activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
self.model.compile(loss='categorical_crossentropy', optimizer=sgd)
model_zoo.py (project: visual_turing_test-tutorial, author: mateuszmalinowski)
def deep_mlp(self):
"""
    Deep multilayer perceptron.
"""
if self._config.num_mlp_layers == 0:
self.add(Dropout(0.5))
else:
        for j in range(self._config.num_mlp_layers):  # range, not xrange, for Python 3 compatibility
self.add(Dense(self._config.mlp_hidden_dim))
if self._config.mlp_activation == 'elu':
self.add(ELU())
elif self._config.mlp_activation == 'leaky_relu':
self.add(LeakyReLU())
elif self._config.mlp_activation == 'prelu':
self.add(PReLU())
else:
self.add(Activation(self._config.mlp_activation))
self.add(Dropout(0.5))
def fc_inception(input_tensor, n=3000, d=0.5):
br1 = Dense(n)(input_tensor)
br1 = LeakyReLU()(br1)
br1 = BatchNormalization()(br1)
br1 = Dropout(d)(br1)
br1 = Dense(int(n/3.0))(br1)
br2 = Dense(n)(input_tensor)
br2 = BatchNormalization()(br2)
br2 = ELU()(br2)
br2 = Dropout(d)(br2)
br2 = Dense(int(n/3.0))(br2)
br3 = Dense(int(n/3.0))(input_tensor)
br3 = BatchNormalization()(br3)
br3 = PReLU()(br3)
br3 = Dropout(d)(br3)
br3 = Dense(int(n/3.0))(br3)
br3 = BatchNormalization()(br3)
br3 = PReLU()(br3)
br3 = Dropout(d)(br3)
br3 = Dense(int(n/3.0))(br3)
br3 = BatchNormalization()(br3)
br3 = PReLU()(br3)
br3 = Dropout(d)(br3)
x = merge([br1, br2, br3], mode='concat', concat_axis=1)
return x
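A minimal usage sketch for fc_inception, assuming the same Keras 1.x functional API the snippet uses; the 3000-wide input and the 10-class head are illustrative choices, not from the original:

from keras.layers import Input, Dense
from keras.models import Model

inp = Input(shape=(3000,))
x = fc_inception(inp, n=3000, d=0.5)      # three branches, each n/3 wide, concatenated
out = Dense(10, activation='softmax')(x)
model = Model(inp, out)
model.compile(loss='categorical_crossentropy', optimizer='adam')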
def create_model(input_shape, class_count):
inputs = Input(shape=input_shape)
# add one more dimension for convolution
x = Reshape(input_shape + (1, ))(inputs)
x = BatchNormalization()(x)
def convolution_block(filter_count, dropout):
def create(x):
x = Convolution2D(filter_count, 3, 3, border_mode='same')(x)
x = BatchNormalization()(x)
x = ELU()(x)
x = Convolution2D(filter_count, 3, 3, border_mode='same')(x)
x = BatchNormalization()(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(dropout)(x)
return x
return create
x = convolution_block(filter_count=32, dropout=0.1)(x)
x = convolution_block(filter_count=64, dropout=0.1)(x)
x = convolution_block(filter_count=64, dropout=0.1)(x)
x = convolution_block(filter_count=64, dropout=0.1)(x)
x = Flatten()(x)
x = Dense(class_count)(x)
x = BatchNormalization()(x)
predictions = Activation('softmax')(x)
model = Model(inputs, predictions)
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy'])
return model
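A usage sketch for create_model, assuming 2-D inputs such as fixed-size spectrogram patches; the (96, 96) shape and 10 classes are illustrative:

model = create_model(input_shape=(96, 96), class_count=10)
model.summary()  # four pooling blocks halve each spatial dim: 96 -> 48 -> 24 -> 12 -> 6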
def test_elu():
from keras.layers.advanced_activations import ELU
for alpha in [0., .5, -1.]:
layer_test(ELU, kwargs={'alpha': alpha},
input_shape=(2, 3, 4))
def test_keras_export(self):
tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
'keras_export_test.json'), 'r')
response = json.load(tests)
tests.close()
net = yaml.safe_load(json.dumps(response['net']))
net = {'l0': net['Input'], 'l1': net['ELU']}
net['l0']['connection']['output'].append('l1')
inp = data(net['l0'], '', 'l0')['l0']
net = activation(net['l1'], [inp], 'l1')
model = Model(inp, net['l1'])
self.assertEqual(model.layers[1].__class__.__name__, 'ELU')
def activation(layer, layer_in, layerId):
out = {}
if (layer['info']['type'] == 'ReLU'):
if (layer['params']['negative_slope'] != 0):
out[layerId] = LeakyReLU(alpha=layer['params']['negative_slope'])(*layer_in)
else:
out[layerId] = Activation('relu')(*layer_in)
elif (layer['info']['type'] == 'PReLU'):
out[layerId] = PReLU()(*layer_in)
elif (layer['info']['type'] == 'ELU'):
out[layerId] = ELU(alpha=layer['params']['alpha'])(*layer_in)
elif (layer['info']['type'] == 'ThresholdedReLU'):
out[layerId] = ThresholdedReLU(theta=layer['params']['theta'])(*layer_in)
elif (layer['info']['type'] == 'Sigmoid'):
out[layerId] = Activation('sigmoid')(*layer_in)
elif (layer['info']['type'] == 'TanH'):
out[layerId] = Activation('tanh')(*layer_in)
elif (layer['info']['type'] == 'Softmax'):
out[layerId] = Activation('softmax')(*layer_in)
elif (layer['info']['type'] == 'SELU'):
out[layerId] = Activation('selu')(*layer_in)
elif (layer['info']['type'] == 'Softplus'):
out[layerId] = Activation('softplus')(*layer_in)
elif (layer['info']['type'] == 'Softsign'):
out[layerId] = Activation('softsign')(*layer_in)
elif (layer['info']['type'] == 'HardSigmoid'):
out[layerId] = Activation('hard_sigmoid')(*layer_in)
return out
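A hedged usage sketch for the activation dispatcher above; the dict below mirrors the 'info'/'params' structure the function reads, but the full schema belongs to the surrounding project, so treat the example values as assumptions:

from keras.layers import Input

inp = Input(shape=(15,))
layer = {'info': {'type': 'ELU'}, 'params': {'alpha': 1.0}}
out = activation(layer, [inp], 'l1')  # returns {'l1': <tensor after ELU>}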
def build_model(n_classes):
if K.image_dim_ordering() == 'th':
input_shape = (1, N_MEL_BANDS, SEGMENT_DUR)
channel_axis = 1
else:
input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1)
channel_axis = 3
melgram_input = Input(shape=input_shape)
m_sizes = [50, 70]
n_sizes = [1, 3, 5]
n_filters = [128, 64, 32]
maxpool_const = 4
layers = list()
for m_i in m_sizes:
for i, n_i in enumerate(n_sizes):
x = Convolution2D(n_filters[i], m_i, n_i,
border_mode='same',
init='he_normal',
W_regularizer=l2(1e-5),
name=str(n_i)+'_'+str(m_i)+'_'+'conv')(melgram_input)
x = BatchNormalization(axis=channel_axis, mode=0, name=str(n_i)+'_'+str(m_i)+'_'+'bn')(x)
x = ELU()(x)
            x = MaxPooling2D(pool_size=(N_MEL_BANDS, SEGMENT_DUR // maxpool_const), name=str(n_i)+'_'+str(m_i)+'_'+'pool')(x)  # // keeps the pool size an int on Python 3
x = Flatten(name=str(n_i)+'_'+str(m_i)+'_'+'flatten')(x)
layers.append(x)
x = merge(layers, mode='concat', concat_axis=channel_axis)
x = Dropout(0.5)(x)
x = Dense(n_classes, init='he_normal', W_regularizer=l2(1e-5), activation='softmax', name='prediction')(x)
model = Model(melgram_input, x)
return model
def test_tiny_conv_elu_random(self):
np.random.seed(1988)
# Define a model
from keras.layers.advanced_activations import ELU
model = Sequential()
model.add(Conv2D(input_shape = (10, 10, 3),
filters = 3, kernel_size = (5,5)))
model.add(ELU(alpha=0.8))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_keras_model(model)
def test_tiny_conv_elu_random(self):
np.random.seed(1988)
# Define a model
from keras.layers.advanced_activations import ELU
model = Sequential()
model.add(Convolution2D(input_shape = (10, 10, 3),
nb_filter = 3, nb_row = 5, nb_col = 5))
model.add(ELU(alpha=0.8))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_keras_model(model)
def get_activation_layer(activation):
if activation == 'LeakyReLU':
return LeakyReLU()
if activation == 'PReLU':
return PReLU()
if activation == 'ELU':
return ELU()
if activation == 'ThresholdedReLU':
return ThresholdedReLU()
return Activation(activation)
# TODO: same for optimizers, including clipnorm
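A short usage sketch for get_activation_layer, plugging the returned layer into a Sequential model (the 64-unit hidden layer is an illustrative choice):

from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(64, input_shape=(15,)))
model.add(get_activation_layer('ELU'))  # advanced activations are layers, not activation strings
model.add(Dense(1))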
def create_model(word_coding):
"""
Create the LSTM model
:param word_coding:
:return:
"""
model = Graph()
model.add_input(name='input', input_shape=(sd_len, input_dim))
model.add_node(TimeDistributedDense(input_dim=input_dim, output_dim=lstm_hdim, input_length=sd_len),
name=layerNames[0], input='input')
model.add_node(BatchNormalization(), name=layerNames[1], input=layerNames[0])
model.add_node(LSTM(input_dim=lstm_hdim, output_dim=lstm_hdim, return_sequences=True), name=layerNames[2] + 'left',
input=layerNames[1])
model.add_node(BatchNormalization(), name=layerNames[3] + 'left', input=layerNames[2] + 'left')
model.add_node(LSTM(input_dim=lstm_hdim, output_dim=lstm_hdim, return_sequences=True, go_backwards=True),
name=layerNames[2] + 'right', input=layerNames[1])
model.add_node(BatchNormalization(), name=layerNames[3] + 'right', input=layerNames[2] + 'right')
model.add_node(LSTM(input_dim=lstm_hdim, output_dim=lstm_hdim, return_sequences=False), name=layerNames[6] + 'left',
input=layerNames[3] + 'left')
model.add_node(LSTM(input_dim=lstm_hdim, output_dim=lstm_hdim, return_sequences=False, go_backwards=True),
name=layerNames[6] + 'right', input=layerNames[3] + 'right')
model.add_node(BatchNormalization(), name=layerNames[7], inputs=[layerNames[6] + 'left', layerNames[6] + 'right'])
model.add_node(Dropout(0.2), name=layerNames[8], input=layerNames[7])
model.add_node(Dense(input_dim=bridge_dim, output_dim=dense_dim), name=layerNames[9], input=layerNames[8])
model.add_node(ELU(), name=layerNames[10], input=layerNames[9])
model.add_node(Dropout(0.2), name=layerNames[11], input=layerNames[10])
model.add_node(Dense(input_dim=dense_dim, output_dim=len(word_coding)), name=layerNames[12], input=layerNames[11])
model.add_node(Activation('softmax'), name=layerNames[13], input=layerNames[12])
model.add_output(name='output1', input=layerNames[13])
model.compile(optimizer='rmsprop', loss={'output1': 'categorical_crossentropy'})
return model
def inception_block(inputs, depth, batch_mode=0, splitted=False, activation='relu'):
assert depth % 16 == 0
    if activation == 'relu':
        actv = lambda: LeakyReLU(0.0)  # slope 0 makes this a plain ReLU
    elif activation == 'elu':
        actv = lambda: ELU(1.0)
    else:
        actv = None
    c1_1 = Convolution2D(depth // 4, 1, 1, init='he_normal', border_mode='same')(inputs)  # // keeps filter counts ints on Python 3
    c2_1 = Convolution2D(depth // 8 * 3, 1, 1, init='he_normal', border_mode='same')(inputs)
    c2_1 = actv()(c2_1)
    if splitted:
        c2_2 = Convolution2D(depth // 2, 1, 3, init='he_normal', border_mode='same')(c2_1)
        c2_2 = BatchNormalization(mode=batch_mode, axis=1)(c2_2)
        c2_2 = actv()(c2_2)
        c2_3 = Convolution2D(depth // 2, 3, 1, init='he_normal', border_mode='same')(c2_2)
    else:
        c2_3 = Convolution2D(depth // 2, 3, 3, init='he_normal', border_mode='same')(c2_1)
    c3_1 = Convolution2D(depth // 16, 1, 1, init='he_normal', border_mode='same')(inputs)
    # missed batch norm
    c3_1 = actv()(c3_1)
    if splitted:
        c3_2 = Convolution2D(depth // 8, 1, 5, init='he_normal', border_mode='same')(c3_1)
        c3_2 = BatchNormalization(mode=batch_mode, axis=1)(c3_2)
        c3_2 = actv()(c3_2)
        c3_3 = Convolution2D(depth // 8, 5, 1, init='he_normal', border_mode='same')(c3_2)
    else:
        c3_3 = Convolution2D(depth // 8, 5, 5, init='he_normal', border_mode='same')(c3_1)
    p4_1 = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same')(inputs)
    c4_2 = Convolution2D(depth // 8, 1, 1, init='he_normal', border_mode='same')(p4_1)
res = merge([c1_1, c2_3, c3_3, c4_2], mode='concat', concat_axis=1)
res = BatchNormalization(mode=batch_mode, axis=1)(res)
res = actv()(res)
return res
def rblock(inputs, num, depth, scale=0.1):
residual = Convolution2D(depth, num, num, border_mode='same')(inputs)
residual = BatchNormalization(mode=2, axis=1)(residual)
residual = Lambda(lambda x: x*scale)(residual)
res = _shortcut(inputs, residual)
return ELU()(res)
def NConvolution2D(nb_filter, nb_row, nb_col, border_mode='same', subsample=(1, 1)):
def f(_input):
conv = Convolution2D(nb_filter=nb_filter, nb_row=nb_row, nb_col=nb_col, subsample=subsample,
border_mode=border_mode)(_input)
norm = BatchNormalization(mode=2, axis=1)(conv)
return ELU()(norm)
return f
def BNA(_input):
inputs_norm = BatchNormalization(mode=2, axis=1)(_input)
return ELU()(inputs_norm)
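A usage sketch combining the helpers above, assuming the same Keras 1.x API (mode=2 BatchNormalization, Theano-style channel axis 1) and an illustrative (3, 32, 32) input:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(3, 32, 32))
x = NConvolution2D(16, 3, 3)(inp)  # conv -> BN -> ELU
out = BNA(x)                       # standalone BN -> ELU
model = Model(inp, out)
# rblock additionally requires the _shortcut helper, which the original file
# defines elsewhere and this excerpt does not show.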
def MyCNN(X, nb_classes, nb_layers=4):
nb_filters = 32 # number of convolutional filters = "feature maps"
kernel_size = (3, 3) # convolution kernel size
pool_size = (2, 2) # size of pooling area for max pooling
cl_dropout = 0.5 # conv. layer dropout
dl_dropout = 0.6 # dense layer dropout
channels = X.shape[1] # channels = 1 for mono, 2 for stereo
print(" MyCNN: X.shape = ",X.shape,", channels = ",channels)
input_shape = (channels, X.shape[2], X.shape[3])
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid', input_shape=input_shape))
model.add(BatchNormalization(axis=1, mode=2))
model.add(Activation('relu'))
for layer in range(nb_layers-1): # add more layers than just the first
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(BatchNormalization(axis=1, mode=2))
model.add(ELU(alpha=1.0))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(cl_dropout))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(dl_dropout))
model.add(Dense(nb_classes))
model.add(Activation("softmax"))
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
return model
def MyCNN_Keras2(X, nb_classes, nb_layers=4):
from keras import backend as K
K.set_image_data_format('channels_first')
nb_filters = 32 # number of convolutional filters = "feature maps"
kernel_size = (3, 3) # convolution kernel size
pool_size = (2, 2) # size of pooling area for max pooling
cl_dropout = 0.5 # conv. layer dropout
dl_dropout = 0.8 # dense layer dropout
channels = X.shape[1] # channels = 1 for mono, 2 for stereo
print(" MyCNN_Keras2: X.shape = ",X.shape,", channels = ",channels)
input_shape = (channels, X.shape[2], X.shape[3])
model = Sequential()
model.add(Conv2D(nb_filters, kernel_size, border_mode='valid', input_shape=input_shape))
model.add(BatchNormalization(axis=1))
model.add(Activation('relu'))
for layer in range(nb_layers-1): # add more layers than just the first
model.add(Conv2D(nb_filters, kernel_size))
model.add(BatchNormalization(axis=1))
model.add(ELU(alpha=1.0))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(cl_dropout))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(dl_dropout))
model.add(Dense(nb_classes))
model.add(Activation("softmax"))
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
return model
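A usage sketch for MyCNN_Keras2, assuming channels-first batches shaped (samples, channels, height, width); the shapes and class count are illustrative:

import numpy as np

X = np.random.rand(8, 1, 96, 96).astype('float32')
model = MyCNN_Keras2(X, nb_classes=10)
model.summary()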
train_network.py (project: audio-classifier-keras-cnn, author: drscotthawley)
def build_model(X,Y,nb_classes):
nb_filters = 32 # number of convolutional filters to use
pool_size = (2, 2) # size of pooling area for max pooling
kernel_size = (3, 3) # convolution kernel size
nb_layers = 4
input_shape = (1, X.shape[2], X.shape[3])
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid', input_shape=input_shape))
model.add(BatchNormalization(axis=1, mode=2))
model.add(Activation('relu'))
for layer in range(nb_layers-1):
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(BatchNormalization(axis=1, mode=2))
model.add(ELU(alpha=1.0))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation("softmax"))
return model
def test_keras_import(self):
# softmax
model = Sequential()
model.add(Activation('softmax', input_shape=(15,)))
model.build()
self.keras_type_test(model, 0, 'Softmax')
# relu
model = Sequential()
model.add(Activation('relu', input_shape=(15,)))
model.build()
self.keras_type_test(model, 0, 'ReLU')
# tanh
model = Sequential()
model.add(Activation('tanh', input_shape=(15,)))
model.build()
self.keras_type_test(model, 0, 'TanH')
# sigmoid
model = Sequential()
model.add(Activation('sigmoid', input_shape=(15,)))
model.build()
self.keras_type_test(model, 0, 'Sigmoid')
# selu
model = Sequential()
model.add(Activation('selu', input_shape=(15,)))
model.build()
self.keras_type_test(model, 0, 'SELU')
# softplus
model = Sequential()
model.add(Activation('softplus', input_shape=(15,)))
model.build()
self.keras_type_test(model, 0, 'Softplus')
# softsign
model = Sequential()
model.add(Activation('softsign', input_shape=(15,)))
model.build()
self.keras_type_test(model, 0, 'Softsign')
# hard_sigmoid
model = Sequential()
model.add(Activation('hard_sigmoid', input_shape=(15,)))
model.build()
self.keras_type_test(model, 0, 'HardSigmoid')
# LeakyReLU
model = Sequential()
model.add(LeakyReLU(alpha=1, input_shape=(15,)))
model.build()
self.keras_type_test(model, 0, 'ReLU')
# PReLU
model = Sequential()
model.add(PReLU(input_shape=(15,)))
model.build()
self.keras_type_test(model, 0, 'PReLU')
# ELU
model = Sequential()
model.add(ELU(alpha=1, input_shape=(15,)))
model.build()
self.keras_type_test(model, 0, 'ELU')
# ThresholdedReLU
model = Sequential()
model.add(ThresholdedReLU(theta=1, input_shape=(15,)))
model.build()
self.keras_type_test(model, 0, 'ThresholdedReLU')
def build_model(n_classes):
if K.image_dim_ordering() == 'th':
input_shape = (1, N_MEL_BANDS, SEGMENT_DUR)
channel_axis = 1
else:
input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1)
channel_axis = 3
melgram_input = Input(shape=input_shape)
maxpool_const = 4
m_sizes = [5, 80]
n_sizes = [1, 3, 5]
n_filters = [128, 64, 32]
layers = list()
for m_i in m_sizes:
for i, n_i in enumerate(n_sizes):
x = Convolution2D(n_filters[i], m_i, n_i,
border_mode='same',
init='he_normal',
W_regularizer=l2(1e-5),
name=str(n_i)+'_'+str(m_i)+'_'+'conv')(melgram_input)
x = BatchNormalization(axis=channel_axis, mode=0, name=str(n_i)+'_'+str(m_i)+'_'+'bn')(x)
x = ELU()(x)
            x = MaxPooling2D(pool_size=(N_MEL_BANDS // maxpool_const, SEGMENT_DUR // maxpool_const),
                             name=str(n_i)+'_'+str(m_i)+'_'+'pool')(x)  # // keeps pool sizes ints on Python 3
layers.append(x)
x = merge(layers, mode='concat', concat_axis=channel_axis)
x = Dropout(0.25)(x)
x = Convolution2D(128, 3, 3, init='he_normal', W_regularizer=l2(1e-5), border_mode='same', name='conv2')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn2')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(x)
x = Dropout(0.25)(x)
x = Convolution2D(128, 3, 3, init='he_normal', W_regularizer=l2(1e-5), border_mode='same', name='conv3')(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn3')(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool3')(x)
x = Flatten(name='flatten')(x)
x = Dropout(0.5)(x)
x = Dense(256, init='he_normal', W_regularizer=l2(1e-5), name='fc1')(x)
x = ELU()(x)
x = Dropout(0.5)(x)
x = Dense(n_classes, init='he_normal', W_regularizer=l2(1e-5), activation='softmax', name='prediction')(x)
model = Model(melgram_input, x)
return model
def fit(self, X, y):
## scaler
self.scaler = StandardScaler()
X = self.scaler.fit_transform(X)
#### build model
self.model = Sequential()
## input layer
self.model.add(Dropout(self.input_dropout, input_shape=(X.shape[1],)))
## hidden layers
first = True
hidden_layers = self.hidden_layers
while hidden_layers > 0:
self.model.add(Dense(self.hidden_units))
if self.batch_norm == "before_act":
self.model.add(BatchNormalization())
if self.hidden_activation == "prelu":
self.model.add(PReLU())
elif self.hidden_activation == "elu":
self.model.add(ELU())
else:
self.model.add(Activation(self.hidden_activation))
if self.batch_norm == "after_act":
self.model.add(BatchNormalization())
self.model.add(Dropout(self.hidden_dropout))
hidden_layers -= 1
## output layer
output_dim = 1
output_act = "linear"
self.model.add(Dense(output_dim))
self.model.add(Activation(output_act))
## loss
if self.optimizer == "sgd":
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
self.model.compile(loss="mse", optimizer=sgd)
else:
self.model.compile(loss="mse", optimizer=self.optimizer)
## fit
self.model.fit(X, y,
nb_epoch=self.nb_epoch,
batch_size=self.batch_size,
validation_split=0, verbose=0)
return self