def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2, loss='risk_estimation'):
    print("initializing... learning rate %s, n_layers %s, n_hidden %s, dropout rate %s." % (lr, n_layers, n_hidden, rate_dropout))
    self.model = Sequential()
    self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))
    # Stacked LSTM layers; all but the last return full sequences.
    for i in range(0, n_layers - 1):
        self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',
                            recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                            recurrent_initializer='orthogonal', bias_initializer='zeros',
                            dropout=rate_dropout, recurrent_dropout=rate_dropout))
    self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',
                        recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                        recurrent_initializer='orthogonal', bias_initializer='zeros',
                        dropout=rate_dropout, recurrent_dropout=rate_dropout))
    self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))
    # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),
    #                                   moving_variance_initializer=Constant(value=0.25)))
    # BatchRenormalization comes from keras-contrib; 'relu_limited' is a custom
    # activation that must be registered before compiling (see the sketch below).
    self.model.add(BatchRenormalization(axis=-1, beta_init=Constant(value=0.5)))
    self.model.add(Activation('relu_limited'))
    opt = RMSprop(lr=lr)
    self.model.compile(loss=loss,
                       optimizer=opt,
                       metrics=['accuracy'])
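'relu_limited' is not a built-in Keras activation; the original project registers its own. A minimal sketch of such a clipped ReLU, assuming a cap of 1.0 (the actual bound used by the source project is not shown here):

import keras.backend as K
from keras.layers import Activation
from keras.utils.generic_utils import get_custom_objects

def relu_limited(x, alpha=0.0, max_value=1.0):
    # max_value=1.0 is an assumed cap; adjust to match the original project.
    return K.relu(x, alpha=alpha, max_value=max_value)

get_custom_objects().update({'relu_limited': Activation(relu_limited)})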
def first_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = Conv1D(k1, 1, padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, padding='same')(out)
    # Residual connection: k2 must match the channel count of tensor_input
    # for the element-wise add to be valid.
    pooling = MaxPooling1D(pooling_size, padding='same')(tensor_input)
    # out = merge([out, pooling], mode='sum')  # Keras 1 API
    out = add([out, pooling])
    return out
def build_mlp(n_con, n_emb, vocabs_size, n_dis, emb_size, cluster_size):
    hidden_size = 800
    # Continuous-feature branch (Keras 1 style: Dense(input_dim=..., output_dim=...)).
    con = Sequential()
    con.add(Dense(input_dim=n_con, output_dim=emb_size))
    # One embedding branch per discrete feature.
    emb_list = []
    for i in range(n_emb):
        emb = Sequential()
        emb.add(Embedding(input_dim=vocabs_size[i], output_dim=emb_size, input_length=n_dis))
        emb.add(Flatten())
        emb_list.append(emb)
    # Merge is the Keras 1 layer for combining branch models.
    model = Sequential()
    model.add(Merge([con] + emb_list, mode='concat'))
    model.add(BatchNormalization())
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(cluster_size, activation='softmax'))
    model.add(Lambda(caluate_point, output_shape=[2]))
    return model
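The Merge layer used above was removed in Keras 2; a hedged sketch of the same branch-and-concatenate pattern with the functional API (variable names mirror the arguments above):

from keras.layers import Input, Dense, Embedding, Flatten, concatenate

con_in = Input(shape=(n_con,))
con_out = Dense(emb_size)(con_in)
emb_ins, emb_outs = [], []
for i in range(n_emb):
    e_in = Input(shape=(n_dis,))
    e_out = Flatten()(Embedding(input_dim=vocabs_size[i], output_dim=emb_size, input_length=n_dis)(e_in))
    emb_ins.append(e_in)
    emb_outs.append(e_out)
merged = concatenate([con_out] + emb_outs)
# ...followed by the same BatchNormalization/Dense/Dropout stack as above.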
def largeann(input_shape, n_classes, layers=3, neurons=2000, dropout=0.35):
    """
    For working with extracted features.
    """
    # gpu = switch_gpu()
    # with K.tf.device('/gpu:{}'.format(gpu)):
    #     K.set_session(K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)))
    model = Sequential(name='ann')
    # model.gpu = gpu
    for l in range(layers):
        # input_shape is only honored on the first layer; Keras ignores it afterwards.
        model.add(Dense(neurons, input_shape=input_shape, activation='elu', kernel_initializer='he_normal'))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=[keras.metrics.categorical_accuracy])
    return model
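A hedged usage sketch with random data (shapes are illustrative):

import numpy as np
from keras.utils import to_categorical

model = largeann(input_shape=(300,), n_classes=5)
X = np.random.rand(32, 300)
y = to_categorical(np.random.randint(5, size=32), num_classes=5)
model.fit(X, y, epochs=1, batch_size=8)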
#%% everything recurrent for ANN
def create_actor_network(self, state_size, action_dim):
    print("Now we build the model")
    # Batch-norm version of the actor network.
    S = Input(shape=[state_size])
    s1 = BatchNormalization()(S)
    s1 = Dense(HIDDEN1_UNITS)(s1)
    s1 = BatchNormalization()(s1)
    s1 = Activation('relu')(s1)
    s1 = Dense(HIDDEN2_UNITS)(s1)
    s1 = BatchNormalization()(s1)
    h1 = Activation('relu')(s1)
    # Separate output heads for steering, acceleration and brake.
    Steering = Dense(1, activation='tanh')(h1)
    Acceleration = Dense(1, activation='sigmoid')(h1)
    Brake = Dense(1, activation='sigmoid')(h1)
    # V = merge([Steering, Acceleration, Brake], mode='concat')  # Keras 1 API
    V = layers.concatenate([Steering, Acceleration, Brake])
    model = Model(inputs=S, outputs=V)
    return model, model.trainable_weights, S
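HIDDEN1_UNITS and HIDDEN2_UNITS are module-level constants in the original project; a hedged usage sketch assuming the common DDPG sizing of 300/600 (state_size=29 is likewise an assumption, and `agent` stands in for whatever object owns this method):

HIDDEN1_UNITS = 300  # assumed value
HIDDEN2_UNITS = 600  # assumed value
actor_model, trainable_weights, state_input = agent.create_actor_network(state_size=29, action_dim=3)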
def make_model(batch_size, image_dim):
    model = Sequential()
    model.add(BatchNormalization(batch_input_shape=(batch_size, image_dim[1], image_dim[2], 1)))
    model.add(Conv2D(16, [3, 3], activation='relu', padding='same'))
    # model.add(Dropout(0.2))
    model.add(Conv2D(32, [3, 3], activation='relu', padding='same'))
    # model.add(Dropout(0.2))
    model.add(Conv2D(64, [3, 3], activation='relu', padding='same'))
    model.add(Dropout(0.2))
    # Per-pixel sigmoid output, e.g. for binary segmentation masks.
    model.add(Conv2D(1, kernel_size=1, padding='same', activation='sigmoid'))
    return model
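A hedged usage sketch; image_dim is indexed as (count, height, width) above, so:

model = make_model(batch_size=8, image_dim=(1000, 64, 64))  # illustrative dimensions
model.compile(loss='binary_crossentropy', optimizer='adam')
model.summary()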
def repeated_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1, kernel_size, strides=2, padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, strides=2, padding='same')(out)
    # Two stride-2 convolutions downsample by 4; the pooled shortcut uses
    # stride 4 so both branches line up for the residual add.
    pooling = MaxPooling1D(pooling_size, strides=4, padding='same')(x)
    out = add([out, pooling])
    # out = merge([out, pooling])  # Keras 1 API
    return out
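A hedged sketch wiring first_block and repeated_block together; the second filter count (64) matches the input's channel count so each residual add lines up:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(256, 64))        # (timesteps, channels); illustrative
x = first_block(inp, (32, 64))      # keeps length, 64 output channels
x = repeated_block(x, (32, 64))     # downsamples length by 4
model = Model(inputs=inp, outputs=x)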
def test_keras_import(self):
    model = Sequential()
    model.add(BatchNormalization(center=True, scale=True, beta_regularizer=regularizers.l2(0.01),
                                 gamma_regularizer=regularizers.l2(0.01),
                                 beta_constraint='max_norm', gamma_constraint='max_norm',
                                 input_shape=(10, 16)))
    model.build()
    json_string = model.to_json()
    with open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'w') as out:
        json.dump(json.loads(json_string), out, indent=4)
    sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'r')
    response = self.client.post(reverse('keras-import'), {'file': sample_file})
    response = json.loads(response.content)
    layerId = sorted(response['net'].keys())
    self.assertEqual(response['result'], 'success')
    # The importer splits Keras BatchNormalization into Caffe-style Scale + BatchNorm layers.
    self.assertEqual(response['net'][layerId[0]]['info']['type'], 'Scale')
    self.assertEqual(response['net'][layerId[1]]['info']['type'], 'BatchNorm')
# ********** Noise Layers **********
def test_keras_export(self):
    tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                              'keras_export_test.json'), 'r')
    response = json.load(tests)
    tests.close()
    net = yaml.safe_load(json.dumps(response['net']))
    net = {'l0': net['Input'], 'l1': net['BatchNorm'], 'l2': net['Scale']}
    net['l0']['connection']['output'].append('l1')
    # Test 1: default fillers
    inp = data(net['l0'], '', 'l0')['l0']
    temp = batch_norm(net['l1'], [inp], 'l1', 'l2', net['l2'])
    model = Model(inp, temp['l2'])
    self.assertEqual(model.layers[1].__class__.__name__, 'BatchNormalization')
    # Test 2: VarianceScaling fillers on the Scale layer
    net['l2']['params']['filler'] = 'VarianceScaling'
    net['l2']['params']['bias_filler'] = 'VarianceScaling'
    inp = data(net['l0'], '', 'l0')['l0']
    temp = batch_norm(net['l1'], [inp], 'l1', 'l2', net['l2'])
    model = Model(inp, temp['l2'])
    self.assertEqual(model.layers[1].__class__.__name__, 'BatchNormalization')
    # Test 3: BatchNorm without a following Scale layer
    inp = data(net['l0'], '', 'l0')['l0']
    temp = batch_norm(net['l1'], [inp], 'l1', 'l0', net['l0'])
    model = Model(inp, temp['l1'])
    self.assertEqual(model.layers[1].__class__.__name__, 'BatchNormalization')
def conv2d_bn(x, nb_filter, nb_row, nb_col,
              border_mode='same', subsample=(1, 1),
              name=None):
    '''Utility function to apply conv + BN (Keras 1 API).
    '''
    if name is not None:
        bn_name = name + '_bn'
        conv_name = name + '_conv'
    else:
        bn_name = None
        conv_name = None
    # Channel axis depends on the backend's image dim ordering.
    if K.image_dim_ordering() == 'th':
        bn_axis = 1
    else:
        bn_axis = 3
    x = Convolution2D(nb_filter, nb_row, nb_col,
                      subsample=subsample,
                      activation='relu',
                      border_mode=border_mode,
                      name=conv_name)(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name)(x)
    return x
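Under Keras 2 the same helper would use Conv2D with strides/padding and K.image_data_format(); a hedged equivalent sketch:

from keras import backend as K
from keras.layers import Conv2D, BatchNormalization

def conv2d_bn_v2(x, filters, num_row, num_col, padding='same', strides=(1, 1), name=None):
    bn_name = name + '_bn' if name else None
    conv_name = name + '_conv' if name else None
    # channels_first puts the channel axis at 1, channels_last at 3.
    bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
    x = Conv2D(filters, (num_row, num_col), strides=strides, padding=padding,
               activation='relu', name=conv_name)(x)
    return BatchNormalization(axis=bn_axis, name=bn_name)(x)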
def keepsize_256(nx, ny, noise, depth, activation='relu', n_filters=64, l2_reg=1e-7):
    """
    Deep residual network that keeps the size of the input throughout the whole network.
    """
    def residual(inputs, n_filters):
        x = ReflectionPadding2D()(inputs)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = Activation(activation)(x)
        x = ReflectionPadding2D()(x)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = add([x, inputs])
        return x

    inputs = Input(shape=(nx, ny, 1))
    x = GaussianNoise(noise)(inputs)
    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x0 = Activation(activation)(x)
    x = residual(x0, n_filters)
    for i in range(depth - 1):
        x = residual(x, n_filters)
    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization()(x)
    x = add([x, x0])
    # Upsampling for super-resolution.
    x = UpSampling2D()(x)
    x = ReflectionPadding2D()(x)
    x = Conv2D(4 * n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x = Activation(activation)(x)
    final = Conv2D(1, (1, 1), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    return Model(inputs=inputs, outputs=final)
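ReflectionPadding2D is not a stock Keras layer; the original project supplies its own. A minimal sketch of such a layer on the TensorFlow backend, assuming 1-pixel padding to offset the 3x3 'valid' convolutions:

import tensorflow as tf
from keras.layers import Layer

class ReflectionPadding2D(Layer):
    def __init__(self, padding=(1, 1), **kwargs):
        self.padding = tuple(padding)
        super(ReflectionPadding2D, self).__init__(**kwargs)

    def compute_output_shape(self, input_shape):
        h_pad, w_pad = self.padding
        return (input_shape[0], input_shape[1] + 2 * h_pad,
                input_shape[2] + 2 * w_pad, input_shape[3])

    def call(self, x):
        h_pad, w_pad = self.padding
        # Mirror the border pixels instead of zero-padding.
        return tf.pad(x, [[0, 0], [h_pad, h_pad], [w_pad, w_pad], [0, 0]], mode='REFLECT')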
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    # Pre-activation 3D residual block (Keras 1 API).
    subsample = (subsample_factor, subsample_factor, subsample_factor)
    x = BatchNormalization(axis=4)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=(1, 1, 1), border_mode='same')(x)
    # 1x1x1 projection shortcut when downsampling, identity otherwise.
    if subsample_factor > 1:
        shortcut = Convolution3D(nb_filters, 1, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor
    x = merge([x, shortcut], mode='sum')
    return x
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    # Pre-activation 2D residual block (Keras 1 API).
    subsample = (subsample_factor, subsample_factor)
    x = BatchNormalization(axis=3)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filters, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filters, 3, 3, subsample=(1, 1), border_mode='same')(x)
    # 1x1 projection shortcut when downsampling, identity otherwise.
    if subsample_factor > 1:
        shortcut = Convolution2D(nb_filters, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor
    x = merge([x, shortcut], mode='sum')
    return x
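Both blocks use the Keras 1 API (Convolution2D/3D with subsample and border_mode, merge with mode='sum'). A hedged Keras 2 translation of the 2D variant:

from keras.layers import BatchNormalization, Activation, Conv2D, add

def res_block_v2(input_tensor, nb_filters=16, subsample_factor=1):
    strides = (subsample_factor, subsample_factor)
    # Pre-activation ordering: BN -> ReLU -> Conv.
    x = BatchNormalization(axis=3)(input_tensor)
    x = Activation('relu')(x)
    x = Conv2D(nb_filters, (3, 3), strides=strides, padding='same')(x)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)
    x = Conv2D(nb_filters, (3, 3), padding='same')(x)
    # 1x1 projection shortcut when downsampling, identity otherwise.
    if subsample_factor > 1:
        shortcut = Conv2D(nb_filters, (1, 1), strides=strides, padding='same')(input_tensor)
    else:
        shortcut = input_tensor
    return add([x, shortcut])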
def prep_model(inputs, N, s0pad, s1pad, c):
    # Word-level projection before max-pooling over time.
    inputs[0] = TimeDistributed(Dense(N, activation='relu'))(inputs[0])
    inputs[0] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N,))(inputs[0])
    inputs[1] = TimeDistributed(Dense(N, activation='relu'))(inputs[1])
    inputs[1] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N,))(inputs[1])
    merged = concatenate([inputs[0], inputs[1]])
    # Deep fully-connected stack on the merged representation.
    for i in range(c['deep']):
        merged = Dense(c['nndim'], activation=c['nnact'])(merged)
        merged = Dropout(c['nndropout'])(merged)
        merged = BatchNormalization()(merged)
    is_duplicate = Dense(1, activation='sigmoid')(merged)
    return [is_duplicate], N
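A hedged usage sketch; the config keys mirror those read above, and the shapes are illustrative:

from keras.layers import Input

c = {'deep': 2, 'nndim': 300, 'nnact': 'relu', 'nndropout': 0.5}
s0 = Input(shape=(60, 300))   # (s0pad, embedding dim); illustrative
s1 = Input(shape=(60, 300))
outputs, N = prep_model([s0, s1], N=300, s0pad=60, s1pad=60, c=c)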