def cnn_word_model(self):
    embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim'],))
    outputs = []
    for i in range(len(self.kernel_sizes)):
        output_i = Conv1D(self.opt['filters_cnn'], kernel_size=self.kernel_sizes[i], activation=None,
                          kernel_regularizer=l2(self.opt['regul_coef_conv']), padding='same')(embed_input)
        output_i = BatchNormalization()(output_i)
        output_i = Activation('relu')(output_i)
        output_i = GlobalMaxPooling1D()(output_i)
        outputs.append(output_i)
    output = concatenate(outputs, axis=1)
    output = Dropout(rate=self.opt['dropout_rate'])(output)
    output = Dense(self.opt['dense_dim'], activation=None,
                   kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
    output = BatchNormalization()(output)
    output = Activation('relu')(output)
    output = Dropout(rate=self.opt['dropout_rate'])(output)
    output = Dense(1, activation=None, kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
    output = BatchNormalization()(output)
    act_output = Activation('sigmoid')(output)
    model = Model(inputs=embed_input, outputs=act_output)
    return model
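# Usage sketch for cnn_word_model: the method only reads self.opt and
# self.kernel_sizes, so a minimal stand-in object is enough to build the
# graph (the hyperparameter values below are illustrative assumptions).
class _Opts:
    opt = {'max_sequence_length': 100, 'embedding_dim': 300, 'filters_cnn': 128,
           'regul_coef_conv': 1e-4, 'dense_dim': 100, 'regul_coef_dense': 1e-4,
           'dropout_rate': 0.5}
    kernel_sizes = [1, 2, 3]

model = cnn_word_model(_Opts())
model.summary()  # parallel Conv1D branches, max-pooled and concatenated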
def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    # Merge noise and auxiliary inputs
    gen_input = Input(shape=(noise_dim,), name="noise_input")
    aux_input = Input(shape=(aux_dim,), name="auxilary_input")
    x = concatenate([gen_input, aux_input], axis=-1)
    # Dense Layer 1
    x = Dense(10 * 100)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)  # output shape is 10*100
    # Reshape the tensors to support CNNs
    x = Reshape((100, 10))(x)  # shape is 100 x 10
    # Conv Layer 1
    x = Conv1D(filters=250, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)  # output shape is 100 x 250
    x = UpSampling1D(size=2)(x)  # output shape is 200 x 250
    # Conv Layer 2
    x = Conv1D(filters=100, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)  # output shape is 200 x 100
    x = UpSampling1D(size=2)(x)  # output shape is 400 x 100
    # Conv Layer 3
    x = Conv1D(filters=1, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('tanh')(x)  # final output shape is 400 x 1
    generator_model = Model(
        outputs=[x], inputs=[gen_input, aux_input], name=model_name)
    return generator_model
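# Sanity-check sketch for the generator above: feed a small batch of noise
# and auxiliary vectors and confirm the (400, 1) output shape traced in the
# comments (the input data here is random and purely illustrative).
import numpy as np

gen = generator_model()
noise = np.random.normal(size=(2, 100)).astype('float32')
aux = np.random.normal(size=(2, 47)).astype('float32')
fake = gen.predict([noise, aux])
print(fake.shape)  # (2, 400, 1)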
def get_model():
    inputs = Input(shape=(64, 64, 3))
    conv_1 = Conv2D(1, (3, 3), strides=(1, 1), padding='same')(inputs)
    act_1 = Activation('relu')(conv_1)
    conv_2 = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(act_1)
    act_2 = Activation('relu')(conv_2)
    deconv_1 = Conv2DTranspose(64, (3, 3), strides=(1, 1), padding='same')(act_2)
    act_3 = Activation('relu')(deconv_1)
    merge_1 = concatenate([act_3, act_1], axis=3)
    deconv_2 = Conv2DTranspose(1, (3, 3), strides=(1, 1), padding='same')(merge_1)
    act_4 = Activation('relu')(deconv_2)
    model = Model(inputs=[inputs], outputs=[act_4])
    model.compile(optimizer='adadelta', loss=dice_coef_loss, metrics=[dice_coef])
    return model
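# get_model compiles with dice_coef_loss / dice_coef, which this snippet
# does not define. A common smoothed Dice formulation is sketched below; it
# is an assumption that the original helpers match it.
from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)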
def create_actor_network(self, state_size, action_dim):
    print("Now we build the model")
    # Batch norm version
    S = Input(shape=[state_size])
    s1 = BatchNormalization()(S)
    s1 = Dense(HIDDEN1_UNITS)(s1)
    s1 = BatchNormalization()(s1)
    s1 = Activation('relu')(s1)
    s1 = Dense(HIDDEN2_UNITS)(s1)
    s1 = BatchNormalization()(s1)
    h1 = Activation('relu')(s1)
    Steering = Dense(1, activation='tanh')(h1)
    Acceleration = Dense(1, activation='sigmoid')(h1)
    Brake = Dense(1, activation='sigmoid')(h1)
    # V = merge([Steering,Acceleration,Brake],mode='concat')
    V = layers.concatenate([Steering, Acceleration, Brake])
    model = Model(inputs=S, outputs=V)
    return model, model.trainable_weights, S
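# HIDDEN1_UNITS and HIDDEN2_UNITS are module-level constants in this DDPG
# actor code; the widely circulated DDPG-for-TORCS implementation uses the
# values below (an assumption about this particular source).
HIDDEN1_UNITS = 300
HIDDEN2_UNITS = 600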
def discriminator_model(model_name="discriminator"):
    disc_input = Input(shape=(400, 1), name="discriminator_input")
    aux_input = Input(shape=(47,), name="auxilary_input")
    # Conv Layer 1
    x = Conv1D(filters=100, kernel_size=13, padding='same')(disc_input)
    x = LeakyReLU(0.2)(x)  # output shape is 100 x 400
    x = AveragePooling1D(pool_size=20)(x)  # output shape is 100 x 20
    # Conv Layer 2
    x = Conv1D(filters=250, kernel_size=13, padding='same')(x)
    x = LeakyReLU(0.2)(x)  # output shape is 250 x 20
    x = AveragePooling1D(pool_size=5)(x)  # output shape is 250 x 4
    # Conv Layer 3
    x = Conv1D(filters=300, kernel_size=13, padding='same')(x)
    x = LeakyReLU(0.2)(x)  # output shape is 300 x 4
    x = Flatten()(x)  # output shape is 1200
    x = concatenate([x, aux_input], axis=-1)  # shape is 1247
    # Dense Layer 1
    x = Dense(200)(x)
    x = LeakyReLU(0.2)(x)  # output shape is 200
    # Dense Layer 2
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)
    discriminator_model = Model(
        outputs=[x], inputs=[disc_input, aux_input], name=model_name)
    return discriminator_model
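# Sketch of stacking the generator and discriminator above into a combined
# GAN graph (an assumed, standard setup; the optimizer and loss choices are
# illustrative, not taken from the original source).
from keras.layers import Input
from keras.models import Model

d = discriminator_model()
g = generator_model()
d.compile(optimizer='adam', loss='binary_crossentropy')
d.trainable = False  # freeze D while training G through the stacked model
noise_in = Input(shape=(100,))
aux_in = Input(shape=(47,))
gan = Model([noise_in, aux_in], d([g([noise_in, aux_in]), aux_in]))
gan.compile(optimizer='adam', loss='binary_crossentropy')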
# Imports assumed by this helper (the original snippet omits them):
import os
import numpy as np
from scipy.io import netcdf
from sklearn import preprocessing

def load_data(data_dir, num_files=30):
    files_list = os.listdir(data_dir)
    data = None
    ac_data = None
    for fname in files_list[:num_files]:
        print(fname)
        f = os.path.join(data_dir, fname)
        with netcdf.netcdf_file(f, 'r') as fid:
            m = fid.variables['outputMeans'][:].copy()
            s = fid.variables['outputStdevs'][:].copy()
            feats = fid.variables['targetPatterns'][:].copy()
            ac_feats = fid.variables['inputs'][:].copy()
        scaler = preprocessing.StandardScaler()
        scaler.mean_ = m
        scaler.scale_ = s
        feats = scaler.inverse_transform(feats)
        assert feats.shape[0] == ac_feats.shape[0]
        # feats = np.concatenate((feats,ac_feats),axis=1)
        if data is None and ac_data is None:  # '== None' breaks on numpy arrays
            data = feats
            ac_data = ac_feats
        else:
            data = np.vstack((data, feats))
            ac_data = np.vstack((ac_data, ac_feats))
    return data, ac_data
def fire_module(x, fire_id, squeeze=16, expand=64):
    s_id = 'fire' + str(fire_id) + '/'
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3
    x = Convolution2D(squeeze, (1, 1), padding='valid', name=s_id + sq1x1)(x)
    x = Activation('relu', name=s_id + relu + sq1x1)(x)
    left = Convolution2D(expand, (1, 1), padding='valid', name=s_id + exp1x1)(x)
    left = Activation('relu', name=s_id + relu + exp1x1)(left)
    right = Convolution2D(expand, (3, 3), padding='same', name=s_id + exp3x3)(x)
    right = Activation('relu', name=s_id + relu + exp3x3)(right)
    x = concatenate([left, right], axis=channel_axis, name=s_id + 'concat')
    return x
# Original SqueezeNet from paper.
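# fire_module above relies on module-level name constants from the reference
# Keras SqueezeNet port; they are usually defined as follows (an assumption
# that this source follows the same convention):
sq1x1 = "squeeze1x1"
exp1x1 = "expand1x1"
exp3x3 = "expand3x3"
relu = "relu_"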
def prep_model(inputs, N, s0pad, s1pad, c):
    # Word-level projection before averaging
    inputs[0] = TimeDistributed(Dense(N, activation='relu'))(inputs[0])
    inputs[0] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[0])
    inputs[1] = TimeDistributed(Dense(N, activation='relu'))(inputs[1])
    inputs[1] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[1])
    merged = concatenate([inputs[0], inputs[1]])
    # Deep
    for i in range(c['deep']):
        merged = Dense(c['nndim'], activation=c['nnact'])(merged)
        merged = Dropout(c['nndropout'])(merged)
        merged = BatchNormalization()(merged)
    is_duplicate = Dense(1, activation='sigmoid')(merged)
    return [is_duplicate], N
def build_discriminator(self):
    z = Input(shape=(self.latent_dim, ))
    img = Input(shape=self.img_shape)
    d_in = concatenate([z, Flatten()(img)])
    model = Dense(1024)(d_in)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    validity = Dense(1, activation="sigmoid")(model)
    return Model([z, img], validity)
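# Usage sketch: build_discriminator only reads self.latent_dim and
# self.img_shape, so a stand-in object suffices (the values below are
# illustrative assumptions).
class _Cfg:
    latent_dim = 100
    img_shape = (28, 28, 1)

D = build_discriminator(_Cfg())
D.summary()  # joint discriminator over (z, image) pairs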
def test_tiny_concat_random(self):
    np.random.seed(1988)
    input_dim = 10
    num_channels = 6
    # Define a model
    input_tensor = Input(shape=(input_dim,))
    x1 = Dense(num_channels)(input_tensor)
    x2 = Dense(num_channels)(x1)
    x3 = Dense(num_channels)(x1)
    x4 = concatenate([x2, x3])
    x5 = Dense(num_channels)(x4)
    model = Model(inputs=[input_tensor], outputs=[x5])
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_keras_model(model)
def test_tiny_concat_seq_random(self):
    np.random.seed(1988)
    max_features = 10
    embedding_dims = 4
    seq_len = 5
    num_channels = 6
    # Define a model
    input_tensor = Input(shape=(seq_len,))
    x1 = Embedding(max_features, embedding_dims)(input_tensor)
    x2 = Embedding(max_features, embedding_dims)(input_tensor)
    x3 = concatenate([x1, x2], axis=1)
    model = Model(inputs=[input_tensor], outputs=[x3])
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Get the coreml model
    self._test_keras_model(model, one_dim_seq_flags=[True])
def test_shared_vision(self):
    digit_input = Input(shape=(27, 27, 1))
    x = Conv2D(64, (3, 3))(digit_input)
    x = Conv2D(64, (3, 3))(x)
    out = Flatten()(x)
    vision_model = Model(inputs=[digit_input], outputs=[out])
    # then define the tell-digits-apart model
    digit_a = Input(shape=(27, 27, 1))
    digit_b = Input(shape=(27, 27, 1))
    # the vision model will be shared, weights and all
    out_a = vision_model(digit_a)
    out_b = vision_model(digit_b)
    concatenated = concatenate([out_a, out_b])
    out = Dense(1, activation='sigmoid')(concatenated)
    model = Model(inputs=[digit_a, digit_b], outputs=out)
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    self._test_keras_model(model)
def test_dense_elementwise_params(self):
    options = dict(
        modes=[add, multiply, concatenate, average, maximum]
    )

    def build_model(mode):
        x1 = Input(shape=(3,))
        x2 = Input(shape=(3,))
        y1 = Dense(4)(x1)
        y2 = Dense(4)(x2)
        z = mode([y1, y2])
        model = Model([x1, x2], z)
        return mode, model

    product = itertools.product(*options.values())
    args = [build_model(p[0]) for p in product]
    print("Testing a total of %s cases. This could take a while" % len(args))
    for param, model in args:
        self._run_test(model, param)
def test_tiny_image_captioning_feature_merge(self):
    img_input_1 = Input(shape=(16, 16, 3))
    x = Conv2D(2, (3, 3))(img_input_1)
    x = Flatten()(x)
    img_model = Model([img_input_1], [x])
    img_input = Input(shape=(16, 16, 3))
    x = img_model(img_input)
    x = Dense(8, name='cap_dense')(x)
    x = Reshape((1, 8), name='cap_reshape')(x)
    sentence_input = Input(shape=(5,))  # max_length = 5
    y = Embedding(8, 8, name='cap_embedding')(sentence_input)
    z = concatenate([x, y], axis=1, name='cap_merge')
    combined_model = Model(inputs=[img_input, sentence_input], outputs=[z])
    self._test_keras_model(combined_model, one_dim_seq_flags=[False, True])
def test_tiny_image_captioning(self):
    # use a conv layer as a image feature branch
    img_input_1 = Input(shape=(16, 16, 3))
    x = Conv2D(2, (3, 3))(img_input_1)
    x = Flatten()(x)
    img_model = Model(inputs=[img_input_1], outputs=[x])
    img_input = Input(shape=(16, 16, 3))
    x = img_model(img_input)
    x = Dense(8, name='cap_dense')(x)
    x = Reshape((1, 8), name='cap_reshape')(x)
    sentence_input = Input(shape=(5,))  # max_length = 5
    y = Embedding(8, 8, name='cap_embedding')(sentence_input)
    z = concatenate([x, y], axis=1, name='cap_merge')
    z = LSTM(4, return_sequences=True, name='cap_lstm')(z)
    z = TimeDistributed(Dense(8), name='cap_timedistributed')(z)
    combined_model = Model(inputs=[img_input, sentence_input], outputs=[z])
    self._test_keras_model(combined_model, one_dim_seq_flags=[False, True])
def global_handle(self, emb_layer, flag):
    fw_lstm_out = self.forward_lstm(emb_layer)
    bw_lstm_out = self.backward_lstm(emb_layer)
    conv_out = self.conv_dropout(self.conv(emb_layer))
    fw_lstm_out = TimeDistributed(Dense(self.params['attention_dim']), name='fw_tb_' + flag)(fw_lstm_out)
    fw_lstm_att = Attention()(fw_lstm_out)
    # fw_lstm_att = Reshape((self.params['lstm_output_dim'], 1))(fw_lstm_att)
    conv_out = TimeDistributed(Dense(self.params['attention_dim']), name='conv_tb_' + flag)(conv_out)
    conv_att = Attention()(conv_out)
    # conv_att = Reshape((self.params['filters'], 1))(conv_att)
    bw_lstm_out = TimeDistributed(Dense(self.params['attention_dim']), name='bw_tb_' + flag)(bw_lstm_out)
    bw_lstm_att = Attention()(bw_lstm_out)
    # bw_lstm_att = Reshape((self.params['lstm_output_dim'], 1))(bw_lstm_att)
    return concatenate([fw_lstm_att, conv_att, bw_lstm_att], axis=2)
def attention_step(
    self,
    attended,
    attention_states,
    step_input,
    recurrent_states
):
    [attention_tm1, kappa_tm1] = attention_states
    params = self.params_layer(
        concatenate([step_input, recurrent_states[0]])
    )
    attention, kappa = self._get_attention_and_kappa(
        attended,
        params,
        kappa_tm1
    )
    return attention, [attention, kappa]
def QRNcell():
    xq = Input(batch_shape=(batch_size, embedding_dim * 2))
    # Split into context and query
    xt = Lambda(lambda x, dim: x[:, :dim], arguments={'dim': embedding_dim},
                output_shape=lambda s: (s[0], s[1] // 2))(xq)  # '//' keeps the dim an int in Python 3
    qt = Lambda(lambda x, dim: x[:, dim:], arguments={'dim': embedding_dim},
                output_shape=lambda s: (s[0], s[1] // 2))(xq)
    h_tm1 = Input(batch_shape=(batch_size, embedding_dim))
    zt = Dense(1, activation='sigmoid', bias_initializer=Constant(2.5))(multiply([xt, qt]))
    zt = Lambda(lambda x, dim: K.repeat_elements(x, dim, axis=1), arguments={'dim': embedding_dim})(zt)
    ch = Dense(embedding_dim, activation='tanh')(concatenate([xt, qt], axis=-1))
    rt = Dense(1, activation='sigmoid')(multiply([xt, qt]))
    rt = Lambda(lambda x, dim: K.repeat_elements(x, dim, axis=1), arguments={'dim': embedding_dim})(rt)
    ht = add([multiply([zt, ch, rt]), multiply([Lambda(lambda x: 1 - x, output_shape=lambda s: s)(zt), h_tm1])])
    return RecurrentModel(input=xq, output=ht, initial_states=[h_tm1], final_states=[ht], return_sequences=True)
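# QRNcell reads module-level batch_size and embedding_dim and returns a
# recurrentshop RecurrentModel; illustrative values (assumed, the original
# configuration is not shown):
batch_size = 32
embedding_dim = 50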
#
# Load data
#
def generate_data(num_samples, max_len):
    values = np.random.normal(size=[num_samples, max_len, 1])
    mask = np.zeros([num_samples, max_len, 1])
    answers = np.zeros([num_samples, 1])
    for i in range(num_samples):
        j1, j2 = 0, 0
        while j1 == j2:
            j1 = np.random.randint(max_len)
            j2 = np.random.randint(max_len)
        mask[i, (j1, j2)] = 1.0
        answers[i] = np.sum(values[i] * mask[i])
    data = np.concatenate((values, mask), 2)
    return data, answers
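# Quick usage sketch: values and mask are stacked on the feature axis, so a
# call like the one below yields data of shape (4, 10, 2) and answers of
# shape (4, 1).
data, answers = generate_data(num_samples=4, max_len=10)
print(data.shape, answers.shape)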
#####################################################################
# RWA layer
#####################################################################
def yolov2_detector(feature_map,
                    fine_grained_layers):
    """
    Original YOLOv2 Implementation
    :param feature_map:
    :param fine_grained_layers:
    :return:
    """
    layer = fine_grained_layers[0]
    x = conv_block(feature_map, 1024, (3, 3))
    x = conv_block(x, 1024, (3, 3))
    x2 = x
    connected_layer = conv_block(layer, 64, (1, 1))
    rerouted_layer = Reroute(block_size=2,
                             name='space_to_depth_x2')(connected_layer)
    x = concatenate([rerouted_layer, x2])
    x = conv_block(x, 1024, (3, 3))
    return x
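# Reroute above is a custom space-to-depth layer whose definition is not
# shown; a minimal stand-in can be built from tf.nn.space_to_depth (an
# assumption that this reproduces the original layer's behavior).
import tensorflow as tf
from keras.layers import Lambda

def Reroute(block_size, name=None):
    return Lambda(lambda t: tf.nn.space_to_depth(t, block_size), name=name)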
def mobilenet_detector(feature_map,
                       fine_grained_layers):
    """
    MobileNet Detector Implementation
    :param feature_map:
    :param fine_grained_layers:
    :return:
    """
    x = _depthwise_conv_block(feature_map, 1024, 1.0, block_id=14)
    x = _depthwise_conv_block(x, 1024, 1.0, block_id=15)
    # Reroute
    # NOTE: the original snippet passed kernel-size tuples where
    # _depthwise_conv_block expects a float width multiplier (alpha); fixed
    # below, with explicit block ids (assumed free) to avoid layer-name clashes.
    concat_layers = [x]
    for i, layer in enumerate(fine_grained_layers):
        connected_layer = _depthwise_conv_block(layer, 64, 1.0, block_id=16 + i)
        rerouted_layer = Reroute(block_size=2, name='space_to_depth_x2_%d' % i)(connected_layer)
        concat_layers.append(rerouted_layer)
    x = concatenate(concat_layers)
    x = _depthwise_conv_block(x, 1024, 1.0, block_id=16 + len(fine_grained_layers))
    return x
def create_actor_network(self, state_size, action_dim):
    print("Now we build the model")
    S = Input(shape=[state_size])
    h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
    h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)
    # Steering = Dense(1,activation='tanh',init=lambda shape:VarianceScaling(scale=1e-4)(shape))(h1)
    # Acceleration = Dense(1,activation='sigmoid',lambda shape:VarianceScaling(scale=1e-4)(shape))(h1)
    # Brake = Dense(1,activation='sigmoid',lambda shape:VarianceScaling(scale=1e-4)(shape))(h1)
    Steering = Dense(1, activation='tanh')(h1)
    Acceleration = Dense(1, activation='sigmoid')(h1)
    Brake = Dense(1, activation='sigmoid')(h1)
    # V = merge([Steering,Acceleration,Brake],mode='concat')
    V = layers.concatenate([Steering, Acceleration, Brake])
    model = Model(inputs=S, outputs=V)
    return model, model.trainable_weights, S
def create_actor_network(self, state_size, action_dim):
    ## original version
    print("Now we build the model")
    S = Input(shape=[state_size])
    h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
    h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)
    Steering = Dense(1, activation='tanh')(h1)
    Acceleration = Dense(1, activation='sigmoid')(h1)
    Brake = Dense(1, activation='sigmoid')(h1)
    # V = merge([Steering,Acceleration,Brake],mode='concat')
    V = layers.concatenate([Steering, Acceleration, Brake])
    model = Model(inputs=S, outputs=V)
    return model, model.trainable_weights, S
def create_model(config_dict,
                 compile_model=True):
    image_inputs = Input(shape=(4096,), name="image_model_input")
    image_model = _create_image_model(config_dict=config_dict,
                                      image_inputs=image_inputs)
    language_inputs = Input(shape=(config_dict['max_caption_length'],),
                            name="language_model_input")
    language_model = _create_language_model(config_dict=config_dict,
                                            language_inputs=language_inputs)
    merged_input = concatenate([image_model, language_model],
                               name="concatenate_image_language")
    merged_input = LSTM(1000,
                        return_sequences=False,
                        name="merged_model_lstm")(merged_input)
    softmax_output = Dense(units=config_dict["vocabulary_size"],
                           activation="softmax",
                           name="merged_model_softmax")(merged_input)
    model = Model(inputs=[image_inputs,
                          language_inputs], outputs=softmax_output)
    print(model.summary())
    if compile_model:
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
    return model
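# Usage sketch (the config values are illustrative; _create_image_model and
# _create_language_model are project helpers that are not shown here).
config = {'max_caption_length': 16, 'vocabulary_size': 10000}
caption_model = create_model(config_dict=config, compile_model=True)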
# Imports assumed by this helper (the original snippet omits them);
# read_binary_file is a project-specific reader that is not shown here.
import os
import numpy as np
from sklearn import preprocessing

def load_test_data(data_dir, mean_vec, std_vec):
    files_list = os.listdir(data_dir)
    ac_data = None
    for f in files_list:
        fname, ext = os.path.splitext(f)
        if ext == '.lf0':
            print(fname)
            lf0_file = os.path.join(data_dir, f)
            gain_file = os.path.join(data_dir, fname + '.gain')
            lsf_file = os.path.join(data_dir, fname + '.lsf')
            slsf_file = os.path.join(data_dir, fname + '.slsf')
            hnr_file = os.path.join(data_dir, fname + '.hnr')
            lf0_data = read_binary_file(lf0_file, dim=1)
            lsf_data = read_binary_file(lsf_file, dim=30)
            slsf_data = read_binary_file(slsf_file, dim=10)
            hnr_data = read_binary_file(hnr_file, dim=5)
            gain_data = read_binary_file(gain_file, dim=1)
            print(lsf_data.shape, gain_data.shape, lf0_data.shape, hnr_data.shape, slsf_data.shape)
            # [lsf gain lf0 hnr slsf]
            data = np.concatenate(
                (lsf_data, gain_data, lf0_data, hnr_data, slsf_data), axis=1)
            print(data.shape)
            scaler = preprocessing.StandardScaler()
            scaler.mean_ = mean_vec
            scaler.scale_ = std_vec
            data = scaler.transform(data)
            out_file = os.path.join(data_dir, fname + '.cmp')
            with open(out_file, 'wb') as fid:  # binary mode: ndarray.tofile writes raw bytes
                data.tofile(fid)
    return ac_data
def test_keras_import(self):
    img_input = Input((224, 224, 3))
    model = Conv2D(64, (3, 3), padding='same')(img_input)
    model = concatenate([img_input, model])
    model = Model(img_input, model)
    self.keras_type_test(model, 0, 'Concat')
def concat(layer, layer_in, layerId):
    out = {layerId: concatenate(layer_in)}
    return out
# ********** Noise Layers **********
def mlp_ptscorer(inputs, Ddim, N, l2reg, pfx='out', Dinit='glorot_uniform', sum_mode='sum', extra_inp=[]):
    """ Element-wise features from the pair fed to an MLP. """
    linear = Activation('linear')
    if sum_mode == 'absdiff':
        absdiff = Lambda(function=lambda x: K.abs(x[0] - x[1]),
                         output_shape=lambda shape: shape[0])
        # model.add_node(name=pfx+'sum', layer=absdiff_merge(model, inputs))
        mlp_inputs = absdiff(inputs)
    elif sum_mode == 'sum':
        outsum = linear(add(inputs))
        outmul = linear(multiply(inputs))
        mlp_inputs = [outsum, outmul] + extra_inp

    def mlp_args(mlp_inputs):
        """ return model.add_node() args that are good for mlp_inputs list
        of both length 1 and more than 1. """
        if isinstance(mlp_inputs, list):
            mlp_inputs = concatenate(mlp_inputs)
        return mlp_inputs

    # Ddim may be either 0 (no hidden layer), scalar (single hidden layer) or
    # list (multiple hidden layers)
    if Ddim == 0:
        mlp_inputs = mlp_args(mlp_inputs)
        Ddim = []
    elif not isinstance(Ddim, list):
        Ddim = [Ddim]
    if Ddim:
        for i, D in enumerate(Ddim):
            mlp_inputs = Dense(int(N * D), activation='tanh', kernel_initializer=Dinit, kernel_regularizer=l2(l2reg))(mlp_args(mlp_inputs))
            # model.add_node(name=pfx+'hdn[%d]'%(i,),
            #                layer=Dense(output_dim=int(N*D), W_regularizer=l2(l2reg), activation='tanh', init=Dinit),
            #                **mlp_args(mlp_inputs))
            # mlp_inputs = [pfx+'hdn[%d]'%(i,)]
    outmlp = Dense(1, kernel_regularizer=l2(l2reg))(mlp_inputs)
    return outmlp
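# Usage sketch for the scorer above (assumed setup: two already-encoded
# sentence vectors of width N; the hyperparameters are illustrative).
from keras.layers import Input
from keras.models import Model

N = 64
e0, e1 = Input(shape=(N,)), Input(shape=(N,))
score = mlp_ptscorer([e0, e1], Ddim=1, N=N, l2reg=1e-4)
model = Model([e0, e1], score)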