def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    # Pre-activation residual block (BN-ReLU-Conv twice) with an optional
    # strided 1x1 projection on the shortcut when downsampling.
    subsample = (subsample_factor, subsample_factor)

    x = BatchNormalization(axis=3)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filters, 3, 3, subsample=subsample, border_mode='same')(x)

    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filters, 3, 3, subsample=(1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution2D(nb_filters, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
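# --- Usage sketch (not from the original source): stacking res_block into a
# small classifier. Assumes the Keras 1.x functional API used above and
# TensorFlow dimension ordering (channels last, matching axis=3); input shape,
# block sizes, and class count are illustrative only.
from keras.layers import Input, Activation, Convolution2D, GlobalAveragePooling2D, Dense
from keras.models import Model

inputs = Input(shape=(32, 32, 3))
x = Convolution2D(16, 3, 3, border_mode='same')(inputs)
x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
x = res_block(x, nb_filters=32, block=1, subsample_factor=2)  # halves spatial dims
x = Activation('relu')(x)
x = GlobalAveragePooling2D()(x)
outputs = Dense(10, activation='softmax')(x)
model = Model(input=inputs, output=outputs)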
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    # 3D variant of the residual block above: only the kernel/stride rank
    # (Convolution3D, triple subsample) and the batch-norm axis (axis=4 for
    # channels-last 5D tensors) change; the shortcut logic is identical.
    subsample = (subsample_factor, subsample_factor, subsample_factor)

    x = BatchNormalization(axis=4)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=subsample, border_mode='same')(x)

    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=(1, 1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution3D(nb_filters, 1, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
def BiDi(input_shape, vocabSize, veclen, wordWeights, nLayers, nHidden, lr):
    # Stacked bidirectional GRU classifier: forward and backward passes are
    # averaged at each depth; only the last layer collapses the sequence.
    assert len(nHidden) == nLayers, '#Neurons for each layer does not match #Layers'
    r_flag = True
    _Input = Input(shape=(input_shape,), dtype='int32')
    E = keras.layers.embeddings.Embedding(vocabSize, veclen, weights=(wordWeights,),
                                          mask_zero=True)(_Input)
    for ind in range(nLayers):
        if ind == (nLayers - 1):
            r_flag = False  # last layer returns a single vector, not sequences
        fwd_layer = keras.layers.recurrent.GRU(nHidden[ind], init='glorot_uniform',
                                               inner_init='orthogonal', activation='tanh',
                                               inner_activation='hard_sigmoid',
                                               return_sequences=r_flag)(E)
        bkwd_layer = keras.layers.recurrent.GRU(nHidden[ind], init='glorot_uniform',
                                                inner_init='orthogonal', activation='tanh',
                                                inner_activation='hard_sigmoid',
                                                return_sequences=r_flag,
                                                go_backwards=True)(E)
        E = merge([fwd_layer, bkwd_layer], mode='ave')
    Output = Dense(1, activation='sigmoid')(Dropout(0.5)(E))
    model = Model(input=_Input, output=Output)
    opt = keras.optimizers.Adam(lr)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,  # was hard-coded to 'adam', which ignored lr
                  metrics=['accuracy'])
    return model
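# --- Hypothetical invocation (not from the original source). All sizes are
# illustrative; wordWeights must have shape (vocabSize, veclen). Note that
# merging masked GRU outputs with mode='ave' relies on the Keras 1.x version
# in use supporting masks through merge().
import numpy as np

vocab_size, vec_len, seq_len = 10000, 100, 200
word_weights = np.random.randn(vocab_size, vec_len).astype('float32')
model = BiDi(input_shape=seq_len, vocabSize=vocab_size, veclen=vec_len,
             wordWeights=word_weights, nLayers=2, nHidden=[128, 64], lr=1e-3)
model.summary()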
def addLayer(previousLayer, nChannels, nOutChannels, dropRate, blockNum):
    # BN-ReLU-Conv DenseNet-style layer whose output is concatenated onto
    # its input along the channel axis (channels-first, axis 1).
    bn = BatchNormalization(name='denseb_BatchNorm_{}'.format(blockNum), axis=1)(previousLayer)
    relu = Activation('relu', name='denseb_relu_{}'.format(blockNum))(bn)
    conv = Convolution2D(nOutChannels, 3, 3, border_mode='same',
                         name='denseb_conv_{}'.format(blockNum))(relu)
    if dropRate is not None:
        dp = Dropout(dropRate, name='denseb_dropout_{}'.format(blockNum))(conv)  # was missing the (blockNum) call
        return merge([dp, previousLayer], mode='concat', concat_axis=1)
    else:
        return merge([conv, previousLayer], mode='concat', concat_axis=1)
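# --- Sketch of chaining addLayer into a dense block (not from the original
# source). Assumes channels-first ordering, matching axis=1/concat_axis=1
# above; shapes and layer counts are made up.
from keras.layers import Input

x = Input(shape=(16, 32, 32))
n_channels = 16
for block_num in range(4):
    x = addLayer(x, nChannels=n_channels, nOutChannels=12,
                 dropRate=0.2, blockNum=block_num)
    n_channels += 12  # each concat grows the channel axis by nOutChannels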
def create_model(numNodes, factors):
    # Two embedding towers whose dot product scores a (left, right) node pair.
    left_input = Input(shape=(1,))
    right_input = Input(shape=(1,))

    left_model = Sequential()
    left_model.add(Embedding(input_dim=numNodes + 1, output_dim=factors, input_length=1, mask_zero=False))
    left_model.add(Reshape((factors,)))

    right_model = Sequential()
    right_model.add(Embedding(input_dim=numNodes + 1, output_dim=factors, input_length=1, mask_zero=False))
    right_model.add(Reshape((factors,)))

    left_embed = left_model(left_input)
    right_embed = right_model(right_input)  # was left_model, which left right_model unused

    left_right_dot = merge([left_embed, right_embed], mode="dot", dot_axes=1, name="left_right_dot")

    model = Model(input=[left_input, right_input], output=[left_right_dot])
    embed_generator = Model(input=[left_input, right_input], output=[left_embed, right_embed])
    return model, embed_generator
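# --- Illustrative use of the factory above (not from the original source);
# node count, factor size, loss, and index pairs are all assumptions.
import numpy as np

model, embed_generator = create_model(numNodes=1000, factors=64)
model.compile(loss='mse', optimizer='rmsprop')  # loss/optimizer are assumptions

left = np.array([[1], [2]])
right = np.array([[3], [4]])
scores = model.predict([left, right])             # dot products, shape (2, 1)
vectors = embed_generator.predict([left, right])  # [left_embed, right_embed]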
def test_merge_mask_3d():
    from keras.layers import Input, merge, Embedding, SimpleRNN
    from keras.models import Model

    rand = lambda *shape: np.asarray(np.random.random(shape) > 0.5, dtype='int32')

    # embeddings
    input_a = Input(shape=(3,), dtype='int32')
    input_b = Input(shape=(3,), dtype='int32')
    embedding = Embedding(3, 4, mask_zero=True)
    embedding_a = embedding(input_a)
    embedding_b = embedding(input_b)

    # rnn
    rnn = SimpleRNN(3, return_sequences=True)
    rnn_a = rnn(embedding_a)
    rnn_b = rnn(embedding_b)

    # concatenation
    merged_concat = merge([rnn_a, rnn_b], mode='concat', concat_axis=-1)
    model = Model([input_a, input_b], [merged_concat])
    model.compile(loss='mse', optimizer='sgd')
    model.fit([rand(2, 3), rand(2, 3)], [rand(2, 3, 6)])
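# Note on the test above: with mask_zero=True the Embedding emits a mask that
# must propagate through SimpleRNN and survive merge(mode='concat'); the test
# passes if the merged model compiles and fits without mask or shape errors.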
def transform_model(weight_loss_pix=5e-4):
    # Conv encoder (stride-2 downsampling) + deconv decoder, with a pixel-wise
    # L1 activity penalty on the last layer and a global residual connection.
    inputs = Input(shape=(128, 128, 3))
    x1 = Convolution2D(64, 5, 5, border_mode='same')(inputs)
    x2 = LeakyReLU(alpha=0.3, name='wkcw')(x1)
    x3 = BatchNormalization()(x2)
    x4 = Convolution2D(128, 4, 4, border_mode='same', subsample=(2, 2))(x3)
    x5 = LeakyReLU(alpha=0.3)(x4)
    x6 = BatchNormalization()(x5)
    x7 = Convolution2D(256, 4, 4, border_mode='same', subsample=(2, 2))(x6)
    x8 = LeakyReLU(alpha=0.3)(x7)
    x9 = BatchNormalization()(x8)
    x10 = Deconvolution2D(128, 3, 3, output_shape=(None, 64, 64, 128), border_mode='same', subsample=(2, 2))(x9)
    x11 = BatchNormalization()(x10)
    x12 = Deconvolution2D(64, 3, 3, output_shape=(None, 128, 128, 64), border_mode='same', subsample=(2, 2))(x11)
    x13 = BatchNormalization()(x12)
    x14 = Deconvolution2D(3, 4, 4, output_shape=(None, 128, 128, 3), border_mode='same',
                          activity_regularizer=activity_l1(weight_loss_pix))(x13)
    output = merge([inputs, x14], mode='sum')
    model = Model(input=inputs, output=output)
    return model
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_layers: the number of layers of conv_block to append to the model.
            nb_filter: number of filters
            growth_rate: growth rate
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            grow_nb_filters: flag to decide to allow number of filters to grow
    '''
    eps = 1.1e-5
    concat_feat = x
    for i in range(nb_layers):
        branch = i + 1
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = merge([concat_feat, x], mode='concat', concat_axis=concat_axis,
                            name='concat_' + str(stage) + '_' + str(branch))
        if grow_nb_filters:
            nb_filter += growth_rate
    return concat_feat, nb_filter
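# --- conv_block and concat_axis are assumed to be defined at module level in
# the source this snippet came from; the sketch below shows what they
# typically look like in Keras 1.x DenseNet ports. It is an illustration,
# not the original helper (real ports usually add a 1x1 bottleneck conv and
# pass the eps value to BatchNormalization).
from keras import backend as K
from keras.layers import Activation, BatchNormalization, Convolution2D, Dropout

concat_axis = 3 if K.image_dim_ordering() == 'tf' else 1

def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
    name = 'conv_{}_{}'.format(stage, branch)
    x = BatchNormalization(axis=concat_axis, name=name + '_bn')(x)
    x = Activation('relu', name=name + '_relu')(x)
    x = Convolution2D(nb_filter, 3, 3, border_mode='same', name=name)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x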
def block_inception_a(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 96, 1, 1)

    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)

    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)

    x = merge([branch_0, branch_1, branch_2, branch_3], mode='concat', concat_axis=channel_axis)
    return x
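# --- conv2d_bn is not defined in these snippets; in Keras 1.x Inception-v4
# ports it is typically a Conv + BatchNorm + ReLU helper along these lines
# (a sketch under that assumption, not the exact original):
from keras import backend as K
from keras.layers import Activation, BatchNormalization, Convolution2D

def conv2d_bn(x, nb_filter, nb_row, nb_col, border_mode='same', subsample=(1, 1)):
    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1
    x = Convolution2D(nb_filter, nb_row, nb_col,
                      subsample=subsample, border_mode=border_mode)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    return Activation('relu')(x)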
def block_reduction_a(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 3, 3, subsample=(2, 2), border_mode='valid')

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 3, 3)
    branch_1 = conv2d_bn(branch_1, 256, 3, 3, subsample=(2, 2), border_mode='valid')

    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), border_mode='valid')(input)

    x = merge([branch_0, branch_1, branch_2], mode='concat', concat_axis=channel_axis)
    return x
def block_inception_b(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 1, 1)

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 1, 7)
    branch_1 = conv2d_bn(branch_1, 256, 7, 1)

    branch_2 = conv2d_bn(input, 192, 1, 1)
    branch_2 = conv2d_bn(branch_2, 192, 7, 1)
    branch_2 = conv2d_bn(branch_2, 224, 1, 7)
    branch_2 = conv2d_bn(branch_2, 224, 7, 1)
    branch_2 = conv2d_bn(branch_2, 256, 1, 7)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 128, 1, 1)

    x = merge([branch_0, branch_1, branch_2, branch_3], mode='concat', concat_axis=channel_axis)
    return x
def create_model(self, ret_model=False):
    # Image branch: project 4096-d CNN features and repeat across time steps.
    image_model = Sequential()
    image_model.add(Dense(EMBEDDING_DIM, input_dim=4096, activation='relu'))
    image_model.add(RepeatVector(self.max_length))

    # Language branch: embed word indices and run them through an LSTM.
    lang_model = Sequential()
    lang_model.add(Embedding(self.vocab_size, 256, input_length=self.max_length))
    lang_model.add(LSTM(256, return_sequences=True))
    lang_model.add(TimeDistributed(Dense(EMBEDDING_DIM)))

    # Merge both branches and decode the next word.
    model = Sequential()
    model.add(Merge([image_model, lang_model], mode='concat'))
    model.add(LSTM(1000, return_sequences=False))
    model.add(Dense(self.vocab_size))
    model.add(Activation('softmax'))

    print("Model created!")
    if ret_model:
        return model

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
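# Because the merge joins two Sequential branches, training feeds a
# two-element input list, e.g. model.fit([image_features, partial_captions],
# next_word_one_hot): the image branch expects 4096-d CNN features and the
# language branch expects padded word-index sequences of length max_length.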
def __call__(self, model):
    original = model

    # Gated activation: tanh and sigmoid branches from causal dilated convs.
    tanh_out = CausalAtrousConvolution1D(self.filters, 2, atrous_rate=self.rate, border_mode='valid')(model)
    tanh_out = Activation('tanh')(tanh_out)
    sigm_out = CausalAtrousConvolution1D(self.filters, 2, atrous_rate=self.rate, border_mode='valid')(model)
    sigm_out = Activation('sigmoid')(sigm_out)
    model = Merge(mode='mul')([tanh_out, sigm_out])

    # 1x1 convolutions produce the skip path and the residual path.
    skip_x = Convolution1D(self.filters, 1, border_mode='same')(model)
    res_x = Convolution1D(self.filters, 1, border_mode='same')(model)
    res_x = Merge(mode='sum')([original, res_x])
    return res_x, skip_x
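# This is WaveNet's gated activation unit, z = tanh(W_f * x) * sigmoid(W_g * x);
# the two 1x1 convolutions then produce the skip path and the residual path,
# and Merge(mode='sum') adds the block input back in as the residual connection.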
def __call__(self, model):
    if self.crop_right:
        model = Lambda(lambda x: x[:, :, :K.int_shape(x)[2] - 1, :])(model)
    if self.v is not None:
        model = Merge(mode='sum')([model, self.v])
    if self.h is not None:
        hV = Dense(output_dim=2 * self.filters)(self.h)
        hV = Reshape((1, 1, 2 * self.filters))(hV)
        model = Lambda(lambda x: x[0] + x[1])([model, hV])

    # Split the feature maps in half and gate them: tanh(f) * sigmoid(g).
    model_f = Lambda(lambda x: x[:, :, :, :self.filters])(model)
    model_g = Lambda(lambda x: x[:, :, :, self.filters:])(model)
    model_f = Lambda(lambda x: K.tanh(x))(model_f)
    model_g = Lambda(lambda x: K.sigmoid(x))(model_g)
    res = Merge(mode='mul')([model_f, model_g])
    return res
def __call__(self, model1, model2=None):
    if model2 is None:
        h_model = model1
        filter_size = (7, 7)
    else:
        h_model = model2
        filter_size = (3, 3)

    v_model = PaddedConvolution2D(self.filters, filter_size, 'vertical')(model1)
    feed_vertical = FeedVertical(self.filters)(v_model)
    v_model = GatedBlock(self.filters, h=self.h)(v_model)

    h_model_new = PaddedConvolution2D(self.filters, filter_size, 'horizontal', 'A')(h_model)
    h_model_new = GatedBlock(self.filters, v=feed_vertical, h=self.h, crop_right=True)(h_model_new)
    h_model_new = Convolution2D(self.filters, 1, 1, border_mode='valid')(h_model_new)

    return (v_model, h_model_new if model2 is None else Merge(mode='sum')([h_model_new, h_model]))
def CNNWithKeywordLayer(embed_matrix, embed_input, sequence_length, keywords_length, filter_sizes, num_filters, dropout_prob, hidden_dims, model_variation, embedding_dim=300):
    ''' Two-way input model: the left branch is a CNN sentence encoder, the right branch encodes keywords
    '''
    embed1 = Embedding(embed_input, embedding_dim, input_length=sequence_length, weights=[embed_matrix])

    # 1. question model part
    question_branch = Sequential()
    cnn_model = TextCNN(sequence_length, embedding_dim, filter_sizes, num_filters)
    question_branch.add(embed1)
    question_branch.add(cnn_model)

    # 2. keyword model part
    # keyword_branch = KeywordLayer(keywords_length, embed_input, embedding_dim, embed_matrix)
    keyword_branch = LSTMLayer(embed_matrix, embed_input, keywords_length, dropout_prob, hidden_dims, embedding_dim)

    # 3. merge layer
    merged = Merge([question_branch, keyword_branch], mode='concat')
    final_model = Sequential()
    final_model.add(merged)
    final_model.add(Dense(hidden_dims, W_constraint=maxnorm(3)))
    final_model.add(Dropout(0.5))
    final_model.add(Activation('relu'))
    final_model.add(Dense(1))
    final_model.add(Activation('sigmoid'))
    final_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return final_model
def QuestionWithAnswersModel(embed_matrix, embed_input, sequence_length, ans_cnt, keywords_length, filter_sizes, num_filters, dropout_prob, hidden_dims, embedding_dim=300):
    ''' path 1: question embedding (CNN model)
        path 2: answer embedding (hierarchical RNN model)
        then merge the two paths
    '''
    # path 1
    embed1 = Embedding(embed_input, embedding_dim, input_length=sequence_length, weights=[embed_matrix])
    question_branch = Sequential()
    cnn_model = TextCNN(sequence_length, embedding_dim, filter_sizes, num_filters)
    question_branch.add(embed1)
    question_branch.add(cnn_model)

    # path 2
    answer_branch = HierarchicalRNN(embed_matrix, embed_input, ans_cnt, keywords_length, embedding_dim)

    merged = Merge([question_branch, answer_branch], mode='concat')
    final_model = Sequential()
    final_model.add(merged)
    final_model.add(Dense(hidden_dims, W_constraint=maxnorm(3)))
    final_model.add(Dropout(0.5))
    final_model.add(Activation('relu'))
    final_model.add(Dense(1))
    final_model.add(Activation('sigmoid'))
    final_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return final_model
def call(self, x, mask=None):
    layer_output = self.layer.call(x, mask)
    # Lazily turn a string mode (e.g. 'sum') into a Merge layer on first call.
    if isinstance(self.merge_mode, str):
        self.merge_mode = Merge(mode=self.merge_mode)
    output = self.merge_mode([x, layer_output])
    return output

def get_config(self):
    config = {'merge_mode': {'class_name': 'Merge',
                             'config': self.merge_mode.get_config()}}
    base_config = super(Residual, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))