def test_lambda():
    """Exercise core.Lambda: plain functions, extra arguments, and
    serialization round-trips for both `function` and `output_shape`."""
    from keras.utils.layer_utils import layer_from_config
    Lambda = core.Lambda

    # Element-wise function with no extra arguments.
    layer_test(Lambda,
               kwargs={'function': lambda x: x + 1},
               input_shape=(3, 2))

    # Function taking extra keyword arguments through 'arguments'.
    layer_test(Lambda,
               kwargs={'function': lambda x, a, b: x * a + b,
                       'arguments': {'a': 0.6, 'b': 0.4}},
               input_shape=(3, 2))

    # test serialization with function
    def f(x):
        return x + 1

    layer = Lambda(f)
    cfg = layer.get_config()
    layer = layer_from_config({'class_name': 'Lambda', 'config': cfg})

    # Lambda function together with a lambda output_shape.
    layer = Lambda(lambda x: K.concatenate([K.square(x), x]),
                   output_shape=lambda s: tuple(list(s)[:-1] + [2 * s[-1]]))
    cfg = layer.get_config()
    layer = Lambda.from_config(cfg)

    # test serialization with output_shape function
    def f(x):
        return K.concatenate([K.square(x), x])

    def f_shape(s):
        return tuple(list(s)[:-1] + [2 * s[-1]])

    layer = Lambda(f, output_shape=f_shape)
    cfg = layer.get_config()
    layer = layer_from_config({'class_name': 'Lambda', 'config': cfg})
# Python examples: source code using instances of the Lambda() class
def __call__(self, xW, layer_idx):
    '''calculate gated activation maps given input maps

    PixelCNN-style gated activation: the 2*nb_filters channels of `xW` are
    split in half, passed through tanh and sigmoid respectively, and the
    results multiplied. Optionally crops the rightmost column, merges in the
    vertical-stack map, and conditions on a latent vector ``self.h``.
    '''
    if self.stack_name == 'vertical':
        stack_tag = 'v'
    elif self.stack_name == 'horizontal':
        stack_tag = 'h'
    else:
        # Bug fix: previously fell through with `stack_tag` unbound, producing
        # a confusing NameError further down; fail fast with a clear message.
        raise ValueError("stack_name must be 'vertical' or 'horizontal', got %r"
                         % (self.stack_name,))
    if self.crop_right:
        xW = Lambda(self._crop_right, name='h_crop_right_'+str(layer_idx))(xW)
    if self.v_map is not None:
        xW = merge([xW, self.v_map], mode='sum', name='h_merge_v_'+str(layer_idx))
    if self.h is not None:
        # Project the conditioning latent to 2*nb_filters and broadcast-add it.
        hV = Dense(output_dim=2*self.nb_filters,
                   name=stack_tag+'_dense_latent_'+str(layer_idx))(self.h)
        hV = Reshape((1, 1, 2*self.nb_filters),
                     name=stack_tag+'_reshape_latent_'+str(layer_idx))(hV)
        xW = Lambda(lambda x: x[0]+x[1],
                    name=stack_tag+'_merge_latent_'+str(layer_idx))([xW, hV])
    # Split channels: first half feeds the tanh gate, second half the sigmoid gate.
    xW_f = Lambda(lambda x: x[:, :, :, :self.nb_filters],
                  name=stack_tag+'_Wf_'+str(layer_idx))(xW)
    xW_g = Lambda(lambda x: x[:, :, :, self.nb_filters:],
                  name=stack_tag+'_Wg_'+str(layer_idx))(xW)
    xW_f = Lambda(lambda x: K.tanh(x), name=stack_tag+'_tanh_'+str(layer_idx))(xW_f)
    xW_g = Lambda(lambda x: K.sigmoid(x), name=stack_tag+'_sigmoid_'+str(layer_idx))(xW_g)
    # Gated output: tanh(Wf*x) * sigmoid(Wg*x).
    res = merge([xW_f, xW_g], mode='mul', name=stack_tag+'_merge_gate_'+str(layer_idx))
    return res
def _shift_down(x):
    """Shift feature maps down one row: pad a zero row on top, drop the bottom."""
    rows = K.int_shape(x)[1]
    padded = ZeroPadding2D(padding=(1, 0, 0, 0))(x)
    return Lambda(lambda t: t[:, :rows, :, :])(padded)
def _feed_v_map(self, x, layer_idx):
    """Shift the vertical-stack feature maps down, then apply a 1x1 convolution."""
    # shifting down feature maps
    shifted = Lambda(self._shift_down, name='v_shift_down'+str(layer_idx))(x)
    return Convolution2D(2*self.nb_filters, 1, 1, border_mode='valid',
                         name='v_1x1_conv_'+str(layer_idx))(shifted)
def constructNet(input_dim=784, n_hidden=1000, n_out=1000, nb_filter=50, prob=0.5, lr=0.0001):
    """Build and compile a diagonal-augmented atrous-convolution network.

    Parameters
    ----------
    input_dim : iterable
        Input image shape (passed as ``Input(shape=list(input_dim))``).
    nb_filter : int
        Number of filters per atrous convolution layer.
    n_hidden, n_out, prob, lr :
        Currently unused by this architecture (kept for interface compatibility).

    Returns
    -------
    A compiled ``Model`` (adam optimizer, binary cross-entropy loss).
    """
    # Bug fix: a local `nb_filters = 50` previously shadowed and silently
    # ignored the `nb_filter` argument; honor the caller-supplied value.
    nb_filters = nb_filter
    input_img = Input(shape=list(input_dim))
    a = input_img
    a1 = AtrousConvolution2D(nb_filters, 3, 3, atrous_rate=(1, 1), border_mode='same')(a)
    # We only use the diagonal output from this, TODO: only filter diagonal
    b = AtrousConvolution2D(nb_filters, 3, 3, atrous_rate=(1, 1), border_mode='same')(a)
    a2 = Lambda(GetDiag, output_shape=out_diag_shape)(b)
    comb = merge([a1, a2], mode='sum')
    comb = BatchNormalization()(comb)
    a = Activation('relu')(comb)
    l = 5
    for i in range(1, l):
        # NOTE(review): every iteration uses the constant atrous_rate=(l, l);
        # a growing dilation (i, i) may have been intended — confirm before changing.
        a1 = AtrousConvolution2D(nb_filters, 3, 3, atrous_rate=(l, l), border_mode='same')(a)
        # We only use the diagonal output from this, TODO: only filter diagonal
        b = AtrousConvolution2D(nb_filters, 3, 3, atrous_rate=(l, l), border_mode='same')(a)
        a2 = Lambda(GetDiag, output_shape=out_diag_shape)(b)
        comb = merge([a1, a2], mode='sum')
        comb = BatchNormalization()(comb)
        a = Activation('relu')(comb)
    decoded = Convolution2D(1, 1, 1, activation='sigmoid', border_mode='same')(a)
    final = Flatten()(decoded)
    model = Model(input_img, final)
    model.summary()
    model.compile(optimizer='adam', loss='binary_crossentropy')
    return model
# Source file: model_util.py
# Project: keras-mxnet-benchmarks
# Author: sandeep-krishnamurthy
# (scraped page metadata: 17 reads, 0 favorites, 0 likes, 0 comments)
def prepare_tensorflow_multi_gpu_model(model, kwargs):
    """Replicate `model` across GPU_NUM GPUs using batch-slicing data parallelism.

    Each GPU receives a slice of the input batch (via `slice_batch`), runs the
    shared model on it, and the per-GPU outputs are concatenated back together
    on the CPU along the batch axis. Returns the wrapped model compiled with
    the options in `kwargs`.
    """
    # Cleanup: use isinstance instead of `type(...) is list`.
    multi_input = isinstance(model.input_shape, list)
    multi_output = len(model.outputs) > 1
    # Fresh placeholder inputs matching the wrapped model (batch dim dropped).
    x = [Input(shape[1:]) for shape in model.input_shape] if multi_input \
        else Input(model.input_shape[1:])
    towers = []
    outputs = [[] for _ in range(len(model.outputs))]
    for g in range(GPU_NUM):
        with tf.device('/gpu:' + str(g)):
            # Take this GPU's share of the batch for every input tensor.
            slice_g = [Lambda(slice_batch, lambda shape: shape,
                              arguments={'n_gpus': GPU_NUM, 'part': g})(y) for y in x] \
                if multi_input \
                else Lambda(slice_batch, lambda shape: shape,
                            arguments={'n_gpus': GPU_NUM, 'part': g})(x)
            output_model = model(slice_g)
            if multi_output:
                for num in range(len(output_model)):
                    outputs[num].append(output_model[num])
            else:
                towers.append(output_model)
    with tf.device('/cpu:0'):
        # Re-assemble the full batch by concatenating the per-GPU slices.
        # (The original assigned `merged = []` twice; redundancy removed.)
        if multi_output:
            merged = [merge(output, mode='concat', concat_axis=0) for output in outputs]
        else:
            merged = merge(towers, mode='concat', concat_axis=0)
    model = Model(input=x if isinstance(x, list) else [x], output=merged)
    model.compile(**kwargs)
    return model
def steering_net():
    """PilotNet-style CNN mapping a 66x200x3 frame to a single steering value."""
    model = Sequential()
    # First conv layer carries the input shape.
    model.add(Convolution2D(24, 5, 5, init=normal_init, subsample=(2, 2),
                            name='conv1_1', input_shape=(66, 200, 3)))
    model.add(Activation('relu'))
    # Remaining conv stack: (filters, kernel, stride, name).
    for filters, kernel, stride, name in [(36, 5, (2, 2), 'conv2_1'),
                                          (48, 5, (2, 2), 'conv3_1'),
                                          (64, 3, (1, 1), 'conv4_1'),
                                          (64, 3, (1, 1), 'conv4_2')]:
        model.add(Convolution2D(filters, kernel, kernel, init=normal_init,
                                subsample=stride, name=name))
        model.add(Activation('relu'))
    model.add(Flatten())
    # Fully-connected head.
    for units, name in [(1164, 'dense_0'), (100, 'dense_1'),
                        (50, 'dense_2'), (10, 'dense_3')]:
        model.add(Dense(units, init=normal_init, name=name))
        model.add(Activation('relu'))
    model.add(Dense(1, init=normal_init, name='dense_4'))
    # Scaled-atan output squashing.
    model.add(Lambda(atan_layer, output_shape=atan_layer_shape, name='atan_0'))
    return model
def build_lstm(n_con, n_emb, vocabs_size, n_dis, emb_size, cluster_size):
    """Combine continuous features, discrete embeddings and an LSTM sequence
    encoder, ending in a softmax over clusters mapped to a 2-D point."""
    hidden_size = 800
    # Continuous-feature branch.
    con = Sequential()
    con.add(Dense(input_dim=n_con, output_dim=emb_size))
    # One embedding branch per discrete feature group.
    emb_list = []
    for i in range(n_emb):
        branch = Sequential()
        branch.add(Embedding(input_dim=vocabs_size[i], output_dim=emb_size,
                             input_length=n_dis))
        branch.add(Flatten())
        emb_list.append(branch)
    # Sequence branch: 2-D sequences, masked and normalized, encoded by an LSTM.
    in_dimension = 2
    seq = Sequential()
    seq.add(BatchNormalization(input_shape=(MAX_LENGTH, in_dimension)))
    seq.add(Masking([0] * in_dimension, input_shape=(MAX_LENGTH, in_dimension)))
    seq.add(LSTM(emb_size, return_sequences=False,
                 input_shape=(MAX_LENGTH, in_dimension)))
    # Merge all branches and classify.
    model = Sequential()
    model.add(Merge([con] + emb_list + [seq], mode='concat'))
    model.add(BatchNormalization())
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(cluster_size, activation='softmax'))
    model.add(Lambda(caluate_point, output_shape=[2]))
    return model
def __grouped_convolution_block(input, grouped_channels, cardinality, strides, weight_decay=5e-4):
    ''' Adds a grouped convolution block. It is an equivalent block from the paper
    Args:
        input: input tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term
    Returns: a keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = LeakyReLU()(x)
        return x

    for c in range(cardinality):
        # Bug fix: the original wrote `z[...] if channels_last else lambda z: z[...]`,
        # so in channels_first mode the Lambda *returned a function object* instead
        # of a channel slice. Also bind `c` as a default argument so the closure is
        # not late-bound if the lambda is invoked after the loop ends.
        if K.image_data_format() == 'channels_last':
            x = Lambda(lambda z, c=c: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels])(input)
        else:
            x = Lambda(lambda z, c=c: z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input)
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)
        group_list.append(x)

    # Concatenate the per-group convolutions back along the channel axis.
    group_merge = concatenate(group_list, axis=channel_axis)
    x = BatchNormalization(axis=channel_axis)(group_merge)
    x = LeakyReLU()(x)
    return x
def get_tra_ori():
    """Return a Model mapping a 1-channel image of any size to its orientation map."""
    image = Input(shape=(None, None, 1))
    angle = Lambda(orientation)(image)
    return Model(inputs=[image], outputs=[angle])
def get_tra_ori():
    """Build a model computing the orientation field of a single-channel input."""
    inp = Input(shape=(None, None, 1))
    out = Lambda(orientation)(inp)
    model = Model(inputs=[inp], outputs=[out])
    return model
def test_lambda():
    """Test core.Lambda with plain and argument-taking functions, plus
    config serialization round-trips for function and output_shape."""
    from keras.utils.layer_utils import layer_from_config
    Lambda = core.Lambda

    # Basic element-wise function.
    layer_test(Lambda,
               kwargs={'function': lambda x: x + 1},
               input_shape=(3, 2))

    # Extra keyword arguments forwarded via 'arguments'.
    layer_test(Lambda,
               kwargs={'function': lambda x, a, b: x * a + b,
                       'arguments': {'a': 0.6, 'b': 0.4}},
               input_shape=(3, 2))

    # test serialization with function
    def f(x):
        return x + 1

    lam = Lambda(f)
    serialized = lam.get_config()
    lam = layer_from_config({'class_name': 'Lambda', 'config': serialized})

    # Lambda function plus a lambda output_shape.
    lam = Lambda(lambda x: K.concatenate([K.square(x), x]),
                 output_shape=lambda s: tuple(list(s)[:-1] + [2 * s[-1]]))
    serialized = lam.get_config()
    lam = Lambda.from_config(serialized)

    # test serialization with output_shape function
    def f(x):
        return K.concatenate([K.square(x), x])

    def f_shape(s):
        return tuple(list(s)[:-1] + [2 * s[-1]])

    lam = Lambda(f, output_shape=f_shape)
    serialized = lam.get_config()
    lam = layer_from_config({'class_name': 'Lambda', 'config': serialized})
def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross channel normalization in the original
    Alexnet

    Local response normalization across channels (LRN):
        out = X / (k + alpha * sum over a window of n channels of X^2) ** beta
    Returned as a shape-preserving Lambda layer.
    """
    def f(X):
        # Static shape; the channel axis position depends on backend ordering.
        if K.image_dim_ordering()=='tf':
            b, r, c, ch = X.get_shape()
        else:
            b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        scale = k
        # Accumulate squared activations over n neighbouring channels by
        # zero-padding along the channel axis (moved into a spatial position
        # for spatial_2d_padding, then permuted back) and summing shifted slices.
        if K.image_dim_ordering() == 'th':
            extra_channels = K.spatial_2d_padding(K.permute_dimensions(square, (0, 2, 3, 1)), (0, half))
            extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
            for i in range(n):
                scale += alpha * extra_channels[:, i:i+ch, :, :]
        if K.image_dim_ordering() == 'tf':
            extra_channels = K.spatial_2d_padding(K.permute_dimensions(square, (0, 3, 1, 2)), (half, 0))
            extra_channels = K.permute_dimensions(extra_channels, (0, 2, 3, 1))
            for i in range(n):
                # int(ch): ch may be a symbolic Dimension in the tf backend.
                scale += alpha * extra_channels[:, :, :, i:i+int(ch)]
        scale = scale ** beta
        return X / scale
    # Normalization preserves the input shape exactly.
    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
def build_network(self, conf, model=None, input_shape=None, is_conv=True):
    """Build network.

    Assembles a Sequential network from a list of layer-spec dicts `conf`.
    Each spec uses keys: 'type' ('full' or 'conv'), 'n_feature', 'kw',
    'is_drop'/'drop_rate', 'is_batch', and 'activation' (a layer instance).
    If `model` is given it becomes the base of the stack; otherwise an
    identity Lambda with `input_shape` starts it.
    """
    _model = model
    model = Sequential()
    if _model is None:
        # Identity layer just to anchor the input shape.
        model.add(Lambda(lambda x: x, input_shape=input_shape))
    else:
        model.add(_model)
    for x in conf:
        if x['is_drop']:
            model.add(Dropout(x['drop_rate']))
        # Bug fix: string comparisons used `is` (identity), which only worked
        # by accident of CPython string interning; use `==` (equality).
        if x['type'] == 'full':
            if is_conv:
                # Transition from conv feature maps to dense layers.
                model.add(Flatten())
                is_conv = False
            model.add(Dense(x['n_feature']))
        elif x['type'] == 'conv':
            model.add(Convolution2D(nb_filter=x['n_feature'],
                                    nb_row=x['kw'],
                                    nb_col=1,
                                    border_mode='same'))
            is_conv = True
        if x['is_batch']:
            if x['type'] == 'full':
                model.add(BatchNormalization(mode=1, axis=-1))
            if x['type'] == 'conv':
                model.add(BatchNormalization(mode=2, axis=-1))
        model.add(x['activation'])
    return model
def grad_cam(input_model, image, category_index, layer_name):
    """Compute a Grad-CAM visualization for `category_index` w.r.t. `layer_name`.

    Returns (cam_image, heatmap): the preprocessed input blended with a
    JET-colormapped class-activation map as uint8 BGR, and the raw
    max-normalized heatmap.
    """
    model = Sequential()
    model.add(input_model)
    nb_classes = 1000
    # Mask out all but the target category's score.
    target_layer = lambda x: target_category_loss(x, category_index, nb_classes)
    model.add(Lambda(target_layer,
                     output_shape=target_category_loss_output_shape))
    loss = K.sum(model.layers[-1].output)
    # Bug fix: `l.name is layer_name` compared string identity, which can
    # silently fail to match; use `==` equality instead.
    conv_output = [l for l in model.layers[0].layers if l.name == layer_name][0].output
    grads = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input], [conv_output, grads])

    output, grads_val = gradient_function([image])
    output, grads_val = output[0, :], grads_val[0, :, :, :]

    # Grad-CAM channel weights: spatially averaged gradients.
    weights = np.mean(grads_val, axis=(0, 1))
    cam = np.ones(output.shape[0:2], dtype=np.float32)
    for i, w in enumerate(weights):
        cam += w * output[:, :, i]

    cam = cv2.resize(cam, (224, 224))
    cam = np.maximum(cam, 0)  # ReLU: keep only positive influence
    heatmap = cam / np.max(cam)

    # Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image -= np.min(image)
    image = np.minimum(image, 255)

    cam = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    cam = np.float32(cam) + np.float32(image)
    cam = 255 * cam / np.max(cam)
    return np.uint8(cam), heatmap
def train_model(x_tr, y_tr, conv_f_n):
    """Train a Siamese shape-matching network (epicardium) and save it to disk."""
    save_name = 'shape_match_model_epi.h5'
    tr_epoch = 15
    input_dim = x_tr.shape[2:]
    left = Input(shape=input_dim)
    right = Input(shape=input_dim)
    # One shared CNN applied to both inputs (weight tying).
    base_network = create_cnn_network(input_dim, conv_f_n)
    encoded_left = base_network(left)
    encoded_right = base_network(right)
    distance = Lambda(euclidean_distance,
                      output_shape=eucl_dist_output_shape)([encoded_left, encoded_right])
    model_tr = Model(input=[left, right], output=distance)
    # train
    # opt_func = RMSprop(lr=.0005, clipnorm=1)
    opt_func = RMSprop()
    model_tr.compile(loss=contrastive_loss, optimizer=opt_func)
    model_tr.fit([x_tr[:, 0], x_tr[:, 1]], y_tr, validation_split=.25,
                 batch_size=32, verbose=2, nb_epoch=tr_epoch,
                 callbacks=[EarlyStopping(monitor='val_loss', patience=2)])
    model_tr.save(save_name)
    return model_tr
# test, also provide info on which pair it was trained on and which it was tested on
def train_model(x_tr, y_tr, conv_f_n, dense_n):
    """Train a Siamese shape-matching network (endocardium, k3) and save it."""
    save_name = '/home/nripesh/PycharmProjects/Siamese/siamese_supervised/shape_match_model_endo_k3_new.h5'
    tr_epoch = 10
    input_dim = x_tr.shape[2:]
    branch_a = Input(shape=input_dim)
    branch_b = Input(shape=input_dim)
    # Shared encoder for both branches.
    base_network = create_cnn_network(input_dim, conv_f_n, dense_n)
    emb_a = base_network(branch_a)
    emb_b = base_network(branch_b)
    distance = Lambda(euclidean_distance,
                      output_shape=eucl_dist_output_shape)([emb_a, emb_b])
    model_tr = Model(inputs=[branch_a, branch_b], outputs=distance)
    # train
    # opt_func = RMSprop(lr=.0005, clipnorm=1)
    opt_func = RMSprop()
    model_tr.compile(loss=contrastive_loss, optimizer=opt_func)
    model_tr.fit([x_tr[:, 0], x_tr[:, 1]], y_tr, validation_split=.30,
                 batch_size=128, verbose=2, epochs=tr_epoch,
                 callbacks=[EarlyStopping(monitor='val_loss', patience=2)])
    model_tr.save(save_name)
    return model_tr
# test, also provide info on which pair it was trained on and which it was tested on
def train_model(x_tr, y_tr, conv_f_n):
    """Train a Siamese shape-matching network (endocardium) and save it."""
    save_name = 'shape_match_model_endo.h5'
    tr_epoch = 15
    input_dim = x_tr.shape[2:]
    in_a = Input(shape=input_dim)
    in_b = Input(shape=input_dim)
    # Both inputs share the same CNN encoder.
    base_network = create_cnn_network(input_dim, conv_f_n)
    feat_a = base_network(in_a)
    feat_b = base_network(in_b)
    distance = Lambda(euclidean_distance,
                      output_shape=eucl_dist_output_shape)([feat_a, feat_b])
    model_tr = Model(input=[in_a, in_b], output=distance)
    # train
    # opt_func = RMSprop(lr=.0005, clipnorm=1)
    opt_func = RMSprop()
    model_tr.compile(loss=contrastive_loss, optimizer=opt_func)
    model_tr.fit([x_tr[:, 0], x_tr[:, 1]], y_tr, validation_split=.25,
                 batch_size=32, verbose=2, nb_epoch=tr_epoch,
                 callbacks=[EarlyStopping(monitor='val_loss', patience=2)])
    model_tr.save(save_name)
    return model_tr
# test, also provide info on which pair it was trained on and which it was tested on
def train_model(x_tr, y_tr, conv_f_n, dense_n):
    """Train a Siamese shape-matching network (epicardium, sx4), plot the
    train/validation loss curves, and save the model."""
    save_name = '/home/nripesh/PycharmProjects/Siamese/siamese_supervised/shape_match_model_epi_sx4.h5'
    tr_epoch = 20
    input_dim = x_tr.shape[2:]
    in_left = Input(shape=input_dim)
    in_right = Input(shape=input_dim)
    # Shared encoder applied to both sides of the pair.
    base_network = create_cnn_network(input_dim, conv_f_n, dense_n)
    enc_left = base_network(in_left)
    enc_right = base_network(in_right)
    distance = Lambda(euclidean_distance,
                      output_shape=eucl_dist_output_shape)([enc_left, enc_right])
    model_tr = Model(inputs=[in_left, in_right], outputs=distance)
    # train
    opt_func = RMSprop(lr=.003)
    model_tr.compile(loss=contrastive_loss, optimizer=opt_func)
    history = model_tr.fit([x_tr[:, 0], x_tr[:, 1]], y_tr, validation_split=.30,
                           batch_size=128, verbose=2, epochs=tr_epoch,
                           callbacks=[EarlyStopping(monitor='val_loss', patience=2)])
    # summarize history for loss
    plt.figure(1)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    # plt.show()
    plt.savefig('/home/nripesh/PycharmProjects/Siamese/siamese_supervised/epi_train_val_loss.png')
    plt.close(1)
    model_tr.save(save_name)
    return model_tr
# test, also provide info on which pair it was trained on and which it was tested on
def train_model(x_tr, y_tr, conv_f_n, dense_n):
    """Train a Siamese shape-matching network (no-maxpool CNN block) and save it."""
    save_name = 'shape_match_model1.h5'
    tr_epoch = 10
    input_dim = x_tr.shape[2:]
    pair_a = Input(shape=input_dim)
    pair_b = Input(shape=input_dim)
    # Shared no-maxpool CNN encoder for both pair members.
    base_network = cnn_block_without_maxpool(input_dim, conv_f_n, dense_n)
    vec_a = base_network(pair_a)
    vec_b = base_network(pair_b)
    distance = Lambda(euclidean_distance,
                      output_shape=eucl_dist_output_shape)([vec_a, vec_b])
    model_tr = Model(input=[pair_a, pair_b], output=distance)
    # train
    # opt_func = RMSprop(lr=.0005, clipnorm=1)
    opt_func = RMSprop()
    model_tr.compile(loss=contrastive_loss, optimizer=opt_func)
    model_tr.fit([x_tr[:, 0], x_tr[:, 1]], y_tr, validation_split=.30,
                 batch_size=128, verbose=2, nb_epoch=tr_epoch,
                 callbacks=[EarlyStopping(monitor='val_loss', patience=2)])
    model_tr.save(save_name)
    return model_tr
# test, also provide info on which pair it was trained on and which it was tested on