from keras.layers import core
from keras.utils.test_utils import layer_test


def test_permute():
    layer_test(core.Permute,
               kwargs={'dims': (2, 1)},
               input_shape=(3, 2, 4))
Python examples using the Permute() class
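For reference, Keras's Permute layer reorders the non-batch axes of its input; the `dims` indices are 1-based and exclude the batch dimension. A minimal numpy sketch of the equivalent transpose, matching the shapes exercised by test_permute above (the numpy usage is an illustration, not from the source):

import numpy as np

# Permute((2, 1)) swaps the two non-batch axes; in numpy this is a
# transpose that keeps axis 0 (the batch) fixed.
x = np.arange(3 * 2 * 4).reshape(3, 2, 4)  # matches input_shape=(3, 2, 4) above
y = np.transpose(x, (0, 2, 1))             # numpy equivalent of Permute((2, 1))
assert y.shape == (3, 4, 2)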
def create_network():
# PixelCNN architecture, no pooling layer
    x = Input(batch_shape=(batch_size, n_channel, mnist_dim, mnist_dim))
# First layer using mask A
x_ = Convolution2DNoFlip(*first_layer, input_shape=(1, 28, 28), border_mode='same', mask='A')(x)
# Second type of layers using mask B
for i in range(n_layer // 2):
x_1 = Convolution2DNoFlip(*second_layer, activation='relu', border_mode='same', mask='B')(x_)
x_2 = Convolution2DNoFlip(*second_layer, activation='relu', border_mode='same', mask='B')(x_1)
if res_connections:
x_ = merge([x_, x_2], mode='sum')
else:
x_ = x_2
# 2 layers of Relu followed by 1x1 conv
x_ = Convolution2DNoFlip(64, 1, 1, activation='relu', border_mode='same', mask='B')(x_)
x_ = Convolution2DNoFlip(128, 1, 1, activation='relu', border_mode='same', mask='B')(x_)
    # Final layer; its width depends on the output mode
    x_ = Convolution2DNoFlip(*third_layer, border_mode='same', mask='B')(x_)
if MODE == '256ary':
x_ = Reshape((256, mnist_dim**2))(x_)
x_ = Permute((2,1))(x_)
y = Activation(activation)(x_)
model = Model(x, y)
model.compile(optimizer='adagrad', loss=cost)
    print("Model compiled")
return model
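The '256ary' head above relies on Reshape plus Permute to move the 256 intensity bins into the last axis, so that the softmax in `Activation(activation)` normalises each pixel independently. A numpy sketch of that axis shuffle, assuming mnist_dim = 28 (illustrative values, not from the source):

import numpy as np

mnist_dim = 28
logits = np.random.randn(1, 256, mnist_dim, mnist_dim)   # channels-first conv output
flat = logits.reshape(1, 256, mnist_dim ** 2)            # Reshape((256, mnist_dim**2))
per_pixel = np.transpose(flat, (0, 2, 1))                # Permute((2, 1))
probs = np.exp(per_pixel) / np.exp(per_pixel).sum(-1, keepdims=True)
assert np.allclose(probs.sum(-1), 1.0)                   # one 256-way distribution per pixel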
from keras.models import Model
from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Dropout, merge
from keras.layers import core


def Unet(nClasses, optimizer=None, input_width=360, input_height=480, nChannels=1):
inputs = Input((nChannels, input_height, input_width))
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
conv1 = Dropout(0.2)(conv1)
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Dropout(0.2)(conv2)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
conv3 = Dropout(0.2)(conv3)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
up1 = merge([UpSampling2D(size=(2, 2))(conv3), conv2], mode='concat', concat_axis=1)
conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up1)
conv4 = Dropout(0.2)(conv4)
conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv4)
up2 = merge([UpSampling2D(size=(2, 2))(conv4), conv1], mode='concat', concat_axis=1)
conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up2)
conv5 = Dropout(0.2)(conv5)
conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv5)
    conv6 = Convolution2D(nClasses, 1, 1, activation='relu', border_mode='same')(conv5)
    conv6 = core.Reshape((nClasses, input_height * input_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    conv7 = core.Activation('softmax')(conv6)
model = Model(input=inputs, output=conv7)
    if optimizer is not None:
        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])
return model
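A minimal usage sketch for the Unet above, assuming a Keras 1.x, channels-first environment; the class count and optimizer choice are illustrative, not from the source:

from keras.optimizers import Adam

model = Unet(nClasses=12, optimizer=Adam(lr=1e-3))
# The softmax head emits (batch, input_height * input_width, nClasses),
# so per-pixel one-hot targets must be flattened to the same layout.
model.summary()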
def test_permute(self):
"""
    Test the conversion of a Permute layer.
"""
from keras.layers.core import Permute
# Create a simple Keras model
model = Sequential()
    model.add(Permute((3, 2, 1), input_shape=(10, 64, 3)))
input_names = ['input']
output_names = ['output']
spec = keras.convert(model, input_names, output_names).get_spec()
self.assertIsNotNone(spec)
# Test the model class
self.assertIsNotNone(spec.description)
self.assertTrue(spec.HasField('neuralNetwork'))
# Test the inputs and outputs
    self.assertEqual(len(spec.description.input), len(input_names))
    self.assertCountEqual(input_names,
                          [x.name for x in spec.description.input])
    self.assertEqual(len(spec.description.output), len(output_names))
    self.assertCountEqual(output_names,
                          [x.name for x in spec.description.output])
# Test the layer parameters.
layers = spec.neuralNetwork.layers
layer_0 = layers[0]
self.assertIsNotNone(layer_0.permute)
def test_permute(self):
"""
    Test the conversion of a Permute layer.
"""
from keras.layers.core import Permute
# Create a simple Keras model
model = Sequential()
    model.add(Permute((3, 2, 1), input_shape=(10, 64, 3)))
input_names = ['input']
output_names = ['output']
spec = keras.convert(model, input_names, output_names).get_spec()
self.assertIsNotNone(spec)
# Test the model class
self.assertIsNotNone(spec.description)
self.assertTrue(spec.HasField('neuralNetwork'))
# Test the inputs and outputs
    self.assertEqual(len(spec.description.input), len(input_names))
    self.assertEqual(sorted(input_names),
                     sorted(x.name for x in spec.description.input))
    self.assertEqual(len(spec.description.output), len(output_names))
    self.assertEqual(sorted(output_names),
                     sorted(x.name for x in spec.description.output))
# Test the layer parameters.
layers = spec.neuralNetwork.layers
layer_0 = layers[0]
self.assertIsNotNone(layer_0.permute)
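Outside a test case, the same conversion can be sketched as follows, assuming coremltools with its Keras converter installed (the `keras` name in the tests above refers to `coremltools.converters.keras`):

from keras.models import Sequential
from keras.layers.core import Permute
from coremltools.converters import keras as keras_converter

model = Sequential()
model.add(Permute((3, 2, 1), input_shape=(10, 64, 3)))
spec = keras_converter.convert(model, ['input'], ['output']).get_spec()
print(spec.neuralNetwork.layers[0].permute)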
def create_miml_model(base_model, L, K, name="miml"):
"""
Arguments:
base_model (Sequential):
A Neural Network in keras form (e.g. VGG, GoogLeNet)
L (int):
number of labels
K (int):
number of sub categories
"""
model = Sequential(layers=base_model.layers, name=name)
# input: feature_map.shape = (n_bags, C, H, W)
_, C, H, W = model.layers[-1].output_shape
print("Creating miml... input feature_map.shape={},{},{}".format(C, H, W))
n_instances = H * W
# shape -> (n_bags, (L * K), n_instances, 1)
model.add(Convolution2D(L * K, 1, 1, name=MIML_FIRST_LAYER_NAME))
# shape -> (n_bags, L, K, n_instances)
model.add(Reshape((L, K, n_instances), name=MIML_CUBE_LAYER_NAME))
# shape -> (n_bags, L, 1, n_instances)
model.add(MaxPooling2D((K, 1), strides=(1, 1)))
# softmax
model.add(Reshape((L, n_instances)))
model.add(Permute((2, 1)))
model.add(Activation("softmax"))
model.add(Permute((2, 1)))
model.add(Reshape((L, 1, n_instances), name=MIML_TABLE_LAYER_NAME))
# shape -> (n_bags, L, 1, 1)
model.add(MaxPooling2D((1, n_instances), strides=(1, 1)))
# shape -> (n_bags, L)
model.add(Reshape((L,), name=MIML_OUTPUT_LAYER_NAME))
return model
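The Permute -> softmax -> Permute sandwich above exists because Keras's softmax normalises the last axis: the label axis L is moved last to obtain one distribution over labels per instance, then moved back. A numpy sketch with illustrative sizes (L = 5 labels, a 7x7 feature map giving 49 instances):

import numpy as np

L, n_instances = 5, 49
scores = np.random.randn(1, L, n_instances)
per_instance = np.transpose(scores, (0, 2, 1))                 # Permute((2, 1))
soft = np.exp(per_instance) / np.exp(per_instance).sum(-1, keepdims=True)
restored = np.transpose(soft, (0, 2, 1))                       # Permute((2, 1)) back
assert np.allclose(restored.sum(axis=1), 1.0)                  # normalised over labels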
from keras.layers import (Conv2D, MaxPooling2D, BatchNormalization, PReLU,
                          SpatialDropout2D, Permute, ZeroPadding2D, add)


def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
# main branch
internal = output // internal_scale
encoder = inp
# 1x1
input_stride = 2 if downsample else 1 # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
encoder = Conv2D(internal, (input_stride, input_stride),
# padding='same',
strides=(input_stride, input_stride), use_bias=False)(encoder)
# Batch normalization + PReLU
encoder = BatchNormalization(momentum=0.1)(encoder) # enet uses momentum of 0.1, keras default is 0.99
encoder = PReLU(shared_axes=[1, 2])(encoder)
# conv
if not asymmetric and not dilated:
encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
elif asymmetric:
encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
elif dilated:
encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
else:
        raise Exception("You shouldn't be here")
encoder = BatchNormalization(momentum=0.1)(encoder) # enet uses momentum of 0.1, keras default is 0.99
encoder = PReLU(shared_axes=[1, 2])(encoder)
# 1x1
encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)
encoder = BatchNormalization(momentum=0.1)(encoder) # enet uses momentum of 0.1, keras default is 0.99
encoder = SpatialDropout2D(dropout_rate)(encoder)
other = inp
# other branch
    if downsample:
        other = MaxPooling2D()(other)
        # Keras has no layer that zero-pads the channel axis directly, so the
        # channel axis is swapped into a spatial position, padded on one side,
        # and swapped back.
        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)
encoder = add([encoder, other])
encoder = PReLU(shared_axes=[1, 2])(encoder)
return encoder
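The Permute/ZeroPadding2D pair in the downsampling branch above compensates for Keras having no layer that zero-pads the channel axis: channels are swapped into a spatial position, padded on one side, and swapped back. A numpy equivalent with illustrative sizes (16 -> 32 feature maps, channels_last; not from the source):

import numpy as np

x = np.random.randn(1, 8, 8, 16)                             # (batch, H, W, C)
swapped = np.transpose(x, (0, 1, 3, 2))                      # Permute((1, 3, 2)) -> (1, 8, 16, 8)
padded = np.pad(swapped, ((0, 0), (0, 0), (0, 16), (0, 0)))  # ZeroPadding2D((tb_pad, lr_pad))
out = np.transpose(padded, (0, 1, 3, 2))                     # Permute((1, 3, 2)) back
assert out.shape == (1, 8, 8, 32)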
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
# main branch
internal = output // internal_scale
encoder = inp
# 1x1
input_stride = 2 if downsample else 1 # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
encoder = Conv2D(internal, (input_stride, input_stride),
# padding='same',
strides=(input_stride, input_stride), use_bias=False)(encoder)
# Batch normalization + PReLU
encoder = BatchNormalization(momentum=0.1)(encoder) # enet_unpooling uses momentum of 0.1, keras default is 0.99
encoder = PReLU(shared_axes=[1, 2])(encoder)
# conv
if not asymmetric and not dilated:
encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
elif asymmetric:
encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
elif dilated:
encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
else:
        raise Exception("You shouldn't be here")
encoder = BatchNormalization(momentum=0.1)(encoder) # enet_unpooling uses momentum of 0.1, keras default is 0.99
encoder = PReLU(shared_axes=[1, 2])(encoder)
# 1x1
encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)
encoder = BatchNormalization(momentum=0.1)(encoder) # enet_unpooling uses momentum of 0.1, keras default is 0.99
encoder = SpatialDropout2D(dropout_rate)(encoder)
other = inp
# other branch
if downsample:
other, indices = MaxPoolingWithArgmax2D()(other)
other = Permute((1, 3, 2))(other)
pad_feature_maps = output - inp.get_shape().as_list()[3]
tb_pad = (0, 0)
lr_pad = (0, pad_feature_maps)
other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
other = Permute((1, 3, 2))(other)
encoder = add([encoder, other])
encoder = PReLU(shared_axes=[1, 2])(encoder)
if downsample:
return encoder, indices
else:
return encoder
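A minimal usage sketch for this variant, assuming Keras 2 and a MaxPoolingWithArgmax2D layer in scope (it comes from the source repository, not from Keras itself); shapes and widths are illustrative:

from keras.layers import Input, Conv2D

inp = Input(shape=(256, 256, 3))
x = Conv2D(16, (3, 3), padding='same')(inp)
x, idx = bottleneck(x, 64, downsample=True)   # keep idx for the decoder's unpooling
x = bottleneck(x, 64)                         # regular block reuses the residual input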
def build_keras_model_score_word_sg(index_size, vector_size,
                                    # vocab_size,
                                    context_size,
                                    # code_dim,
                                    score_vector_size,
                                    sub_batch_size=256,
                                    word_vectors=None,
                                    score_vectors=None,
                                    hidden_vectors=None,
                                    model=None):
"""
>>> word_vectors=np.array([[1,2,-1,1],[3,4,-1,-2],[5,6,-2,-2]])
>>> score_vectors=np.array([[10,20,11,21,5,6,7,8],[30,40,33,41,9,8,7,6]])
>>> hidden_vectors=np.array([[1,0,1,1],[0,1,1,1]])
>>> sub_batch_size=3
>>> vector_size=4
>>> score_vector_size=2
>>> kerasmodel=build_keras_model_score_word_sg(index_size=3,vector_size=vector_size,context_size=2,score_vector_size=score_vector_size,sub_batch_size=sub_batch_size,word_vectors=word_vectors,score_vectors=score_vectors,hidden_vectors=hidden_vectors)
>>> ind=[[0,1,2],[1,2,0]]
>>> ipt=[[1,0,1],[0,1,0]]
>>> tmp1=kerasmodel.predict({'index':np.array(ind),'point':np.array(ipt)})
>>> tmp3=np.array([[score_vectors[ipt[i][j]].reshape((score_vector_size,vector_size)).dot(word_vectors[ind[i][j]]) for j in range(sub_batch_size) ] for i in range(2)])
>>> tmp2=np.array([[word_vectors[ind[i][j]].dot(hidden_vectors[ipt[i][j]].T) for j in range(sub_batch_size) ] for i in range(2)])
>>> np.linalg.norm(1/(1+np.exp(-tmp2))-tmp1['code'])+np.linalg.norm(tmp1['score']-tmp3) < 0.0001
True
"""
    kerasmodel = Graph()
    kerasmodel.add_input(name='point', input_shape=(sub_batch_size,), dtype=int)
    kerasmodel.add_input(name='index', input_shape=(sub_batch_size,), dtype=int)
    if word_vectors is None:
        kerasmodel.add_node(Embedding(index_size, vector_size, input_length=sub_batch_size), name='embedding', input='index')
    else:
        kerasmodel.add_node(Embedding(index_size, vector_size, input_length=sub_batch_size, weights=[word_vectors]), name='embedding', input='index')
    if hidden_vectors is None:
        kerasmodel.add_node(Embedding(context_size, vector_size, input_length=sub_batch_size), name='embedpoint', input='point')
    else:
        kerasmodel.add_node(Embedding(context_size, vector_size, input_length=sub_batch_size, weights=[hidden_vectors]), name='embedpoint', input='point')
    kerasmodel.add_node(Lambda(lambda x: x.sum(2)), name='merge', inputs=['embedding', 'embedpoint'], merge_mode='mul')
    kerasmodel.add_node(Activation('sigmoid'), name='sigmoid', input='merge')
    kerasmodel.add_output(name='code', input='sigmoid')
    if score_vectors is None:
        kerasmodel.add_node(Embedding(context_size, score_vector_size * vector_size, input_length=sub_batch_size), name='embedscore', input='point')
    else:
        kerasmodel.add_node(Embedding(context_size, score_vector_size * vector_size, input_length=sub_batch_size, weights=[score_vectors]), name='embedscore', input='point')
    kerasmodel.add_node(Reshape((sub_batch_size, score_vector_size, vector_size)), name='score1', input='embedscore')
    kerasmodel.add_node(Flatten(), name='index1', input='embedding')
    kerasmodel.add_node(RepeatVector(score_vector_size), name='index2', input='index1')
    kerasmodel.add_node(Reshape((score_vector_size, sub_batch_size, vector_size)), name='index3', input='index2')
    kerasmodel.add_node(Permute((2, 1, 3)), name='index4', input='index3')
    kerasmodel.add_node(Lambda(lambda x: x.sum(-1)), name='scorenode', inputs=['score1', 'index4'], merge_mode='mul')
    kerasmodel.add_output(name='score', input='scorenode')
    kerasmodel.compile('rmsprop', {'code': 'mse', 'score': 'mse'})
    return kerasmodel
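The 'index4' permutation above aligns the tiled word vectors with the reshaped score embeddings: RepeatVector makes score_vector_size copies of the flattened (sub_batch_size, vector_size) block, Reshape recovers (score_vector_size, sub_batch_size, vector_size), and Permute((2, 1, 3)) swaps the first two non-batch axes so the copies line up with 'score1'. A numpy sketch using the doctest's sizes:

import numpy as np

sub_batch_size, score_vector_size, vector_size = 3, 2, 4
tiled = np.random.randn(1, score_vector_size, sub_batch_size, vector_size)
aligned = np.transpose(tiled, (0, 2, 1, 3))   # Permute((2, 1, 3))
assert aligned.shape == (1, sub_batch_size, score_vector_size, vector_size)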