def test_tiny_image_captioning(self):
    # use a conv layer as an image feature branch
    img_input_1 = Input(shape=(16, 16, 3))
    x = Conv2D(2, (3, 3))(img_input_1)
    x = Flatten()(x)
    img_model = Model(inputs=[img_input_1], outputs=[x])

    # run the image branch, then project and reshape it into a 1-step sequence
    img_input = Input(shape=(16, 16, 3))
    x = img_model(img_input)
    x = Dense(8, name='cap_dense')(x)
    x = Reshape((1, 8), name='cap_reshape')(x)

    # embed the caption tokens (max_length = 5)
    sentence_input = Input(shape=(5,))
    y = Embedding(8, 8, name='cap_embedding')(sentence_input)

    # prepend the image step to the caption sequence and decode with an LSTM
    z = concatenate([x, y], axis=1, name='cap_merge')
    z = LSTM(4, return_sequences=True, name='cap_lstm')(z)
    z = TimeDistributed(Dense(8), name='cap_timedistributed')(z)
    combined_model = Model(inputs=[img_input, sentence_input], outputs=[z])

    self._test_keras_model(combined_model, one_dim_seq_flags=[False, True])
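
Concatenating along axis 1 prepends the single reshaped image feature step (shape (1, 8)) to the 5-step embedded caption, so the LSTM sees a 6-step sequence and the model's output has shape (None, 6, 8). Outside the test harness, the same graph can be built and exercised standalone; a minimal sketch, assuming the standard Keras 2 functional-API imports and random dummy inputs (not part of the test suite):

import numpy as np
from keras.layers import (Input, Conv2D, Flatten, Dense, Reshape,
                          Embedding, LSTM, TimeDistributed, concatenate)
from keras.models import Model

# image feature branch: small conv net flattened to a feature vector
img_input_1 = Input(shape=(16, 16, 3))
x = Conv2D(2, (3, 3))(img_input_1)
x = Flatten()(x)
img_model = Model(inputs=[img_input_1], outputs=[x])

# project the image features to width 8 and reshape to one time step
img_input = Input(shape=(16, 16, 3))
x = img_model(img_input)
x = Dense(8, name='cap_dense')(x)
x = Reshape((1, 8), name='cap_reshape')(x)

# caption branch: 5 token ids embedded into 8-dim vectors
sentence_input = Input(shape=(5,))
y = Embedding(8, 8, name='cap_embedding')(sentence_input)

# image step + caption steps -> 6-step sequence decoded by the LSTM
z = concatenate([x, y], axis=1, name='cap_merge')
z = LSTM(4, return_sequences=True, name='cap_lstm')(z)
z = TimeDistributed(Dense(8), name='cap_timedistributed')(z)
model = Model(inputs=[img_input, sentence_input], outputs=[z])

# dummy image and token ids; prints (1, 6, 8)
img = np.random.rand(1, 16, 16, 3)
tokens = np.random.randint(0, 8, size=(1, 5))
print(model.predict([img, tokens]).shape)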