def model(self, input_text_begin, input_text_end, gene, variation, num_output_classes,
batch_size, embeddings, training=True, dropout=TC_MODEL_DROPOUT):
"""
Creates a model for text classification
:param tf.Tensor input_text: the input data, the text as
[batch_size, text_vector_max_length, embeddings_size]
:param int num_output_classes: the number of output classes for the classifier
:param int batch_size: batch size, the same used in the dataset
:param List[List[float]] embeddings: a matrix with the embeddings for the embedding lookup
:param int num_hidden: number of hidden GRU cells in every layer
:param int num_layers: number of layers of the model
:param float dropout: dropout value between layers
:param boolean training: whether the model is built for training or not
:return Dict[str,tf.Tensor]: a dict with logits and prediction tensors
"""
input_text_begin = tf.reshape(input_text_begin, [batch_size, MAX_WORDS])
if input_text_end is not None:
input_text_end = tf.reshape(input_text_end, [batch_size, MAX_WORDS])
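    # look up the embeddings of the words and compute the real (unpadded)
    # length of each sequence for the dynamic RNNs below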
embedded_sequence_begin, sequence_length_begin, \
embedded_sequence_end, sequence_length_end, \
gene, variation = \
self.model_embedded_sequence(embeddings, input_text_begin, input_text_end, gene,
variation)
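    # extract the maximum sequence length of the batch from the dynamic shape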
_, max_length, _ = tf.unstack(tf.shape(embedded_sequence_begin))
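    # encode the beginning of the text with a recurrent network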
with tf.variable_scope('text_begin'):
output_begin = self.rnn(embedded_sequence_begin, sequence_length_begin, max_length,
dropout, batch_size, training)
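    # when the end of the text is used, encode it with a second recurrent
    # network and concatenate both encodings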
if input_text_end is not None:
with tf.variable_scope('text_end'):
output_end = self.rnn(embedded_sequence_end, sequence_length_end, max_length,
dropout, batch_size, training)
output = tf.concat([output_begin, output_end], axis=1)
else:
output = output_begin
    # fully connected layer that combines the text encoding with gene and variation
logits = self.model_fully_connected(output, gene, variation, num_output_classes, dropout,
training)
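    # softmax over the logits to get the class probabilities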
prediction = tf.nn.softmax(logits)
return {
'logits' : logits,
'prediction': prediction,
}
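
# Usage sketch (an illustration, not part of the original file): how this
# method might be wired into a TensorFlow 1.x graph. The class name
# TextClassificationModelSimple, the placeholder shapes and the 9 output
# classes below are assumptions; only the model() signature comes from the
# code above.
#
#     model_builder = TextClassificationModelSimple()
#     text_begin = tf.placeholder(tf.int32, [BATCH_SIZE, MAX_WORDS])
#     text_end = tf.placeholder(tf.int32, [BATCH_SIZE, MAX_WORDS])
#     gene = tf.placeholder(tf.int32, [BATCH_SIZE])
#     variation = tf.placeholder(tf.int32, [BATCH_SIZE])
#     outputs = model_builder.model(text_begin, text_end, gene, variation,
#                                   num_output_classes=9, batch_size=BATCH_SIZE,
#                                   embeddings=embeddings_matrix, training=False)
#     probabilities = outputs['prediction']  # [BATCH_SIZE, 9] softmax output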