def build(self, input_shape):
    # Input shape is (batch_size, num_words, num_senses, num_hyps + 1); the last
    # value along the final axis is the word index, not a hypernym index.
    self.num_senses = input_shape[-2]
    self.num_hyps = input_shape[-1] - 1  # -1 because the last value is a word index
    # Sense priors are stored as an embedding of size 1 per word.
    if self.set_sense_priors:
        self.sense_priors = self._get_initial_sense_priors((self.word_index_size, 1),
                                                           name='{}_sense_priors'.format(self.name))
    else:
        # OntoLSTM makes the sense probabilities uniform when the sense parameters are zero.
        self.sense_priors = K.zeros((self.word_index_size, 1))  # uniform sense probabilities
    # Set the initial weights aside so that Embedding's build does not try to set
    # them; Embedding would not know what sense priors are. Initializing the
    # attribute to None first keeps the check below safe when no initial weights
    # were passed (assuming it is not already set in __init__).
    self.onto_aware_embedding_weights = None
    if self.initial_weights is not None:
        self.onto_aware_embedding_weights = self.initial_weights
        self.initial_weights = None
    # The following call populates self._trainable_weights.
    super(OntoAwareEmbedding, self).build(input_shape)  # input_shape is not used by Embedding's build.
    if not self.tune_embedding:
        # Freeze the embedding matrix by moving it to the non-trainable weights.
        self._non_trainable_weights.append(self._trainable_weights.pop())
    if self.set_sense_priors:
        self._trainable_weights.append(self.sense_priors)
    if self.onto_aware_embedding_weights is not None:
        self.set_weights(self.onto_aware_embedding_weights)
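
To see the effect of the weight bookkeeping above, here is a minimal usage sketch. The constructor is not shown in this snippet, so the keyword arguments below (word_index_size, output_dim, set_sense_priors, tune_embedding) are assumptions based on the attributes used in build() and on the fact that the class extends Keras's Embedding; the shapes are illustrative only.

# Hypothetical usage, assuming a constructor that accepts these arguments.
layer = OntoAwareEmbedding(word_index_size=10000, output_dim=50,
                           set_sense_priors=True, tune_embedding=False)
# (batch_size, num_words, num_senses, num_hyps + 1); the extra 1 is the word index.
layer.build((32, 20, 3, 5 + 1))
# With tune_embedding=False, build() moves the embedding matrix to the
# non-trainable list, leaving only the sense priors trainable.
print(len(layer._trainable_weights))      # expected: 1 (the sense priors)
print(len(layer._non_trainable_weights))  # expected: 1 (the embedding matrix)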