def build(self):
    """Build embedding lookups for (subject, relation, object) triples.

    Returns:
        A 3-tuple of 2-D tensors, each of shape (batch, embedding_size):
        the subject, relation and object embeddings.

    NOTE(review): relies on the legacy Keras 0.x/1.x Embedding API
    (`init=`, `W_constraint=`) — confirm the installed Keras version.
    """
    subject = self.subject
    relation = self.relation
    object_ = self.get_object()
    # Read the embedding width once and reuse it (the original re-queried
    # model_params for every layer).
    embedding_size = self.model_params.get('n_embed_dims', 100)

    # Relation embeddings are unconstrained; entity embeddings are kept on
    # the unit sphere via a unit-norm constraint over each embedding row.
    embedding_rel = Embedding(input_dim=self.config['n_words'],
                              output_dim=embedding_size,
                              init='he_uniform',
                              mask_zero=False)
    embedding_ent = Embedding(input_dim=self.config['n_words'],
                              output_dim=embedding_size,
                              init='he_uniform',
                              W_constraint=unitnorm(axis=1),
                              mask_zero=False)

    # Subject and object share the entity embedding table.
    subject_embedding = embedding_ent(subject)
    relation_embedding = embedding_rel(relation)
    object_embedding = embedding_ent(object_)

    # Collapse to (batch, embedding_size); assumes each input is a single
    # id so the sequence axis has length 1 — TODO confirm with callers.
    subject_output = Reshape((embedding_size,))(subject_embedding)
    relation_output = Reshape((embedding_size,))(relation_embedding)
    object_output = Reshape((embedding_size,))(object_embedding)
    return subject_output, relation_output, object_output
# Collected example snippets for keras.constraints.unitnorm()
def test_unitnorm():
    """Columns of a unitnorm-constrained variable should have L2 norm ~1."""
    constraint = constraints.unitnorm()
    result = K.eval(constraint(K.variable(example_array)))
    # Column-wise Euclidean norms; the constraint normalizes along axis 0.
    col_norms = np.sqrt((result ** 2).sum(axis=0))
    max_deviation = np.max(np.abs(col_norms - 1.))
    assert(np.abs(max_deviation) < 10e-5)
def test_unitnorm(self):
    """Rows of the constrained array should have unit L2 norm."""
    from keras.constraints import unitnorm
    constrained = unitnorm()(self.example_array)
    # Row-wise norms: the constraint should normalize along axis 1 here.
    row_norms = np.sqrt(np.sum(constrained.eval() ** 2, axis=1))
    worst = np.max(np.abs(row_norms - 1.))
    self.assertAlmostEqual(worst, 0.)
def test_unitnorm_constraint(self):
    """Embedding rows trained under unitnorm() keep unit L2 norm.

    Updated from the deprecated Keras 0.x API (`Dense(2, 1)`,
    `model.train(...)`) to the later `Dense(1)` / `train_on_batch(...)`
    form, consistent with the other test_unitnorm_constraint in this file.
    """
    lookup = Sequential()
    lookup.add(Embedding(3, 2, weights=[self.W1], W_constraint=unitnorm(),
                         input_length=1))
    lookup.add(Flatten())
    lookup.add(Dense(1))
    lookup.add(Activation('sigmoid'))
    lookup.compile(loss='binary_crossentropy', optimizer='sgd',
                   class_mode='binary')
    lookup.train_on_batch(self.X1, np.array([[1], [0]], dtype='int32'))
    # After one SGD step the constraint must have renormalized each row.
    norm = np.linalg.norm(lookup.params[0].get_value(), axis=1)
    self.assertTrue(np.allclose(norm, np.ones_like(norm).astype('float32')))
def test_unitnorm():
    """Applying unitnorm() should normalize every column to L2 norm 1."""
    instance = constraints.unitnorm()
    normalized = K.eval(instance(K.variable(example_array)))
    per_column = np.sqrt(np.sum(np.square(normalized), axis=0))
    # Largest absolute deviation from 1 must be tiny.
    assert(np.abs(np.max(np.abs(per_column - 1.))) < 10e-5)
def test_unitnorm():
    """unitnorm() output columns should all lie on the unit sphere."""
    normalized = constraints.unitnorm()(K.variable(example_array))
    evaluated = K.eval(normalized)
    norms = np.sqrt((evaluated ** 2).sum(axis=0))
    deviations = np.abs(norms - 1.)
    # Every column norm should be ~1 within floating-point tolerance.
    assert(np.abs(deviations.max()) < 10e-5)
def test_unitnorm(self):
    """After the constraint, each row's L2 norm should equal 1."""
    from keras.constraints import unitnorm
    result = unitnorm()(self.example_array).eval()
    # Norm along axis 1 (rows) — this variant normalizes per row.
    norms = np.sqrt(np.sum(result ** 2, axis=1))
    largest_gap = np.max(np.abs(norms - 1.))
    self.assertAlmostEqual(largest_gap, 0.)
def test_unitnorm_constraint(self):
    """End-to-end: embedding rows stay unit-norm after one training step."""
    model = Sequential()
    model.add(Embedding(3, 2, weights=[self.W1], W_constraint=unitnorm(),
                        input_length=1))
    model.add(Flatten())
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='sgd',
                  class_mode='binary')
    model.train_on_batch(self.X1, np.array([[1], [0]], dtype='int32'))
    # The constraint renormalizes the embedding matrix row-wise each update.
    row_norms = np.linalg.norm(model.params[0].get_value(), axis=1)
    expected = np.ones_like(row_norms).astype('float32')
    self.assertTrue(np.allclose(row_norms, expected))
def test_unitnorm(self):
    """Row norms of the constrained example array should be ~1."""
    from keras.constraints import unitnorm
    constrained = unitnorm()(self.example_array)
    evaluated = constrained.eval()
    per_row = np.sqrt(np.square(evaluated).sum(axis=1))
    # In the unit-norm constraint, every row norm should equal 1.
    self.assertAlmostEqual(np.max(np.abs(per_row - 1.)), 0.)
def test_unitnorm_constraint(self):
    """Embedding rows trained under unitnorm() keep unit L2 norm.

    Updated from the deprecated Keras 0.x API (`Dense(2, 1)`,
    `model.train(...)`) to the later `Dense(1)` / `train_on_batch(...)`
    form, consistent with the other test_unitnorm_constraint in this file.
    """
    lookup = Sequential()
    lookup.add(Embedding(3, 2, weights=[self.W1], W_constraint=unitnorm(),
                         input_length=1))
    lookup.add(Flatten())
    lookup.add(Dense(1))
    lookup.add(Activation('sigmoid'))
    lookup.compile(loss='binary_crossentropy', optimizer='sgd',
                   class_mode='binary')
    lookup.train_on_batch(self.X1, np.array([[1], [0]], dtype='int32'))
    # After one SGD step the constraint must have renormalized each row.
    norm = np.linalg.norm(lookup.params[0].get_value(), axis=1)
    self.assertTrue(np.allclose(norm, np.ones_like(norm).astype('float32')))