def build(self, input_shape):
    assert len(input_shape) >= 2
    input_dim = input_shape[-1]  # feature axis, matching the InputSpec set below

    if self.H == 'Glorot':
        self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
        #print('Glorot H: {}'.format(self.H))
    if self.kernel_lr_multiplier == 'Glorot':
        self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
        #print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))

    self.kernel_constraint = Clip(-self.H, self.H)
    self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
    self.kernel = self.add_weight(shape=(input_dim, self.units),
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)

    if self.use_bias:
        self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
        self.bias = self.add_weight(shape=(self.units,),  # one bias per output unit
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.lr_multipliers = [self.kernel_lr_multiplier]
        self.bias = None

    self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
    self.built = True
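
# Note: `Clip` is referenced above but not defined in these snippets. A
# minimal sketch consistent with how it is used (clip weights element-wise
# to [-H, H] after each update), assuming the Keras 2 constraints API:
from keras import backend as K
from keras.constraints import Constraint

class Clip(Constraint):
    """Clips weights element-wise to the interval [min_value, max_value]."""
    def __init__(self, min_value, max_value=None):
        self.min_value = min_value
        # Mirror the bound when only one value is given: Clip(-H) -> [-H, H].
        self.max_value = max_value if max_value is not None else -min_value

    def __call__(self, p):
        return K.clip(p, self.min_value, self.max_value)

    def get_config(self):
        return {'min_value': self.min_value, 'max_value': self.max_value}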
def build(self, input_shape):
    assert len(input_shape) >= 2
    input_dim = input_shape[-1]

    if self.H == 'Glorot':
        self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
        #print('Glorot H: {}'.format(self.H))
    if self.kernel_lr_multiplier == 'Glorot':
        self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
        #print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))

    self.kernel_constraint = Clip(-self.H, self.H)
    self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
    self.kernel = self.add_weight(shape=(input_dim, self.units),
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)

    if self.use_bias:
        self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
        self.bias = self.add_weight(shape=(self.units,),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.lr_multipliers = [self.kernel_lr_multiplier]
        self.bias = None

    self.built = True
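
# For concreteness, the Glorot-style bound and its matching learning-rate
# multiplier for a hypothetical 784 -> 256 dense layer work out as:
import numpy as np

H = np.sqrt(1.5 / (784 + 256))   # ~0.0380; weights drawn from U(-H, H)
kernel_lr_multiplier = 1. / H    # ~26.3; scales gradient updates back up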
def build(self, input_shape):
    if self.data_format == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    if input_shape[channel_axis] is None:
        raise ValueError('The channel dimension of the inputs '
                         'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis]
    kernel_shape = self.kernel_size + (input_dim, self.filters)
    base = self.kernel_size[0] * self.kernel_size[1]  # kernel area

    if self.H == 'Glorot':
        nb_input = int(input_dim * base)
        nb_output = int(self.filters * base)
        self.H = np.float32(np.sqrt(1.5 / (nb_input + nb_output)))
        #print('Glorot H: {}'.format(self.H))
    if self.kernel_lr_multiplier == 'Glorot':
        nb_input = int(input_dim * base)
        nb_output = int(self.filters * base)
        self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (nb_input + nb_output)))
        #print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))

    self.kernel_constraint = Clip(-self.H, self.H)
    self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
    self.kernel = self.add_weight(shape=kernel_shape,
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)

    if self.use_bias:
        self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
        self.bias = self.add_weight(shape=(self.filters,),  # one bias per filter
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.lr_multipliers = [self.kernel_lr_multiplier]
        self.bias = None

    # Set input spec.
    self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
    self.built = True
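
# The convolutional variant scales fan-in and fan-out by the kernel area.
# For a hypothetical 3x3 kernel with 64 input channels and 128 filters:
import numpy as np

base = 3 * 3                                # kernel area
nb_input = 64 * base                        # 576
nb_output = 128 * base                      # 1152
H = np.sqrt(1.5 / (nb_input + nb_output))   # ~0.0295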
def __init__(self,
             learning_rate=None,
             vocab_size=None,
             embedding_size=None,
             rnn_output_size=None,
             dropout_rate=None,
             bidirectional_rnn=None,
             rnn_type=None,
             rnn_layers=None,
             l1_reg=None,
             l2_reg=None,
             initializer=None,
             word_vector_init=None):
    """
    If an arg is None, it will get its value from config.active_config.
    """
    self._learning_rate = learning_rate or active_config().learning_rate
    self._vocab_size = vocab_size or active_config().vocab_size
    self._embedding_size = embedding_size or active_config().embedding_size
    self._rnn_output_size = (rnn_output_size or
                             active_config().rnn_output_size)
    self._dropout_rate = dropout_rate or active_config().dropout_rate
    self._rnn_type = rnn_type or active_config().rnn_type
    self._rnn_layers = rnn_layers or active_config().rnn_layers
    self._word_vector_init = (word_vector_init or
                              active_config().word_vector_init)

    self._initializer = initializer or active_config().initializer
    if self._initializer == 'vinyals_uniform':
        self._initializer = RandomUniform(-0.08, 0.08)

    if bidirectional_rnn is None:
        self._bidirectional_rnn = active_config().bidirectional_rnn
    else:
        self._bidirectional_rnn = bidirectional_rnn

    l1_reg = l1_reg or active_config().l1_reg
    l2_reg = l2_reg or active_config().l2_reg
    self._regularizer = l1_l2(l1_reg, l2_reg)

    self._keras_model = None

    if self._vocab_size is None:
        raise ValueError('config.active_config().vocab_size cannot be '
                         'None! You should check your config or you can '
                         'explicitly pass the vocab_size argument.')
    if self._rnn_type not in ('lstm', 'gru'):
        raise ValueError('rnn_type must be either "lstm" or "gru"!')
    if self._rnn_layers < 1:
        raise ValueError('rnn_layers must be >= 1!')
    if self._word_vector_init is not None and self._embedding_size != 300:
        raise ValueError('If word_vector_init is not None, embedding_size '
                         'must be 300')
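
# Caveat: the `arg or active_config().x` fallback treats every falsy value
# (e.g. dropout_rate=0.0) as missing, which is stricter than the docstring's
# "if an arg is None" contract. The `is None` pattern used above for
# bidirectional_rnn avoids this; a sketch of the safer form:
dropout_rate = dropout_rate if dropout_rate is not None else active_config().dropout_rate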
def to_embedding(self, vector_dim=None, learn_difference=False, name=None,
                 embeddings_initializer='he_normal'):
    from keras.layers import Embedding

    W = None
    if self.W is not None:
        # Prepend zero rows for the special tokens, then copy the
        # pretrained vectors below them.
        W = np.zeros((
            self.size + len(self._special_tokens),
            self.W.shape[1]
        ))
        W[len(self._special_tokens):, :] = self.W
        W = [W]
        vector_dim = self.W.shape[1]
    else:
        if vector_dim is None:
            raise ValueError('If container has no matrix W defined, vector '
                             'dimension for embedding must be explicitly '
                             'specified.')

    emb = Embedding(
        input_dim=self.size + len(self._special_tokens),
        output_dim=vector_dim,
        weights=W,
        mask_zero=True,
        name=name,
        embeddings_initializer=embeddings_initializer
    )

    if learn_difference:
        if W is None:
            logger.warning('Learning a difference on top of non-pretrained '
                           'word vectors is not recommended')
        from keras.models import Model
        from keras.initializers import RandomUniform
        from keras.layers import Input, add

        # Freeze the pretrained table and learn a small additive correction.
        emb.trainable = False
        delta_initializer = RandomUniform(minval=-0.005, maxval=0.005)
        if name is None:
            name = emb.name
        delta = Embedding(
            input_dim=self.size + len(self._special_tokens),
            output_dim=vector_dim,
            embeddings_initializer=delta_initializer,
            mask_zero=True,
            name=name + '/delta_correction'
        )
        x = Input((None, ), dtype='int32', name=name + '/input')
        e = add([emb(x), delta(x)], name=name + '/addition')
        emb = Model(x, e, name='shifted_emb/' + name)

    return emb
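
# Hypothetical usage, assuming `container` is an instance of the vocabulary
# class above with a pretrained matrix `container.W` of shape (vocab, 300):
emb = container.to_embedding(learn_difference=True, name='glove')
# `emb` is then a Model mapping int32 token ids to shifted embeddings: the
# pretrained table stays frozen and only the delta table, initialised in
# U(-0.005, 0.005), is trained.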