def __init__(self, output_dim, num_senses, num_hyps, use_attention=False, return_attention=False, **kwargs):
    # Set output_dim in kwargs so that we can pass it along to LSTM's __init__.
    kwargs['output_dim'] = output_dim
    self.num_senses = num_senses
    self.num_hyps = num_hyps
    self.use_attention = use_attention
    self.return_attention = return_attention
    super(OntoAttentionLSTM, self).__init__(**kwargs)
    # Recurrent's __init__ sets an ndim=3 input spec; override it, since this
    # layer takes 5D input.
    self.input_spec = [InputSpec(ndim=5)]
    if self.consume_less == "cpu":
        # In Keras' LSTM implementation, consume_less="cpu" precomputes the
        # inputs to all gates and stores them in memory. That does not work for
        # OntoLSTM, because the gate inputs depend on the previous timestep's output.
        warnings.warn("OntoLSTM does not support consume_less = cpu. Changing it to mem.")
        self.consume_less = "mem"
    # TODO: Remove this dependency.
    if K.backend() == "tensorflow" and not self.unroll:
        warnings.warn("OntoLSTM does not work with unroll=False when the backend is TensorFlow. Changing it to True.")
        self.unroll = True
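
# A minimal usage sketch (not from the original source). The input shape and
# layer sizes below are illustrative assumptions; the Keras 1.x functional API
# is assumed, to match the output_dim/consume_less style used above.
from keras.layers import Input
from keras.models import Model

onto_input = Input(shape=(20, 3, 5, 50))  # (timesteps, senses, hyps, embedding_dim)
encoded = OntoAttentionLSTM(output_dim=64, num_senses=3, num_hyps=5,
                            use_attention=True)(onto_input)
model = Model(input=onto_input, output=encoded)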
def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    input_dim = input_shape[-1]
    reader_input_shape = self.get_reader_input_shape(input_shape)
    print("NSE reader input shape:", reader_input_shape, file=sys.stderr)
    # The writer processes one timestep at a time.
    writer_input_shape = (input_shape[0], 1, self.output_dim * 2)
    print("NSE writer input shape:", writer_input_shape, file=sys.stderr)
    composer_input_shape = self.get_composer_input_shape(input_shape)
    print("NSE composer input shape:", composer_input_shape, file=sys.stderr)
    self.reader.build(reader_input_shape)
    self.writer.build(writer_input_shape)
    self.composer.build(composer_input_shape)
    # Aggregate the weights of the individual components for this layer.
    reader_weights = self.reader.trainable_weights
    writer_weights = self.writer.trainable_weights
    composer_weights = self.composer.trainable_weights
    self.trainable_weights = reader_weights + writer_weights + composer_weights
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
def build(self, input_shape):
    if not self.recurrent_layer.built:
        self.recurrent_layer.build(input_shape)
    recurrent_output_shapes = self.recurrent_layer.compute_output_shape(
        input_shape
    )
    if self.return_sequences:
        if not self.dense_layer.built:
            # Drop the time axis: the dense layer is applied per timestep.
            self.dense_layer.build((
                recurrent_output_shapes[0],
                recurrent_output_shapes[2]
            ))
    elif not self.dense_layer.built:
        self.dense_layer.build(recurrent_output_shapes)
    super(RNNCell, self).build(input_shape)
    batch_size = input_shape[0] if self.stateful else None
    self.dense_state_spec = InputSpec(
        shape=(batch_size, self.dense_layer.units)
    )
    self.dense_state = None
def build(self, input_shape):
    assert len(input_shape) >= 2
    input_dim = input_shape[-1]
    self.kernel = self.add_weight(shape=(input_dim, self.units),
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    # Per-unit gain, initialized to 1 ('ones' is the Keras 2 initializer name).
    self.g = self.add_weight(shape=(self.units,),
                             initializer='ones',
                             name='g')
    if self.use_bias:
        self.bias = self.add_weight(shape=(self.units,),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.bias = None
    self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
    self.built = True
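
# The per-unit gain `g` alongside `kernel` suggests weight normalization
# (Salimans & Kingma, 2016). A minimal sketch of a `call` consistent with that
# reading -- an assumption, not the original implementation:
def call(self, inputs):
    # Normalize each kernel column to unit L2 norm, then rescale by the
    # learned gain g.
    kernel_norm = K.sqrt(K.sum(K.square(self.kernel), axis=0, keepdims=True))
    output = K.dot(inputs, self.kernel / (kernel_norm + K.epsilon())) * self.g
    if self.use_bias:
        output = K.bias_add(output, self.bias)
    return output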
def __init__(self, init='glorot_uniform',
             U_regularizer=None, b_start_regularizer=None, b_end_regularizer=None,
             U_constraint=None, b_start_constraint=None, b_end_constraint=None,
             weights=None,
             **kwargs):
    self.supports_masking = True
    self.uses_learning_phase = True
    self.input_spec = [InputSpec(ndim=3)]
    self.init = initializations.get(init)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_start_regularizer = regularizers.get(b_start_regularizer)
    self.b_end_regularizer = regularizers.get(b_end_regularizer)
    self.U_constraint = constraints.get(U_constraint)
    self.b_start_constraint = constraints.get(b_start_constraint)
    self.b_end_constraint = constraints.get(b_end_constraint)
    self.initial_weights = weights
    super(ChainCRF, self).__init__(**kwargs)
def __init__(self,
             padding=(1, 1),
             data_format=None,
             **kwargs):
    super(ReflectionPadding2D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    if isinstance(padding, int):
        self.padding = ((padding, padding), (padding, padding))
    elif hasattr(padding, '__len__'):
        if len(padding) != 2:
            raise ValueError('`padding` should have two elements. '
                             'Found: ' + str(padding))
        height_padding = conv_utils.normalize_tuple(padding[0], 2,
                                                    '1st entry of padding')
        width_padding = conv_utils.normalize_tuple(padding[1], 2,
                                                   '2nd entry of padding')
        self.padding = (height_padding, width_padding)
    else:
        raise ValueError('`padding` should be either an int, '
                         'a tuple of 2 ints '
                         '(symmetric_height_pad, symmetric_width_pad), '
                         'or a tuple of 2 tuples of 2 ints '
                         '((top_pad, bottom_pad), (left_pad, right_pad)). '
                         'Found: ' + str(padding))
    self.input_spec = InputSpec(ndim=4)
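
# A minimal sketch (an assumption, not the original source) of how `call`
# could implement reflection padding with the TensorFlow backend, assuming
# channels_last data:
import tensorflow as tf

def call(self, inputs):
    (top, bottom), (left, right) = self.padding
    # Reflect-pad only the spatial axes of (batch, height, width, channels).
    return tf.pad(inputs,
                  [[0, 0], [top, bottom], [left, right], [0, 0]],
                  mode='REFLECT')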
def __init__(self,
             kernel_size=3,
             kernel_initialization=.1,
             bias_initialization=1,
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    super(GDNConv1D, self).__init__(
        rank=1,
        kernel_size=kernel_size,
        data_format='channels_last',
        kernel_initialization=kernel_initialization,
        bias_initialization=bias_initialization,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        **kwargs)
    self.input_spec = InputSpec(ndim=3)
def __init__(self,
             kernel_size=(3, 3),
             data_format=None,
             kernel_initialization=.1,
             bias_initialization=1,
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    super(GDNConv2D, self).__init__(
        rank=2,
        kernel_size=kernel_size,
        data_format=data_format,
        kernel_initialization=kernel_initialization,
        bias_initialization=bias_initialization,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        **kwargs)
    self.input_spec = InputSpec(ndim=4)
def __init__(self,
             kernel_size=(3, 3, 3),
             data_format=None,
             kernel_initialization=.1,
             bias_initialization=1,
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    super(GDNConv3D, self).__init__(
        rank=3,
        kernel_size=kernel_size,
        data_format=data_format,
        kernel_initialization=kernel_initialization,
        bias_initialization=bias_initialization,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        **kwargs)
    self.input_spec = InputSpec(ndim=5)
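
# Illustrative usage (shapes and settings are assumptions, not from the
# original source). The three wrappers above only fix the convolution rank,
# so a 2D generalized divisive normalization over feature maps looks like:
gdn_2d = GDNConv2D(kernel_size=(3, 3), data_format='channels_last')
# e.g. applied to a (batch, height, width, channels) activation tensor,
# typically right after a convolutional layer in an image-compression model.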
def build(self, input_shape):
    alpha_shape = input_shape[self.axis]
    # The original name strings had no '{}' placeholder, so .format(self.name)
    # was a no-op; include the layer name as the other snippets do.
    self.alpha = self.init((alpha_shape,),
                           name='{}_alpha_pos'.format(self.name))
    self.rho = K.variable(self.power_init * np.ones(alpha_shape),
                          name='{}_rho_pos'.format(self.name))
    if self.fit:
        self.trainable_weights = [self.alpha, self.rho]
    self.input_spec = [InputSpec(dtype=K.floatx(),
                                 shape=input_shape)]
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
def __init__(self, quadratic_filters=2, init='glorot_uniform', weights=None,
             W_quad_regularizer=None, W_lin_regularizer=None, activity_regularizer=None,
             W_quad_constraint=None, W_lin_constraint=None,
             bias=True, input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.quadratic_filters = quadratic_filters
    self.input_dim = input_dim
    self.W_quad_regularizer = regularizers.get(W_quad_regularizer)
    self.W_lin_regularizer = regularizers.get(W_lin_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_quad_constraint = constraints.get(W_quad_constraint)
    self.W_lin_constraint = constraints.get(W_lin_constraint)
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=2)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(GQM, self).__init__(**kwargs)
def __init__(self, activation='linear',
             bias_regularizer=None,
             bias_constraint=None,
             bias_initializer='zeros',
             use_bias=True, input_dim=None, **kwargs):
    self.activation = activations.get(activation)
    self.input_dim = input_dim
    self.bias_initializer = initializers.get(bias_initializer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_bias = use_bias
    self.input_spec = [InputSpec(ndim=2)]
    # input_dim is an explicit parameter here, so it never appears in kwargs;
    # mirror the other layers and derive input_shape from self.input_dim.
    if 'input_shape' not in kwargs and self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(EminusS, self).__init__(**kwargs)
def __init__(self, quadratic_filters=2, init='glorot_uniform', weights=None,
             W_quad_regularizer=None, W_lin_regularizer=None, activity_regularizer=None,
             W_quad_constraint=None, W_lin_constraint=None,
             bias=True, input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.quadratic_filters = quadratic_filters
    self.input_dim = input_dim
    self.W_quad_regularizer = regularizers.get(W_quad_regularizer)
    self.W_lin_regularizer = regularizers.get(W_lin_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_quad_constraint = constraints.get(W_quad_constraint)
    self.W_lin_constraint = constraints.get(W_lin_constraint)
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=5)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(GQM_4D, self).__init__(**kwargs)
def build(self, input_shape):
    assert len(input_shape) >= 2
    input_dim = input_shape[-1]
    self.kernel = self.add_weight(shape=(input_dim, self.units),
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    # With tied_k, a single k is shared across all units.
    if self.tied_k:
        k_size = (1,)
    else:
        k_size = (self.units,)
    self.k = self.add_weight(shape=k_size,
                             initializer=self.k_initializer,
                             name='k',
                             regularizer=self.k_regularizer,
                             constraint=self.k_constraint)
    self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
    self.built = True
def __init__(self, units,
             kernel_initializer='glorot_uniform',
             kernel_regularizer=None,
             kernel_constraint=constraints.NonNeg(),
             activity_regularizer=None,
             **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(WeightedMean, self).__init__(**kwargs)
    self.units = units
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
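
# The NonNeg kernel constraint suggests convex combination weights. A sketch
# of a `call` consistent with the layer's name -- an assumption about the rest
# of the class (including an (input_dim, units) kernel built elsewhere), not
# the original implementation:
def call(self, inputs):
    # Normalize the non-negative kernel columns to sum to 1, so each output
    # unit is a weighted mean of the input features.
    weights = self.kernel / (K.sum(self.kernel, axis=0, keepdims=True) + K.epsilon())
    return K.dot(inputs, weights)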
def __init__(self, output_dim, init='glorot_uniform',
             activation=None, weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim='2+')]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(DenseNonNeg, self).__init__(**kwargs)
def build(self, input_shape):
    assert len(input_shape) >= 2
    input_dim = input_shape[-1]
    self.input_dim = input_dim
    self.input_spec = [InputSpec(dtype=K.floatx(),
                                 ndim='2+')]
    self.W = self.add_weight((input_dim, self.output_dim),
                             initializer=self.init,
                             name='{}_W'.format(self.name),
                             regularizer=self.W_regularizer,
                             constraint=self.W_constraint)
    if self.bias:
        self.b = self.add_weight((self.output_dim,),
                                 initializer='zero',
                                 name='{}_b'.format(self.name),
                                 regularizer=self.b_regularizer,
                                 constraint=self.b_constraint)
    else:
        self.b = None
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
    self.built = True
def build(self, input_shape):
    assert len(input_shape) >= 2
    input_dim = input_shape[-1]
    self.input_dim = input_dim
    self.input_spec = [InputSpec(dtype=K.floatx(),
                                 ndim='2+')]
    self.W = self.add_weight((input_dim, input_dim),
                             initializer=self.init,
                             name='{}_W'.format(self.name),
                             regularizer=self.W_regularizer,
                             constraint=self.W_constraint)
    if self.bias:
        self.b = self.add_weight((input_dim,),
                                 initializer='zero',
                                 name='{}_b'.format(self.name),
                                 regularizer=self.b_regularizer,
                                 constraint=self.b_constraint)
    else:
        self.b = None
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
    self.built = True
def __init__(self, init='glorot_uniform',
             activation=None, weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim='2+')]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(DivisiveNormalization, self).__init__(**kwargs)
def build(self, input_shape):
    assert len(input_shape) >= 2
    input_dim = input_shape[-1]
    self.input_dim = input_dim
    self.input_spec = [InputSpec(dtype=K.floatx(),
                                 ndim='2+')]
    self.W = self.add_weight((input_dim, input_dim),
                             initializer=self.init,
                             name='{}_W'.format(self.name),
                             regularizer=self.W_regularizer,
                             constraint=self.W_constraint)
    if self.bias:
        self.b = self.add_weight((input_dim,),
                                 initializer='zero',
                                 name='{}_b'.format(self.name),
                                 regularizer=self.b_regularizer,
                                 constraint=self.b_constraint)
    else:
        self.b = None
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
    self.built = True
# From discrimination.py in the Keras-GAN-Animeface-Character project (author: forcecore).
def __init__(self, nb_kernels, kernel_dim, init='glorot_uniform', weights=None,
             W_regularizer=None, activity_regularizer=None,
             W_constraint=None, input_dim=None, **kwargs):
    self.init = initializers.get(init)
    self.nb_kernels = nb_kernels
    self.kernel_dim = kernel_dim
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=2)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(MinibatchDiscrimination, self).__init__(**kwargs)
def build(self, input_shape):
    assert len(input_shape) == 2
    input_dim = input_shape[1]
    self.input_spec = [InputSpec(dtype=K.floatx(),
                                 shape=(None, input_dim))]
    self.W = self.add_weight(shape=(self.nb_kernels, input_dim, self.kernel_dim),
                             initializer=self.init,
                             name='kernel',
                             regularizer=self.W_regularizer,
                             trainable=True,
                             constraint=self.W_constraint)
    # Set built to True.
    super(MinibatchDiscrimination, self).build(input_shape)
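
# A sketch of the standard minibatch-discrimination forward pass from
# Salimans et al. (2016), which the (nb_kernels, input_dim, kernel_dim) weight
# above supports. This is an assumption about the rest of the class, not
# necessarily its original `call`:
def call(self, x, mask=None):
    # Project each sample to nb_kernels rows of kernel_dim features:
    # (batch, input_dim) . (nb_kernels, input_dim, kernel_dim)
    # -> (batch, nb_kernels, kernel_dim).
    activation = K.dot(x, self.W)
    # Pairwise L1 distances between samples, per kernel.
    diffs = (K.expand_dims(activation, 3)
             - K.expand_dims(K.permute_dimensions(activation, (1, 2, 0)), 0))
    abs_diffs = K.sum(K.abs(diffs), axis=2)
    # Negative-exponential similarity, summed over the batch, then appended
    # to the original features.
    minibatch_features = K.sum(K.exp(-abs_diffs), axis=2)
    return K.concatenate([x, minibatch_features], axis=1)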
def build(self, input_shape):
    input_shape = list(input_shape)
    input_shape = input_shape[:1] + [self.output_length] + input_shape[1:]
    if not self.hidden_dim:
        self.hidden_dim = input_shape[-1]
    output_dim = input_shape[-1]
    # Build the underlying LSTM with hidden_dim as its output dim, then restore.
    self.output_dim = self.hidden_dim
    initial_weights = self.initial_weights
    self.initial_weights = None
    super(LSTMDecoder, self).build(input_shape)
    self.output_dim = output_dim
    self.initial_weights = initial_weights
    self.W_y = self.init((self.hidden_dim, self.output_dim), name='{}_W_y'.format(self.name))
    self.b_y = K.zeros((self.output_dim,), name='{}_b_y'.format(self.name))
    self.trainable_weights += [self.W_y, self.b_y]
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
    input_shape.pop(1)
    self.input_spec = [InputSpec(shape=tuple(input_shape))]
def __init__(self, init='glorot_uniform',
             U_regularizer=None,
             b_start_regularizer=None,
             b_end_regularizer=None,
             U_constraint=None,
             b_start_constraint=None,
             b_end_constraint=None,
             weights=None,
             **kwargs):
    super(ChainCRF, self).__init__(**kwargs)
    self.init = initializers.get(init)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_start_regularizer = regularizers.get(b_start_regularizer)
    self.b_end_regularizer = regularizers.get(b_end_regularizer)
    self.U_constraint = constraints.get(U_constraint)
    self.b_start_constraint = constraints.get(b_start_constraint)
    self.b_end_constraint = constraints.get(b_end_constraint)
    self.initial_weights = weights
    self.supports_masking = True
    self.uses_learning_phase = True
    self.input_spec = [InputSpec(ndim=3)]
# From KerasBatchNormalization.py in the audit-log-detection project (author: twosixlabs).
def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    shape = (input_shape[self.axis],)
    self.gamma = self.gamma_init(shape, name='{}_gamma'.format(self.name))
    self.beta = self.beta_init(shape, name='{}_beta'.format(self.name))
    self.trainable_weights = [self.gamma, self.beta]
    self.running_mean = K.zeros(shape,
                                name='{}_running_mean'.format(self.name))
    self.running_std = K.ones(shape,
                              name='{}_running_std'.format(self.name))
    self.non_trainable_weights = [self.running_mean, self.running_std]
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
    self.built = True
    self.called_with = None
def __init__(self, recurrent_layer,
             return_attention=False,
             concatenate_input=True,
             attend_after=False,
             **kwargs):
    super(RecurrentAttention, self).__init__(**kwargs)
    self.recurrent_layer = self.add_child(
        'recurrent_layer',
        recurrent_layer
    )
    self.return_attention = return_attention
    self.concatenate_input = concatenate_input
    self.attend_after = attend_after
    self.input_spec = [InputSpec(ndim=3), None]
    self._attended_spec = InputSpec(ndim=2)
    self._attention_step_output_spec = InputSpec(ndim=2)
    self._attention_state_spec = [InputSpec(ndim=2)]
    self._attention_states = [None]
    # Will be set in call, then passed to step by get_constants.
    self._attended = None
def __init__(
    self,
    n_components,
    alpha_activation=None,
    beta_activation=None,
    kappa_activation=None,
    *args,
    **kwargs
):
    super(GravesSequenceAttention, self).__init__(*args, **kwargs)
    self.distribution = AlexGravesSequenceAttentionParams(
        n_components,
        alpha_activation,
        beta_activation,
        kappa_activation,
    )
    self._attention_states = [None, None]
    self._attention_state_spec = [
        InputSpec(ndim=2),           # attention (tm1)
        InputSpec(shape=(None, 1)),  # kappa
    ]
def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    if self.stateful:
        self.reset_states()
    else:
        # Initial states: an all-zero tensor of shape (output_dim,).
        self.states = [None]
    input_dim = input_shape[2]
    self.input_dim = input_dim
    self.U = self.add_weight((self.output_dim, self.output_dim),
                             initializer=self.inner_init,
                             name='{}_U'.format(self.name),
                             regularizer=self.U_regularizer)
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
    self.built = True
def build(self, input_shape):
    self.input_spec = [InputSpec(ndim=3)]
    if K._BACKEND == 'tensorflow':
        if not input_shape[1]:
            raise Exception('When using TensorFlow, you should define '
                            'explicitly the number of timesteps of '
                            'your sequences.\n'
                            'If your first layer is an Embedding, '
                            'make sure to pass it an "input_length" '
                            'argument. Otherwise, make sure '
                            'the first layer has '
                            'an "input_shape" or "batch_input_shape" '
                            'argument, including the time axis.')
    if not self.layer.built:
        self.layer.build(input_shape)
        self.layer.built = True
    # Pass input_shape through to the parent build (the original called it
    # with no argument, which Keras 1's Layer.build does not accept).
    super(ProbabilityTensor, self).build(input_shape)