def __init__(self, filters_simple, filters_complex, nb_row, nb_col,
init='glorot_uniform', activation='relu', weights=None,
padding='valid', strides=(1, 1), data_format=K.image_data_format(),
kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
W_constraint=None, bias_constraint=None,
bias=True, **kwargs):
    if padding not in {'valid', 'same'}:
        raise ValueError('Invalid padding for Convolution2DEnergy: ' + str(padding))
self.filters_simple = filters_simple
self.filters_complex = filters_complex
self.nb_row = nb_row
self.nb_col = nb_col
self.init = initializers.get(init, data_format=data_format)
self.activation = activations.get(activation)
self.padding = padding
self.strides = tuple(strides)
    assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
self.data_format = data_format
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.W_constraint = constraints.UnitNormOrthogonal(filters_complex, data_format)
self.bias_constraint = constraints.get(bias_constraint)
self.bias = bias
self.input_spec = [InputSpec(ndim=4)]
self.initial_weights = weights
super(Convolution2DEnergy, self).__init__(**kwargs)
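All of these constructors funnel their arguments through the Keras get() resolvers, which accept a string identifier, a config dict, an existing instance/callable, or (for regularizers and constraints) None. A minimal illustration, assuming stock Keras 2; note that some snippets on this page call project-specific wrappers with extra arguments (e.g. a data_format keyword), which the stock resolvers do not accept:

from keras import activations, constraints, initializers, regularizers

initializers.get('glorot_uniform')            # string -> Initializer instance
initializers.get(initializers.Constant(0.1))  # instances/callables pass through
activations.get('relu')                       # -> the relu function
activations.get(None)                         # -> linear (identity) activation
regularizers.get(None)                        # -> None (no regularization)
constraints.get(None)                         # -> None (no constraint)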
def __init__(self, rank,
kernel_size=3,
data_format=None,
kernel_initialization=.1,
bias_initialization=1,
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(_ConvGDN, self).__init__(**kwargs)
self.rank = rank
self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(1, rank, 'strides')
self.padding = conv_utils.normalize_padding('same')
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(1, rank, 'dilation_rate')
self.kernel_initializer = initializers.Constant(kernel_initialization)
self.bias_initializer = initializers.Constant(bias_initialization)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=self.rank + 2)
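_ConvGDN leans on keras.utils.conv_utils to canonicalize its arguments; a quick sketch of what those helpers return (the exact module layout varies slightly across Keras releases):

from keras.utils import conv_utils

conv_utils.normalize_tuple(3, 2, 'kernel_size')   # int -> (3, 3)
conv_utils.normalize_tuple((1, 2), 2, 'strides')  # tuple validated -> (1, 2)
conv_utils.normalize_padding('same')              # validates, returns 'same'
conv_utils.normalize_data_format(None)            # -> K.image_data_format()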
def __init__(self, filters,
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
kernel_constraint=kconstraints.NonNeg(),
k_initializer='zeros',
k_regularizer=None,
k_constraint=None,
tied_k=False,
activity_regularizer=None,
strides=1,
padding='valid',
dilation_rate=1,
data_format=K.image_data_format(),
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(Conv2DSoftMinMax, self).__init__(**kwargs)
self.filters = filters
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.k_initializer = initializers.get(k_initializer)
self.k_regularizer = regularizers.get(k_regularizer)
self.k_constraint = constraints.get(k_constraint)
self.tied_k = tied_k
self.activity_regularizer = regularizers.get(activity_regularizer)
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
self.padding = conv_utils.normalize_padding(padding)
self.input_spec = InputSpec(min_ndim=2)
self.data_format = data_format
self.supports_masking = True
def __init__(self, init='one', power_init=1, weights=None, axis=-1, fit=True, **kwargs):
self.supports_masking = True
self.init = initializations.get(init)
self.initial_weights = weights
self.axis = axis
self.power_init = power_init
self.fit = fit
super(PowerReLU, self).__init__(**kwargs)
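The name and the power_init argument suggest a rectifier raised to a learnable exponent; a minimal numpy sketch under that assumption (the exact parameterization is not shown in this snippet):

import numpy as np

def power_relu(x, power=1.0):
    # hypothetical forward pass: ReLU followed by a learnable power
    return np.maximum(x, 0.0) ** power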
def __init__(self, quadratic_filters_ex=2, quadratic_filters_sup=2, W_quad_ex_initializer='glorot_uniform',
W_quad_sup_initializer='glorot_uniform', W_lin_initializer='glorot_uniform',
W_quad_ex_regularizer=None, W_quad_sup_regularizer=None, W_lin_regularizer=None,
W_quad_ex_constraint=None, W_quad_sup_constraint=None, W_lin_constraint=None,
**kwargs):
self.quadratic_filters_ex = quadratic_filters_ex
self.quadratic_filters_sup = quadratic_filters_sup
self.W_quad_ex_initializer = initializers.get(W_quad_ex_initializer)
self.W_quad_sup_initializer = initializers.get(W_quad_sup_initializer)
self.W_lin_initializer = initializers.get(W_lin_initializer)
self.W_quad_ex_constraint = constraints.get(W_quad_ex_constraint)
self.W_quad_sup_constraint = constraints.get(W_quad_sup_constraint)
self.W_lin_constraint = constraints.get(W_lin_constraint)
self.W_quad_ex_regularizer = regularizers.get(W_quad_ex_regularizer)
self.W_quad_sup_regularizer = regularizers.get(W_quad_sup_regularizer)
self.W_lin_regularizer = regularizers.get(W_lin_regularizer)
self.input_spec = [InputSpec(ndim=2)]
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(RustSTC, self).__init__(**kwargs)
def __init__(self, weights=None, kernel_initializer='glorot_uniform',
alpha_initializer='ones', alpha_regularizer=None, alpha_constraint=None,
beta_delta_initializer='ones', beta_delta_regularizer=None, beta_delta_constraint=None,
gamma_eta_initializer='ones', gamma_eta_regularizer=None, gamma_eta_constraint=None,
rho_initializer='ones', rho_regularizer=None, rho_constraint=None,
**kwargs):
self.alpha_initializer = initializers.get(alpha_initializer)
self.beta_delta_initializer = initializers.get(beta_delta_initializer)
self.gamma_eta_initializer = initializers.get(gamma_eta_initializer)
self.rho_initializer = initializers.get(rho_initializer)
self.alpha_constraint = constraints.get(alpha_constraint)
self.beta_delta_constraint = constraints.get(beta_delta_constraint)
self.gamma_eta_constraint = constraints.get(gamma_eta_constraint)
self.rho_constraint = constraints.get(rho_constraint)
self.alpha_regularizer = regularizers.get(alpha_regularizer)
self.beta_delta_regularizer = regularizers.get(beta_delta_regularizer)
self.gamma_eta_regularizer = regularizers.get(gamma_eta_regularizer)
self.rho_regularizer = regularizers.get(rho_regularizer)
self.input_spec = [InputSpec(ndim=2)]
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(NakaRushton, self).__init__(**kwargs)
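For reference, the classic Naka-Rushton contrast-response function this layer presumably generalizes, r(x) = r_max * x**n / (x**n + sigma**n); how alpha/beta_delta/gamma_eta/rho map onto these terms is an assumption, not shown in the snippet:

import numpy as np

def naka_rushton(x, r_max=1.0, sigma=0.5, n=2.0):
    # r(x) = r_max * x**n / (x**n + sigma**n)
    xn = np.maximum(x, 0.0) ** n
    return r_max * xn / (xn + sigma ** n)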
def __init__(self, filters,
sum_axes,
filter_axes,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_activation=None,
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(FilterDims, self).__init__(**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.activation = activations.get(activation)
self.kernel_activation = activations.get(kernel_activation)
self.filters = filters
self.sum_axes = list(sum_axes)
self.sum_axes.sort()
self.filter_axes = list(filter_axes)
self.filter_axes.sort()
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.use_bias = use_bias
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def __init__(self, filters_simple,
filters_complex,
sum_axes,
filter_axes,
activation='relu',
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_activation=None,
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(FilterDimsV1, self).__init__(**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.activation = activations.get(activation)
self.kernel_activation = activations.get(kernel_activation)
self.filters_simple = filters_simple
self.filters_complex = filters_complex
self.sum_axes = list(sum_axes)
self.sum_axes.sort()
self.filter_axes = list(filter_axes)
self.filter_axes.sort()
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = kconstraints.UnitNormOrthogonal(self.filters_complex)
self.bias_constraint = constraints.get(bias_constraint)
self.use_bias = use_bias
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def __init__(self, output_dim, num_components, init='glorot_uniform',
             activation='linear', weights=None, bias=True, input_dim=None,
             **kwargs):
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.output_dim = output_dim
self.input_dim = input_dim
self.num_components = num_components
self.bias = bias
self.initial_weights = weights
self.input_spec = [InputSpec(ndim=2)]
if self.input_dim:
kwargs['input_shape'] = (self.input_dim,)
super(Dense, self).__init__(**kwargs)
def __init__(self, return_attention=False, **kwargs):
self.init = initializers.get('uniform')
self.supports_masking = True
self.return_attention = return_attention
    super(AttentionWeightedAverage, self).__init__(**kwargs)
def __init__(self,
filters,
num_neighbors,
neighbors_ix_mat,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
    if K.backend() != 'theano':
        raise RuntimeError('GraphConv requires the Theano backend.')
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(GraphConv, self).__init__(**kwargs)
self.filters = filters
self.num_neighbors = num_neighbors
self.neighbors_ix_mat = neighbors_ix_mat
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=3)
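One plausible reading of the forward pass, sketched in numpy: neighbors_ix_mat holds, for each node, the indices of its num_neighbors neighbors, and a shared kernel is contracted over each gathered neighborhood. The kernel layout below is hypothetical, not taken from the source:

import numpy as np

def graph_conv_numpy(X, neighbors_ix_mat, W, b=None):
    # X: (nodes, in_features); neighbors_ix_mat: (nodes, num_neighbors) int indices
    # W: (num_neighbors, in_features, filters) -- assumed layout
    gathered = X[neighbors_ix_mat]               # (nodes, num_neighbors, in_features)
    out = np.einsum('nkf,kfo->no', gathered, W)  # contract neighborhood + features
    return out if b is None else out + b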
def __init__(self, epsilon=1e-3, axis=-1,
weights=None, beta_init='zero', gamma_init='one',
gamma_regularizer=None, beta_regularizer=None, **kwargs):
self.supports_masking = True
self.beta_init = initializers.get(beta_init)
self.gamma_init = initializers.get(gamma_init)
self.epsilon = epsilon
self.axis = axis
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.initial_weights = weights
super(FixedBatchNormalization, self).__init__(**kwargs)
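A layer like this typically applies batch normalization with frozen statistics: the moving mean and variance are loaded as non-trainable weights rather than updated per batch. A minimal numpy sketch of that transform:

import numpy as np

def frozen_batch_norm(x, gamma, beta, moving_mean, moving_var, epsilon=1e-3):
    # y = gamma * (x - mu) / sqrt(var + eps) + beta, with mu and var held fixed
    return gamma * (x - moving_mean) / np.sqrt(moving_var + epsilon) + beta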
def get_initial_state(self, inputs):
print('inputs shape:', inputs.get_shape())
# apply the matrix on the first time step to get the initial s0.
s0 = activations.tanh(K.dot(inputs[:, 0], self.W_s))
    # Trick from keras.layers.recurrent: derive a zero tensor of shape
    # (samples, output_dim) from the input itself, since the static batch
    # size is unknown at graph-construction time.
y0 = K.zeros_like(inputs) # (samples, timesteps, input_dims)
y0 = K.sum(y0, axis=(1, 2)) # (samples, )
y0 = K.expand_dims(y0) # (samples, 1)
y0 = K.tile(y0, [1, self.output_dim])
return [y0, s0]
def __init__(self, weights=None, axis=-1, momentum = 0.9, beta_init='zero', gamma_init='one', **kwargs):
self.momentum = momentum
self.axis = axis
self.beta_init = initializations.get(beta_init)
self.gamma_init = initializations.get(gamma_init)
self.initial_weights = weights
super(Scale, self).__init__(**kwargs)
def __init__(self,
init='glorot_uniform',
activation=None,
weights=None,
W_regularizer=None,
b_regularizer=None,
activity_regularizer=None,
W_constraint=None,
b_constraint=None,
bias=True,
input_dim=None,
**kwargs):
self.init = initializers.get(init)
self.activation = activations.get(activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
self.initial_weights = weights
self.input_spec = InputSpec(ndim=2)
self.input_dim = input_dim
if self.input_dim:
kwargs['input_shape'] = (self.input_dim,)
super(Highway, self).__init__(**kwargs)
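For context, a highway layer (Srivastava et al., 2015) computes y = T(x) * H(x) + (1 - T(x)) * x, where H is the candidate transform and T a sigmoid gate; a minimal numpy sketch:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def highway_forward(x, W, b, W_T, b_T, act=np.tanh):
    H = act(x @ W + b)           # candidate transform
    T = sigmoid(x @ W_T + b_T)   # transform gate in (0, 1)
    return T * H + (1.0 - T) * x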
def __init__(self, output_dim, support=1, init='glorot_uniform',
activation='linear', weights=None, W_regularizer=None,
b_regularizer=None, bias=False, **kwargs):
self.init = initializers.get(init)
self.activation = activations.get(activation)
self.output_dim = output_dim # number of features per node
self.support = support # filter support / number of weights
assert support >= 1
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.bias = bias
self.initial_weights = weights
# these will be defined during build()
self.input_dim = None
self.W = None
self.b = None
super(GraphConvolution, self).__init__(**kwargs)
# def get_output_shape_for(self, input_shapes):
# features_shape = input_shapes[0]
# output_shape = (features_shape[0], self.output_dim)
# return output_shape # (batch_size, output_dim)
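The support argument matches the basis decomposition of Kipf & Welling (2017), where the layer computes H' = act(sum_k A_k H W_k + b) with one weight matrix per support basis; a numpy sketch under that assumption:

import numpy as np

def gcn_propagate(H, A_list, W_list, b=None, act=np.tanh):
    # H: (nodes, in_features); A_list: `support` normalized adjacency bases
    out = sum(A @ H @ W for A, W in zip(A_list, W_list))
    return act(out if b is None else out + b)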
def __init__(self, weights=None, axis=-1, momentum=0.9, beta_init='zero', gamma_init='one', **kwargs):
self.momentum = momentum
self.axis = axis
self.beta_init = initializations.get(beta_init)
self.gamma_init = initializations.get(gamma_init)
self.initial_weights = weights
super(Scale, self).__init__(**kwargs)
def __init__(self,
kernel_initializer=initializers.Constant(1.0),
kernel_regularizer=None,
kernel_constraint=None,
bias_initializer='zeros',
bias_regularizer=None,
bias_constraint=None,
**kwargs):
super(Scale, self).__init__(**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_initializer = initializers.get(bias_initializer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.bias_constraint = constraints.get(bias_constraint)
def __init__(self, step_dim,
W_regularizer=None, b_regularizer=None,
W_constraint=None, b_constraint=None,
bias=True, **kwargs):
"""
Keras Layer that implements an Attention mechanism for temporal data.
Supports Masking.
Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
# Input shape
3D tensor with shape: `(samples, steps, features)`.
# Output shape
2D tensor with shape: `(samples, features)`.
:param kwargs:
Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
The dimensions are inferred based on the output shape of the RNN.
Example:
model.add(LSTM(64, return_sequences=True))
model.add(Attention(step_dim))
"""
self.supports_masking = True
# self.init = initializations.get('glorot_uniform')
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
self.step_dim = step_dim
self.features_dim = 0
super(Attention, self).__init__(**kwargs)
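A sketch of the call() this kind of __init__ is usually paired with: score each step with e_t = tanh(x_t . W + b), softmax-normalize over steps (respecting the mask), and return the weighted average. The names self.W and self.b are assumptions about what build() creates:

def call(self, x, mask=None):
    # x: (samples, steps, features)
    e = K.reshape(K.dot(K.reshape(x, (-1, self.features_dim)),
                        K.reshape(self.W, (self.features_dim, 1))),
                  (-1, self.step_dim))                 # (samples, steps)
    if self.bias:
        e += self.b
    e = K.tanh(e)
    a = K.exp(e)
    if mask is not None:
        a *= K.cast(mask, K.floatx())                  # zero out masked steps
    a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
    weighted = x * K.expand_dims(a)                    # broadcast over features
    return K.sum(weighted, axis=1)                     # (samples, features)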
# Source: mf_lstm_att_sia_self.py, project kaggle-quora-solution-8th (author: qqgeogor)
def __init__(self, step_dim,
W_regularizer=None, b_regularizer=None,
W_constraint=None, b_constraint=None,
bias=True, **kwargs):
"""
Keras Layer that implements an Attention mechanism for temporal data.
Supports Masking.
Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
# Input shape
3D tensor with shape: `(samples, steps, features)`.
# Output shape
2D tensor with shape: `(samples, features)`.
:param kwargs:
Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
The dimensions are inferred based on the output shape of the RNN.
Example:
model.add(LSTM(64, return_sequences=True))
model.add(Attention())
"""
self.supports_masking = True
#self.init = initializations.get('glorot_uniform')
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
self.step_dim = step_dim
self.features_dim = 0
super(Attention, self).__init__(**kwargs)