def __init__(self, output_dim, init='glorot_uniform', activation='relu', weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None, input_dim=None, **kwargs):
    """Record hyper-parameters for the sparse fully-connected layer.

    String identifiers for the initializer, activation, regularizers and
    constraints are resolved to objects through the Keras ``get`` helpers.
    """
    self.output_dim = output_dim
    self.input_dim = input_dim
    # Kernel initializer comes from `init`; bias is always zero-initialized.
    self.W_initializer = initializers.get(init)
    self.b_initializer = initializers.get('zeros')
    self.activation = activations.get(activation)
    for attr, value in (('W_regularizer', W_regularizer),
                        ('b_regularizer', b_regularizer),
                        ('activity_regularizer', activity_regularizer)):
        setattr(self, attr, regularizers.get(value))
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    # Explicit initial weights are stored here, not applied yet.
    self.initial_weights = weights
    self.input_spec = InputSpec(ndim=2)
    if input_dim:
        kwargs['input_shape'] = (input_dim,)
    super(SparseFullyConnectedLayer, self).__init__(**kwargs)
# Listing metadata (translated from the scraped page):
# "Python usage examples of get()" — source file: itosfm.py
# Project: State-Frequency-Memory-stock-prediction, author: z331565360
# (page stats: 24 views, 0 favorites, 0 likes, 0 comments)
def __init__(self, output_dim, freq_dim, hidden_dim,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    """Configure the state-frequency-memory recurrent layer (ITOSFM)."""
    self.output_dim, self.freq_dim, self.hidden_dim = output_dim, freq_dim, hidden_dim
    # Resolve initializer identifiers.
    for attr, spec in (('init', init),
                       ('inner_init', inner_init),
                       ('forget_bias_init', forget_bias_init)):
        setattr(self, attr, initializations.get(spec))
    # Cell and gate activations.
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    # Optional weight penalties (None means no penalty).
    for attr, reg in (('W_regularizer', W_regularizer),
                      ('U_regularizer', U_regularizer),
                      ('b_regularizer', b_regularizer)):
        setattr(self, attr, regularizers.get(reg))
    self.dropout_W = dropout_W
    self.dropout_U = dropout_U
    # Dropout makes train-time behavior differ from test-time.
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(ITOSFM, self).__init__(**kwargs)
# Listing metadata (translated from the scraped page):
# Source file: itosfm.py — project: State-Frequency-Memory-stock-prediction,
# author: z331565360 (page stats: 30 views, 0 favorites, 0 likes, 0 comments)
def __init__(self, output_dim, freq_dim, hidden_dim,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    """Configure the state-frequency-memory recurrent layer (ITOSFM).

    NOTE(review): this definition duplicates the earlier ITOSFM __init__.
    """
    self.output_dim = output_dim
    self.freq_dim = freq_dim
    self.hidden_dim = hidden_dim
    # Initializers for input, recurrent, and forget-gate-bias weights.
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    # Cell and gate activations.
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    # Optional weight penalties.
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if dropout_W or dropout_U:
        # Dropout implies different train/test behavior.
        self.uses_learning_phase = True
    super(ITOSFM, self).__init__(**kwargs)
def __init__(self, output_dim, memory_dim=128, memory_size=20,
             controller_output_dim=100, location_shift_range=1,
             num_read_head=1, num_write_head=1,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, R_regularizer=None,
             b_regularizer=None, W_y_regularizer=None,
             W_xi_regularizer=None, W_r_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    """Configure the Neural Turing Machine layer.

    NOTE(review): the memory-related parameters (memory_dim, memory_size,
    controller_output_dim, location_shift_range, num_read_head,
    num_write_head) and the R/W_y/W_xi/W_r regularizers are accepted but
    never stored — confirm whether this constructor is truncated upstream.
    """
    self.output_dim = output_dim
    # Weight initializers.
    for attr, spec in (('init', init),
                       ('inner_init', inner_init),
                       ('forget_bias_init', forget_bias_init)):
        setattr(self, attr, initializations.get(spec))
    # Activations for the cell and the gates.
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    # Only the W/U/b penalties are kept.
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(NTM, self).__init__(**kwargs)
def __init__(self, filters,
             centers_initializer='zeros',
             centers_regularizer=None,
             centers_constraint=None,
             stds_initializer='ones',
             stds_regularizer=None,
             stds_constraint=None,
             gauss_scale=100,
             **kwargs):
    """Configure Gaussian receptive fields with learnable centers and stds."""
    self.filters = filters
    self.gauss_scale = gauss_scale
    # Base-class setup first; the resolved objects below are plain attributes.
    super(GaussianReceptiveFields, self).__init__(**kwargs)
    for prefix, initializer, regularizer, constraint in (
            ('centers', centers_initializer, centers_regularizer, centers_constraint),
            ('stds', stds_initializer, stds_regularizer, stds_constraint)):
        setattr(self, prefix + '_initializer', initializers.get(initializer))
        setattr(self, prefix + '_regularizer', regularizers.get(regularizer))
        setattr(self, prefix + '_constraint', constraints.get(constraint))
def __init__(self, quadratic_filters=2, init='glorot_uniform', weights=None,
             W_quad_regularizer=None, W_lin_regularizer=None, activity_regularizer=None,
             W_quad_constraint=None, W_lin_constraint=None,
             bias=True, input_dim=None, **kwargs):
    """Configure the generalized quadratic model layer (2-D input).

    NOTE(review): the `bias` argument is accepted but never stored — verify
    whether build() is expected to read it.
    """
    self.init = initializations.get(init)
    self.quadratic_filters = quadratic_filters
    self.input_dim = input_dim
    # Penalties for the quadratic and linear kernels, plus output activity.
    for attr, reg in (('W_quad_regularizer', W_quad_regularizer),
                      ('W_lin_regularizer', W_lin_regularizer),
                      ('activity_regularizer', activity_regularizer)):
        setattr(self, attr, regularizers.get(reg))
    self.W_quad_constraint = constraints.get(W_quad_constraint)
    self.W_lin_constraint = constraints.get(W_lin_constraint)
    self.initial_weights = weights
    # This variant expects flat (batch, features) input.
    self.input_spec = [InputSpec(ndim=2)]
    if input_dim:
        kwargs['input_shape'] = (input_dim,)
    super(GQM, self).__init__(**kwargs)
def __init__(self, activation='linear',
             bias_regularizer=None,
             bias_constraint=None,
             bias_initializer='zeros',
             use_bias=True, input_dim=None, **kwargs):
    """Configure the EminusS layer (bias-only transform with an activation).

    Bug fix: `input_dim` is an explicit parameter, so it is captured before
    `**kwargs` is built and the original check ``'input_dim' in kwargs``
    could never be true — `input_shape` was never derived from `input_dim`.
    The shape is now derived from the parameter directly (backward
    compatible: behavior is unchanged when `input_dim` is None).
    """
    self.activation = activations.get(activation)
    self.input_dim = input_dim
    self.bias_initializer = initializers.get(bias_initializer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_bias = use_bias
    self.input_spec = [InputSpec(ndim=2)]
    # An explicitly passed input_shape wins over a derived one.
    if 'input_shape' not in kwargs and input_dim is not None:
        kwargs['input_shape'] = (input_dim,)
    super(EminusS, self).__init__(**kwargs)
def __init__(self, quadratic_filters=2, init='glorot_uniform', weights=None,
             W_quad_regularizer=None, W_lin_regularizer=None, activity_regularizer=None,
             W_quad_constraint=None, W_lin_constraint=None,
             bias=True, input_dim=None, **kwargs):
    """Configure the convolutional generalized quadratic model layer.

    Identical setup to GQM except the input is 5-D (see input_spec).
    NOTE(review): `bias` is accepted but never stored.
    """
    self.init = initializations.get(init)
    self.quadratic_filters = quadratic_filters
    self.input_dim = input_dim
    self.W_quad_regularizer = regularizers.get(W_quad_regularizer)
    self.W_lin_regularizer = regularizers.get(W_lin_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_quad_constraint = constraints.get(W_quad_constraint)
    self.W_lin_constraint = constraints.get(W_lin_constraint)
    self.initial_weights = weights
    # 5-D input tensor expected.
    self.input_spec = [InputSpec(ndim=5)]
    if input_dim:
        kwargs['input_shape'] = (input_dim,)
    super(GQM_conv, self).__init__(**kwargs)
def __init__(self, quadratic_filters=2, init='glorot_uniform', weights=None,
             W_quad_regularizer=None, W_lin_regularizer=None, activity_regularizer=None,
             W_quad_constraint=None, W_lin_constraint=None,
             bias=True, input_dim=None, **kwargs):
    """Configure the 4-D generalized quadratic model layer (5-D input spec).

    NOTE(review): `bias` is accepted but never stored.
    """
    self.quadratic_filters = quadratic_filters
    self.input_dim = input_dim
    self.init = initializations.get(init)
    # Regularizers for the two kernels and the output activity.
    for attr, reg in (('W_quad_regularizer', W_quad_regularizer),
                      ('W_lin_regularizer', W_lin_regularizer),
                      ('activity_regularizer', activity_regularizer)):
        setattr(self, attr, regularizers.get(reg))
    self.W_quad_constraint = constraints.get(W_quad_constraint)
    self.W_lin_constraint = constraints.get(W_lin_constraint)
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=5)]
    if input_dim:
        kwargs['input_shape'] = (input_dim,)
    super(GQM_4D, self).__init__(**kwargs)
def __init__(self, units,
             kernel_initializer='glorot_uniform',
             kernel_regularizer=None,
             kernel_constraint=constraints.NonNeg(),
             k_initializer='zeros',
             k_regularizer=None,
             k_constraint=None,
             tied_k=False,
             activity_regularizer=None,
             **kwargs):
    """Configure the SoftMinMax layer with a non-negative kernel constraint.

    NOTE(review): the default ``constraints.NonNeg()`` is created once at
    definition time and shared across instances — harmless if stateless,
    but worth confirming.
    """
    # Allow `input_dim` as a legacy alias for a 1-D `input_shape`.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(SoftMinMax, self).__init__(**kwargs)
    self.units = units
    self.tied_k = tied_k
    # Kernel and k-parameter configuration.
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.k_initializer = initializers.get(k_initializer)
    self.k_regularizer = regularizers.get(k_regularizer)
    self.k_constraint = constraints.get(k_constraint)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
def __init__(self, units,
             kernel_initializer='glorot_uniform',
             kernel_regularizer=None,
             kernel_constraint=constraints.NonNeg(),
             activity_regularizer=None,
             **kwargs):
    """Configure the WeightedMean layer (non-negative kernel by default)."""
    # Legacy alias: `input_dim` becomes a 1-D `input_shape`.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(WeightedMean, self).__init__(**kwargs)
    self.units = units
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    # Accept any input with at least two dims; masks pass through.
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
def __init__(self, output_dim, init='glorot_uniform',
             activation=None, weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, input_dim=None, **kwargs):
    """Configure a dense layer variant (DenseNonNeg)."""
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.bias = bias
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    # Optional penalties and constraints.
    for attr, reg in (('W_regularizer', W_regularizer),
                      ('b_regularizer', b_regularizer),
                      ('activity_regularizer', activity_regularizer)):
        setattr(self, attr, regularizers.get(reg))
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.initial_weights = weights
    # Accepts 2-D or higher input.
    self.input_spec = [InputSpec(ndim='2+')]
    if input_dim:
        kwargs['input_shape'] = (input_dim,)
    super(DenseNonNeg, self).__init__(**kwargs)
def __init__(self, init='glorot_uniform',
             activation=None, weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, input_dim=None, **kwargs):
    """Configure the Feedback layer (no output_dim: shape-preserving)."""
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.input_dim = input_dim
    self.bias = bias
    self.initial_weights = weights
    # Weight/bias/activity penalties.
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    # Accepts 2-D or higher input.
    self.input_spec = [InputSpec(ndim='2+')]
    if input_dim:
        kwargs['input_shape'] = (input_dim,)
    super(Feedback, self).__init__(**kwargs)
def __init__(self, init='glorot_uniform',
             activation=None, weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, input_dim=None, **kwargs):
    """Configure the DivisiveNormalization layer (shape-preserving)."""
    self.input_dim = input_dim
    self.bias = bias
    self.initial_weights = weights
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    # Resolve optional penalties and constraints.
    for attr, reg in (('W_regularizer', W_regularizer),
                      ('b_regularizer', b_regularizer),
                      ('activity_regularizer', activity_regularizer)):
        setattr(self, attr, regularizers.get(reg))
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.input_spec = [InputSpec(ndim='2+')]
    if input_dim:
        kwargs['input_shape'] = (input_dim,)
    super(DivisiveNormalization, self).__init__(**kwargs)
def __init__(self, output_dim, inner_dim, depth=2, init_output='uniform',
             activation_output='softmax', init_inner='identity',
             activation_inner='linear', scale_output=0.01, padding=False, **kwargs):
    """Configure the graph-fingerprint (GraphFP) layer.

    Raises:
        ValueError: if ``depth`` is less than 1.

    Bug fix: the original called ``quit(...)``, a site-module convenience
    that raises SystemExit and may not even exist (e.g. under ``python -S``);
    library code should raise a proper exception instead.
    """
    if depth < 1:
        raise ValueError('Cannot use GraphFP with depth zero')
    self.init_output = initializations.get(init_output)
    self.activation_output = activations.get(activation_output)
    self.init_inner = initializations.get(init_inner)
    self.activation_inner = activations.get(activation_inner)
    self.output_dim = output_dim
    self.inner_dim = inner_dim
    self.depth = depth
    self.scale_output = scale_output
    self.padding = padding
    self.initial_weights = None
    self.input_dim = 4  # each entry is a 3D N_atom x N_atom x N_feature tensor
    if self.input_dim:
        kwargs['input_shape'] = (None, None, None,)  # 3D tensor for each input
    super(GraphFP, self).__init__(**kwargs)
def __init__(self, output_dim, inner_dim, depth=2, init_output='uniform',
             activation_output='softmax', init_inner='identity',
             activation_inner='linear', scale_output=0.01, padding=False, **kwargs):
    """Configure the graph-fingerprint (GraphFP) layer.

    NOTE(review): this definition duplicates the earlier GraphFP __init__.

    Raises:
        ValueError: if ``depth`` is less than 1.

    Bug fix: replaced ``quit(...)`` (site-module helper, raises SystemExit,
    absent under ``python -S``) with a proper ValueError.
    """
    if depth < 1:
        raise ValueError('Cannot use GraphFP with depth zero')
    self.init_output = initializations.get(init_output)
    self.activation_output = activations.get(activation_output)
    self.init_inner = initializations.get(init_inner)
    self.activation_inner = activations.get(activation_inner)
    self.output_dim = output_dim
    self.inner_dim = inner_dim
    self.depth = depth
    self.scale_output = scale_output
    self.padding = padding
    self.initial_weights = None
    self.input_dim = 4  # each entry is a 3D N_atom x N_atom x N_feature tensor
    if self.input_dim:
        kwargs['input_shape'] = (None, None, None,)  # 3D tensor for each input
    super(GraphFP, self).__init__(**kwargs)
def __init__(self, output_dim, mem_vec_dim, init='glorot_uniform', activation='linear', weights=None,
             activity_regularizer=None, input_dim=None, **kwargs):
    """Configure the MemoryNet layer.

    Params:
        output_dim: dimensionality of the layer output.
        mem_vec_dim: dimensionality of the query/memory vectors.
            (Original docstring was garbled — meanings inferred from the
            parameter names; TODO confirm against the caller.)
    """
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.mem_vector_dim = mem_vec_dim
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.initial_weights = weights
    if input_dim:
        kwargs['input_shape'] = (input_dim,)
    super(MemoryNet, self).__init__(**kwargs)
def __init__(self, output_dim, L,
             init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    """Configure the Recurrent Highway Network layer with recurrence depth L."""
    self.output_dim = output_dim
    self.L = L
    # Initializers and activations.
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    # Optional penalties.
    for attr, reg in (('W_regularizer', W_regularizer),
                      ('U_regularizer', U_regularizer),
                      ('b_regularizer', b_regularizer)):
        setattr(self, attr, regularizers.get(reg))
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if dropout_W or dropout_U:
        self.uses_learning_phase = True
    super(RHN, self).__init__(**kwargs)
def __init__(self, downsampling_factor=10, init='glorot_uniform', activation='linear',
             weights=None, W_regularizer=None, activity_regularizer=None,
             W_constraint=None, input_dim=None, **kwargs):
    """Configure the element-wise product layer (4-D input)."""
    self.downsampling_factor = downsampling_factor
    self.input_dim = input_dim
    self.initial_weights = weights
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    if input_dim:
        kwargs['input_shape'] = (input_dim,)
    # Expects image-like (4-D) input.
    self.input_spec = [InputSpec(ndim=4)]
    super(EltWiseProduct, self).__init__(**kwargs)
def __init__(self,
             W_regularizer=None, u_regularizer=None, b_regularizer=None,
             W_constraint=None, u_constraint=None, b_constraint=None,
             W_dropout=0., u_dropout=0., bias=True, **kwargs):
    """Configure attention-with-context: fixed initializers, optional penalties."""
    self.supports_masking = True
    # Fixed initializers: orthogonal for W, glorot for the context vector u.
    self.W_init = initializers.get('orthogonal')
    self.u_init = initializers.get('glorot_uniform')
    for attr, reg in (('W_regularizer', W_regularizer),
                      ('u_regularizer', u_regularizer),
                      ('b_regularizer', b_regularizer)):
        setattr(self, attr, regularizers.get(reg))
    for attr, con in (('W_constraint', W_constraint),
                      ('u_constraint', u_constraint),
                      ('b_constraint', b_constraint)):
        setattr(self, attr, constraints.get(con))
    # Clamp dropout rates into [0, 1].
    self.W_dropout = min(1., max(0., W_dropout))
    self.u_dropout = min(1., max(0., u_dropout))
    self.bias = bias
    super(AttentionWithContext, self).__init__(**kwargs)
# Listing metadata (translated from the scraped page):
# Source file: layer_normalization_RNN.py — project: New_Layers-Keras-Tensorflow,
# author: WeidiXie (page stats: 24 views, 0 favorites, 0 likes, 0 comments)
def __init__(self, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', beta_init='zero', gamma_init='one',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             gamma_regularizer=None, beta_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    """Configure a layer-normalized SimpleRNN (beta/gamma are the LN params)."""
    self.output_dim = output_dim
    self.activation = activations.get(activation)
    # Weight initializers, including layer-norm shift (beta) and scale (gamma).
    for attr, spec in (('init', init), ('inner_init', inner_init),
                       ('beta_init', beta_init), ('gamma_init', gamma_init)):
        setattr(self, attr, initializations.get(spec))
    # Optional penalties for weights and layer-norm parameters.
    for attr, reg in (('W_regularizer', W_regularizer),
                      ('U_regularizer', U_regularizer),
                      ('b_regularizer', b_regularizer),
                      ('gamma_regularizer', gamma_regularizer),
                      ('beta_regularizer', beta_regularizer)):
        setattr(self, attr, regularizers.get(reg))
    self.dropout_W = dropout_W
    self.dropout_U = dropout_U
    # Numerical-stability constant for the normalization.
    self.epsilon = 1e-5
    if dropout_W or dropout_U:
        self.uses_learning_phase = True
    super(LN_SimpleRNN, self).__init__(**kwargs)
def __init__(self, input_dim, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid'):
    """Build a bidirectional LSTM from two half-width LSTMs.

    Each direction gets ``output_dim // 2`` units so the concatenated
    output matches the requested ``output_dim`` (when it is even).
    """
    # Split the requested width between the two directions.
    self.output_dim = int(output_dim / 2)
    self.input_dim = input_dim
    # Resolved initializers/activations are kept but not used here —
    # presumably consumed elsewhere in the class.
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    # One LSTM per direction; trainable parameters are pooled.
    self.forward_lstm = LSTM(input_dim=input_dim, output_dim=self.output_dim)
    self.backward_lstm = LSTM(input_dim=input_dim, output_dim=self.output_dim)
    self.params = self.forward_lstm.params + self.backward_lstm.params
def __init__(self, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    """Configure the DualCurrent recurrent layer (LSTM-style hyper-parameters)."""
    self.output_dim = output_dim
    for attr, spec in (('init', init),
                       ('inner_init', inner_init),
                       ('forget_bias_init', forget_bias_init)):
        setattr(self, attr, initializations.get(spec))
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    for attr, reg in (('W_regularizer', W_regularizer),
                      ('U_regularizer', U_regularizer),
                      ('b_regularizer', b_regularizer)):
        setattr(self, attr, regularizers.get(reg))
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if dropout_W or dropout_U:
        self.uses_learning_phase = True
    super(DualCurrent, self).__init__(**kwargs)
def __init__(self, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             shape_key=None, dropout_W=0., dropout_U=0., **kwargs):
    """Configure the RTTN recurrent layer; forces the 'gpu' consume mode."""
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    # Default to an empty mapping when no shape hints are supplied.
    self.shape_key = shape_key or {}
    if dropout_W or dropout_U:
        self.uses_learning_phase = True
    # This layer always uses the fused-weights ('gpu') computation mode.
    kwargs['consume_less'] = 'gpu'
    super(RTTN, self).__init__(**kwargs)
    self.num_actions = 4
def __init__(self, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    """Configure the VAE decoder LSTM layer."""
    self.output_dim = output_dim
    # Initializers for input, recurrent and forget-gate-bias weights.
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    for attr, reg in (('W_regularizer', W_regularizer),
                      ('U_regularizer', U_regularizer),
                      ('b_regularizer', b_regularizer)):
        setattr(self, attr, regularizers.get(reg))
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if dropout_W or dropout_U:
        self.uses_learning_phase = True
    super(DecoderVaeLSTM, self).__init__(**kwargs)
def __init__(self, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    """Configure the QRNN layer; statefulness is explicitly disabled."""
    self.output_dim = output_dim
    for attr, spec in (('init', init),
                       ('inner_init', inner_init),
                       ('forget_bias_init', forget_bias_init)):
        setattr(self, attr, initializations.get(spec))
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W = dropout_W
    self.dropout_U = dropout_U
    # This variant never carries state across batches.
    self.stateful = False
    if dropout_W or dropout_U:
        self.uses_learning_phase = True
    super(QRNN, self).__init__(**kwargs)
def __init__(self, output_dim, window_size=3, stride=1,
             kernel_initializer='uniform', bias_initializer='zero',
             activation='linear', activity_regularizer=None,
             kernel_regularizer=None, bias_regularizer=None,
             kernel_constraint=None, bias_constraint=None,
             use_bias=True, input_dim=None, input_length=None, **kwargs):
    """Configure the gated convolutional (GCNN) layer over 3-D sequences."""
    self.output_dim = output_dim
    self.window_size = window_size
    # Stride only along the time axis.
    self.strides = (stride, 1)
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    for attr, reg in (('kernel_regularizer', kernel_regularizer),
                      ('bias_regularizer', bias_regularizer),
                      ('activity_regularizer', activity_regularizer)):
        setattr(self, attr, regularizers.get(reg))
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # (batch, time, features) input.
    self.input_spec = [InputSpec(ndim=3)]
    self.input_dim = input_dim
    self.input_length = input_length
    if input_dim:
        kwargs['input_shape'] = (self.input_length, self.input_dim)
    super(GCNN, self).__init__(**kwargs)
def __init__(self, units, window_size=2, stride=1,
             return_sequences=False, go_backwards=False,
             stateful=False, unroll=False, activation='tanh',
             kernel_initializer='uniform', bias_initializer='zero',
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None,
             dropout=0, use_bias=True, input_dim=None, input_length=None,
             **kwargs):
    """Configure the QRNN layer (Keras-2-style `units` API)."""
    # RNN-style iteration flags.
    self.return_sequences = return_sequences
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.unroll = unroll
    # Convolution-over-time geometry.
    self.units = units
    self.window_size = window_size
    self.strides = (stride, 1)
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    for attr, reg in (('kernel_regularizer', kernel_regularizer),
                      ('bias_regularizer', bias_regularizer),
                      ('activity_regularizer', activity_regularizer)):
        setattr(self, attr, regularizers.get(reg))
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.dropout = dropout
    self.supports_masking = True
    # (batch, time, features) input.
    self.input_spec = [InputSpec(ndim=3)]
    self.input_dim = input_dim
    self.input_length = input_length
    if input_dim:
        kwargs['input_shape'] = (self.input_length, self.input_dim)
    super(QRNN, self).__init__(**kwargs)
def __init__(self, filters, kernel_size,
             kernel_initializer='glorot_uniform', activation=None, weights=None,
             padding='valid', strides=(1, 1), data_format=None,
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None,
             use_bias=True, **kwargs):
    """Configure a 2-D cosine-similarity convolution layer.

    Raises:
        ValueError: if ``padding`` is not 'valid', 'same' or 'full'.
    """
    if data_format is None:
        data_format = K.image_data_format()
    if padding not in {'valid', 'same', 'full'}:
        raise ValueError('Invalid border mode for CosineConvolution2D:', padding)
    self.filters = filters
    self.kernel_size = kernel_size
    # Unpack the 2-D kernel geometry.
    self.nb_row, self.nb_col = self.kernel_size
    self.padding = padding
    self.strides = tuple(strides)
    self.data_format = normalize_data_format(data_format)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.activation = activations.get(activation)
    for attr, reg in (('kernel_regularizer', kernel_regularizer),
                      ('bias_regularizer', bias_regularizer),
                      ('activity_regularizer', activity_regularizer)):
        setattr(self, attr, regularizers.get(reg))
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # Image-like (4-D) input.
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(CosineConvolution2D, self).__init__(**kwargs)
def __init__(self, output_dim=None, activation=None, **kwargs):
    """Store the output dimensionality and resolve the activation."""
    self.output_dim = output_dim
    self.activation = activations.get(activation)
    super().__init__(**kwargs)