# Keras 1.x helpers assumed imported at module level:
#   from keras import initializations, activations, regularizers, constraints
def __init__(self,
             W_regularizer=None, u_regularizer=None, b_regularizer=None,
             W_constraint=None, u_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    self.supports_masking = True
    self.init = initializations.get('glorot_uniform')

    self.W_regularizer = regularizers.get(W_regularizer)
    self.u_regularizer = regularizers.get(u_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)

    self.W_constraint = constraints.get(W_constraint)
    self.u_constraint = constraints.get(u_constraint)
    self.b_constraint = constraints.get(b_constraint)

    self.bias = bias
    super(AttentionWithContext, self).__init__(**kwargs)
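
A minimal usage sketch, assuming the full AttentionWithContext class (with build/call) is in scope and collapses (samples, steps, features) to (samples, features); all sizes are illustrative:

# Hypothetical usage (Keras 1.x API); assumes AttentionWithContext is fully defined.
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

model = Sequential()
model.add(Embedding(10000, 128, input_length=100))  # token ids -> vectors
model.add(LSTM(64, return_sequences=True))          # keep per-step outputs
model.add(AttentionWithContext())                   # attention-weighted sum over steps
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')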
Source file: itosfm.py (project: State-Frequency-Memory-stock-prediction, author: z331565360)
def __init__(self, output_dim, freq_dim, hidden_dim,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.freq_dim = freq_dim
    self.hidden_dim = hidden_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(ITOSFM, self).__init__(**kwargs)
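
An instantiation sketch; every value is illustrative, and ITOSFM is assumed to accept the standard Keras 1.x recurrent-layer kwargs (input_shape, return_sequences) via **kwargs:

# Illustrative dims only; only __init__ is shown above.
layer = ITOSFM(output_dim=50, freq_dim=10, hidden_dim=50,
               dropout_W=0.1, dropout_U=0.1,
               input_shape=(100, 1), return_sequences=True)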
def __init__(self, init='glorot_uniform',
             U_regularizer=None, b_start_regularizer=None, b_end_regularizer=None,
             U_constraint=None, b_start_constraint=None, b_end_constraint=None,
             weights=None,
             **kwargs):
    self.supports_masking = True
    self.uses_learning_phase = True
    self.input_spec = [InputSpec(ndim=3)]
    self.init = initializations.get(init)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_start_regularizer = regularizers.get(b_start_regularizer)
    self.b_end_regularizer = regularizers.get(b_end_regularizer)
    self.U_constraint = constraints.get(U_constraint)
    self.b_start_constraint = constraints.get(b_start_constraint)
    self.b_end_constraint = constraints.get(b_end_constraint)
    self.initial_weights = weights
    super(ChainCRF, self).__init__(**kwargs)
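
A wiring sketch for sequence labeling; the crf.loss attribute is an assumption based on common ChainCRF implementations, since only __init__ appears in this excerpt:

# Hypothetical wiring (Keras 1.x functional API); crf.loss is assumed.
from keras.models import Model
from keras.layers import Input, Embedding, LSTM, TimeDistributed, Dense

words = Input(shape=(50,), dtype='int32')
x = Embedding(5000, 64)(words)
x = LSTM(32, return_sequences=True)(x)
x = TimeDistributed(Dense(10))(x)   # per-step class energies
crf = ChainCRF()
out = crf(x)
model = Model(input=words, output=out)
model.compile(optimizer='adam', loss=crf.loss)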
def __init__(self, output_dim, memory_dim=128, memory_size=20,
             controller_output_dim=100, location_shift_range=1,
             num_read_head=1, num_write_head=1,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, R_regularizer=None,
             b_regularizer=None, W_y_regularizer=None,
             W_xi_regularizer=None, W_r_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    # NOTE: the memory/head arguments (memory_dim, memory_size,
    # controller_output_dim, location_shift_range, num_read_head,
    # num_write_head) and the R_/W_y_/W_xi_/W_r_ regularizers are accepted
    # but not stored in this excerpt.
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(NTM, self).__init__(**kwargs)
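
An instantiation sketch; every value below is illustrative, and since only __init__ appears here, the memory and head parameters are assumed to be consumed elsewhere in the full class:

# Illustrative values only; kwargs are forwarded to the Recurrent base class.
ntm = NTM(output_dim=8,
          memory_dim=128, memory_size=20,
          controller_output_dim=100,
          location_shift_range=1,
          num_read_head=1, num_write_head=1,
          input_shape=(None, 8))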
def __init__(self,
             input_channels,
             output_shape,
             num_filters,
             layer_sizes,
             init="glorot_uniform"):
    """
    Parameters
    ----------
    input_channels : int
        Number of channels in the input
    output_shape : list_like
        Size of the generated matrix (x, y)
    num_filters : int
        Number of filters
    layer_sizes : array_like
        List of nodes in hidden layers
    init : str
        Keras initializer to use for weights
    """
    self.input_channels = input_channels
    self.num_filters = num_filters
    self.output_shape = output_shape
    self.layer_sizes = layer_sizes
    self.init = initializations.get(init)
    self.bias_init = initializations.get("zero")
    self.setup_weights()
    self.setup_output()
def __init__(self,
             output_shape,
             layer_sizes,
             scale,
             init="glorot_uniform"):
    """
    Parameters
    ----------
    output_shape : list_like
        Size of the generated matrix (x, y)
    layer_sizes : array_like
        List of nodes in hidden layers
    scale : float
        Scaling factor
    init : str
        Keras initializer to use for weights
    """
    self.output_shape = output_shape
    self.layer_sizes = layer_sizes
    self.init = initializations.get(init)
    self.bias_init = initializations.get("zero")
    self.scale = scale
    self.setup_weights()
    self.setup_output()
def __init__(self, filter_size, input_shape, filters_in, batch_size):
    """
    Parameters
    ----------
    filter_size : int
        Size of the filter in 1 dimension (total = filter_size ** 2)
    input_shape : list_like
        Size of the input image this filter works on. Used to generate a
        separate filter for each pixel position of the image.
    filters_in : int
        Number of channels in the input
    batch_size : int
        Batch size
    """
    self.filter_size = filter_size
    self.input_shape = input_shape
    self.filters_in = filters_in
    self.batch_size = batch_size
    self.init = initializations.get("glorot_uniform")
    self.b_init = initializations.get("zero")
    self.setup_weights()
def __init__(self, output_dim, mem_vec_dim, init='glorot_uniform',
             activation='linear', weights=None,
             activity_regularizer=None, input_dim=None, **kwargs):
    '''
    Params:
        output_dim: dimensionality of the output
        mem_vec_dim: dimensionality of the query/memory vectors
    '''
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.mem_vector_dim = mem_vec_dim
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.initial_weights = weights
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(MemoryNet, self).__init__(**kwargs)
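
A hedged instantiation; passing input_dim lets the layer sit first in a Sequential model, since __init__ synthesizes kwargs['input_shape'] from it:

# Illustrative dims; only __init__ is shown above, so call behavior is assumed.
mem = MemoryNet(output_dim=64, mem_vec_dim=128, input_dim=300)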
def __init__(self, output_dim, L,
             init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    self.L = L  # recurrence depth of the highway network
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(RHN, self).__init__(**kwargs)
def __init__(self, input_dim, output_dim,
             init='uniform', input_length=None,
             W_regularizer=None, activity_regularizer=None,
             W_constraint=None,
             mask_zero=False,
             weights=None, **kwargs):
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.input_length = input_length
    self.mask_zero = mask_zero
    self.W_constraint = constraints.get(W_constraint)
    self.constraints = [self.W_constraint]
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.initial_weights = weights
    kwargs['input_shape'] = (self.input_dim,)
    super(Embedding2D, self).__init__(**kwargs)
def __init__(self, input_dim, output_dim,
             init='uniform', input_length=None,
             W_regularizer=None, activity_regularizer=None,
             W_constraint=None,
             mask_zero=False,
             weights=None, **kwargs):
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.input_length = input_length
    self.mask_zero = mask_zero
    self.W_constraint = constraints.get(W_constraint)
    self.constraints = [self.W_constraint]
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.initial_weights = weights
    kwargs['input_shape'] = (self.input_dim,)
    super(Embedding, self).__init__(**kwargs)
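
Usage mirrors the stock Keras 1.x embedding, which this constructor appears to be modeled on; sizes below are illustrative:

# Illustrative sizes; assumed drop-in compatible with keras.layers.Embedding.
emb = Embedding(input_dim=20000,   # vocabulary size
                output_dim=100,    # vector size per token
                input_length=50,   # padded sequence length
                mask_zero=True)    # treat index 0 as padding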
def __init__(self, downsampling_factor=10, init='glorot_uniform', activation='linear',
             weights=None, W_regularizer=None, activity_regularizer=None,
             W_constraint=None, input_dim=None, **kwargs):
    self.downsampling_factor = downsampling_factor
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.initial_weights = weights
    self.input_dim = input_dim
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    self.input_spec = [InputSpec(ndim=4)]
    super(EltWiseProduct, self).__init__(**kwargs)
def __init__(self, nb_classes, frequency_table=None, mode=0,
             init='glorot_uniform', weights=None,
             W_regularizer=None, b_regularizer=None,
             activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, verbose=False, **kwargs):
    '''
    # Arguments:
        nb_classes: Number of classes.
        frequency_table: list. Frequency of each class. More frequent classes
            will have shorter Huffman codes.
        mode: integer. One of [0, 1].
        verbose: boolean. Set to True to log progress while building the
            Huffman tree.
    '''
    self.nb_classes = nb_classes
    if frequency_table is None:
        frequency_table = [1] * nb_classes
    self.frequency_table = frequency_table
    self.mode = mode
    self.init = initializations.get(init)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.verbose = verbose
    super(Huffmax, self).__init__(**kwargs)
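
A construction sketch from corpus counts; the frequency table only shapes code lengths (frequent classes get shorter Huffman codes), so any non-negative counts work:

# Illustrative counts; omitting frequency_table falls back to uniform
# frequencies ([1] * nb_classes), as shown in __init__ above.
class_counts = [5000, 1200, 300, 60]   # e.g. label counts from a corpus
hsm = Huffmax(nb_classes=4,
              frequency_table=class_counts,
              mode=0,
              verbose=True)            # log tree-building progress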
Source file: layer_normalization_RNN.py (project: New_Layers-Keras-Tensorflow, author: WeidiXie)
def __init__(self, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', beta_init='zero', gamma_init='one',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             gamma_regularizer=None, beta_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.activation = activations.get(activation)
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.beta_init = initializations.get(beta_init)
    self.gamma_init = initializations.get(gamma_init)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.dropout_W = dropout_W
    self.dropout_U = dropout_U
    self.epsilon = 1e-5
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(LN_SimpleRNN, self).__init__(**kwargs)
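
A usage sketch; LN_SimpleRNN is assumed to follow the Keras 1.x Recurrent interface, since its constructor mirrors SimpleRNN's plus layer-norm gain/bias (gamma/beta) options:

# Illustrative shapes only.
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(LN_SimpleRNN(64, input_shape=(30, 16)))  # layer-normalized simple RNN
model.add(Dense(10, activation='softmax'))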
def __init__(self, input_dim, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid'):
    # Each direction gets half of the requested output width, so the
    # concatenated forward/backward output has `output_dim` units.
    self.output_dim = int(output_dim / 2)
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.input_dim = input_dim
    # Initial states: two all-zero tensors of shape (output_dim,).
    self.forward_lstm = LSTM(input_dim=input_dim, output_dim=self.output_dim)
    self.backward_lstm = LSTM(input_dim=input_dim, output_dim=self.output_dim)
    self.params = self.forward_lstm.params + self.backward_lstm.params
def __init__(self, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(DualCurrent, self).__init__(**kwargs)
def __init__(self, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             shape_key=None, dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    self.shape_key = shape_key or {}
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    kwargs['consume_less'] = 'gpu'
    super(RTTN, self).__init__(**kwargs)
    self.num_actions = 4
def __init__(self, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(DecoderVaeLSTM, self).__init__(**kwargs)
def __init__(self, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W = dropout_W
    self.dropout_U = dropout_U
    self.stateful = False
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(QRNN, self).__init__(**kwargs)
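
A usage sketch; the constructor mirrors LSTM's, so QRNN is assumed to follow the Keras 1.x Recurrent interface:

# Illustrative sizes only.
from keras.models import Sequential
from keras.layers import Embedding, Dense

model = Sequential()
model.add(Embedding(10000, 128, input_length=80))
model.add(QRNN(64, dropout_W=0.2, dropout_U=0.2))  # returns last timestep by default
model.add(Dense(1, activation='sigmoid'))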
def __init__(self, score_dim=1, num_hidden_layers=0, proj_dim=None,
             init='uniform', composition_type='HPCT', **kwargs):
    self.composition_type = composition_type
    self.supports_masking = True
    self.num_hidden_layers = num_hidden_layers
    self.proj_dim = proj_dim
    self.init = initializations.get(init)
    self.proj_head = None
    self.proj_prep = None
    self.proj_child = None
    self.scorer = None
    self.hidden_layers = []
    self.score_dim = score_dim
    self.allowed_compositions = []
    super(PrepositionPredictor, self).__init__(**kwargs)
def __init__(self, init='uniform', projection_dim=50, weights=None, **kwargs):
    self.intra_attention_weights = weights
    self.init = initializations.get(init)
    self.projection_dim = projection_dim
    super(IntraAttention, self).__init__(**kwargs)
Source file: attentionlayer.py (project: recurrent-attention-for-QA-SQUAD-based-on-keras, author: wentaozhu)
def __init__(self, h, output_dim,
             init='glorot_uniform', **kwargs):
    self.init = initializations.get(init)
    self.h = h
    self.output_dim = output_dim
    # Regularizers and dropout have been removed in this variant.
    super(AttenLayer, self).__init__(**kwargs)
    # Setting input_spec after super().__init__ seems necessary in order to
    # accept 3 input dimensions: (samples, timesteps, features).
    self.input_spec = [InputSpec(ndim=3)]
def __init__(self,
             W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements an Attention mechanism for temporal data.
    Supports masking. Follows the work of Raffel et al.
    [https://arxiv.org/abs/1512.08756]

    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
        2D tensor with shape: `(samples, features)`.

    Just put it on top of an RNN layer (GRU/LSTM/SimpleRNN) with
    return_sequences=True; the dimensions are inferred from the output
    shape of the RNN.

    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(Attention())
    """
    self.supports_masking = True
    self.init = initializations.get('glorot_uniform')

    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)

    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    self.bias = bias
    super(Attention, self).__init__(**kwargs)
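
Expanding the docstring's example into a fuller sketch; sizes are illustrative:

from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

model = Sequential()
model.add(Embedding(5000, 64, input_length=120))
model.add(LSTM(64, return_sequences=True))  # return_sequences=True is required
model.add(Attention())                      # (samples, steps, feats) -> (samples, feats)
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy')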
def get_initializer(initializer):
    if keras_2:
        from keras import initializers
        return initializers.get(initializer)
    else:
        from keras import initializations
        return initializations.get(initializer)
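
This helper bridges the renamed module between Keras versions (initializations in Keras 1, initializers in Keras 2). The keras_2 flag is assumed to be detected at module level; a sketch of that assumption plus usage:

# Assumed version detection (not shown in the excerpt):
import keras
keras_2 = int(keras.__version__.split('.')[0]) >= 2

init_fn = get_initializer('glorot_uniform')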
def __init__(self, **kwargs):
    self.init = initializations.get('normal')
    # self.input_spec = [InputSpec(ndim=3)]
    super(AttLayer, self).__init__(**kwargs)
def __init__(self,
             input_channels,
             rows, cols,
             output_shape,
             num_filters,
             hidden_dim,
             init="glorot_uniform"):
    """
    Parameters
    ----------
    input_channels : int
        Number of channels in the input
    rows, cols : int
        Spatial size of the input
    output_shape : list_like
        Size of the generated matrix (x, y)
    num_filters : int
        Number of filters
    hidden_dim : int
        Number of nodes in the hidden layer
    init : str
        Keras initializer to use for weights
    """
    self.input_rows = rows
    self.input_cols = cols
    self.input_channels = input_channels
    self.num_filters = num_filters
    self.output_shape = output_shape
    self.hidden_dim = hidden_dim
    self.init = initializations.get(init)
    self.bias_init = initializations.get("zero")
    self.setup_weights()
    self.num_param = np.prod(self.output_shape) * self.num_filters * \
        self.input_channels