Example source code for Python get()
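The snippets below were collected because each constructor calls get() from one of Keras' lookup modules: initializers (called initializations in Keras 1), regularizers, constraints and activations. In every case get() turns whatever the caller passed in, whether a string identifier, an already-constructed object, a callable, or None, into an object the layer can use directly, which is why these constructors accept plain strings such as 'glorot_uniform' and default their regularizer and constraint arguments to None. The following is a minimal sketch of the pattern, assuming the Keras 2.x module layout:

from keras import activations, constraints, initializers, regularizers

init = initializers.get('glorot_uniform')      # string identifier -> Initializer instance
act = activations.get('tanh')                  # string identifier -> the tanh function
reg = regularizers.get(None)                   # None passes through unchanged (no regularizer)
con = constraints.get('non_neg')               # string identifier -> NonNeg() constraint
same = constraints.get(constraints.NonNeg())   # an existing object or callable is returned as-is

Passing None simply disables the corresponding regularizer or constraint, so the layers below do not need to special-case missing arguments.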

layers.py (project: keras-utilities, author: cbaziotis)
def __init__(self,
                 W_regularizer=None, u_regularizer=None, b_regularizer=None,
                 W_constraint=None, u_constraint=None, b_constraint=None,
                 bias=True, **kwargs):

        self.supports_masking = True
        self.init = initializations.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(AttentionWithContext, self).__init__(**kwargs)
SparseFullyConnectedLayer.py (project: MatchZoo, author: faneshion)
def __init__(self, output_dim, init='glorot_uniform', activation='relu', weights=None,
            W_regularizer=None, b_regularizer=None, activity_regularizer=None,
            W_constraint=None, b_constraint=None, input_dim=None, **kwargs):
        self.W_initializer = initializers.get(init)
        self.b_initializer = initializers.get('zeros')
        self.activation = activations.get(activation)
        self.output_dim = output_dim
        self.input_dim = input_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(SparseFullyConnectedLayer, self).__init__(**kwargs)
renormalization.py (project: DeepTrade_keras, author: happynoom)
def __init__(self, epsilon=1e-3, mode=0, axis=-1, momentum=0.99,
                 r_max_value=3., d_max_value=5., t_delta=1., weights=None, beta_init='zero',
                 gamma_init='one', gamma_regularizer=None, beta_regularizer=None,
                 **kwargs):
        self.supports_masking = True
        self.beta_init = initializers.get(beta_init)
        self.gamma_init = initializers.get(gamma_init)
        self.epsilon = epsilon
        self.mode = mode
        self.axis = axis
        self.momentum = momentum
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.initial_weights = weights
        self.r_max_value = r_max_value
        self.d_max_value = d_max_value
        self.t_delta = t_delta
        if self.mode == 0:
            self.uses_learning_phase = True
        super(BatchRenormalization, self).__init__(**kwargs)
itosfm.py (project: State-Frequency-Memory-stock-prediction, author: z331565360)
def __init__(self, output_dim, freq_dim, hidden_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):
        self.output_dim = output_dim
        self.freq_dim = freq_dim
        self.hidden_dim = hidden_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U

        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(ITOSFM, self).__init__(**kwargs)
ChainCRF.py (project: emnlp2017-bilstm-cnn-crf, author: UKPLab)
def __init__(self, init='glorot_uniform',
                 U_regularizer=None, b_start_regularizer=None, b_end_regularizer=None,
                 U_constraint=None, b_start_constraint=None, b_end_constraint=None,
                 weights=None,
                 **kwargs):
        self.supports_masking = True
        self.uses_learning_phase = True
        self.input_spec = [InputSpec(ndim=3)]
        self.init = initializations.get(init)

        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_start_regularizer = regularizers.get(b_start_regularizer)
        self.b_end_regularizer = regularizers.get(b_end_regularizer)
        self.U_constraint = constraints.get(U_constraint)
        self.b_start_constraint = constraints.get(b_start_constraint)
        self.b_end_constraint = constraints.get(b_end_constraint)

        self.initial_weights = weights

        super(ChainCRF, self).__init__(**kwargs)
lstm2ntm.py (project: NTM-Keras, author: SigmaQuan)
def __init__(self, output_dim, memory_dim=128, memory_size=20,
                 controller_output_dim=100, location_shift_range=1,
                 num_read_head=1, num_write_head=1,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, R_regularizer=None,
                 b_regularizer=None, W_y_regularizer=None,
                 W_xi_regularizer=None, W_r_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U

        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(NTM, self).__init__(**kwargs)
ppmask.py (project: ppap, author: unique-horn)
def __init__(self,
                 mask_shape,
                 layer_sizes,
                 scale,
                 bias=None,
                 act_reg=None,
                 **kwargs):
        """
        """

        self.mask_shape = mask_shape
        self.layer_sizes = layer_sizes
        self.scale = scale
        self.gen = generators.FFMatrixGen2D(output_shape=mask_shape,
                                            layer_sizes=layer_sizes,
                                            scale=scale)

        self.bias = bias
        self.act_reg = regularizers.get(act_reg)

        super().__init__(**kwargs)
advanced_activations.py (project: kfs, author: the-moliver)
def __init__(self, alpha_initializer=0.2,
                 beta_initializer=5.0,
                 alpha_regularizer=None,
                 alpha_constraint=None,
                 beta_regularizer=None,
                 beta_constraint=None,
                 shared_axes=None,
                 **kwargs):
        super(ParametricSoftplus, self).__init__(**kwargs)
        self.supports_masking = True
        self.alpha_initializer = initializers.get(alpha_initializer)
        self.alpha_regularizer = regularizers.get(alpha_regularizer)
        self.alpha_constraint = constraints.get(alpha_constraint)
        self.beta_initializer = initializers.get(beta_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        if shared_axes is None:
            self.shared_axes = None
        elif not isinstance(shared_axes, (list, tuple)):
            self.shared_axes = [shared_axes]
        else:
            self.shared_axes = list(shared_axes)
neuro.py (project: kfs, author: the-moliver)
def __init__(self, filters,
                 centers_initializer='zeros',
                 centers_regularizer=None,
                 centers_constraint=None,
                 stds_initializer='ones',
                 stds_regularizer=None,
                 stds_constraint=None,
                 gauss_scale=100,
                 **kwargs):
        self.filters = filters
        self.gauss_scale = gauss_scale
        super(GaussianReceptiveFields, self).__init__(**kwargs)
        self.centers_initializer = initializers.get(centers_initializer)
        self.stds_initializer = initializers.get(stds_initializer)
        self.centers_regularizer = regularizers.get(centers_regularizer)
        self.stds_regularizer = regularizers.get(stds_regularizer)
        self.centers_constraint = constraints.get(centers_constraint)
        self.stds_constraint = constraints.get(stds_constraint)
neuro.py (project: kfs, author: the-moliver)
def __init__(self, quadratic_filters=2, init='glorot_uniform', weights=None,
                 W_quad_regularizer=None, W_lin_regularizer=None, activity_regularizer=None,
                 W_quad_constraint=None, W_lin_constraint=None,
                 bias=True, input_dim=None, **kwargs):
        self.init = initializations.get(init)
        self.quadratic_filters = quadratic_filters
        self.input_dim = input_dim

        self.W_quad_regularizer = regularizers.get(W_quad_regularizer)
        self.W_lin_regularizer = regularizers.get(W_lin_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_quad_constraint = constraints.get(W_quad_constraint)
        self.W_lin_constraint = constraints.get(W_lin_constraint)

        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim=2)]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(GQM, self).__init__(**kwargs)
neuro.py (project: kfs, author: the-moliver)
def __init__(self, quadratic_filters=2, init='glorot_uniform', weights=None,
                 W_quad_regularizer=None, W_lin_regularizer=None, activity_regularizer=None,
                 W_quad_constraint=None, W_lin_constraint=None,
                 bias=True, input_dim=None, **kwargs):
        self.init = initializations.get(init)
        self.quadratic_filters = quadratic_filters
        self.input_dim = input_dim

        self.W_quad_regularizer = regularizers.get(W_quad_regularizer)
        self.W_lin_regularizer = regularizers.get(W_lin_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_quad_constraint = constraints.get(W_quad_constraint)
        self.W_lin_constraint = constraints.get(W_lin_constraint)

        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim=5)]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(GQM_conv, self).__init__(**kwargs)
neuro.py (project: kfs, author: the-moliver)
def __init__(self, quadratic_filters=2, init='glorot_uniform', weights=None,
                 W_quad_regularizer=None, W_lin_regularizer=None, activity_regularizer=None,
                 W_quad_constraint=None, W_lin_constraint=None,
                 bias=True, input_dim=None, **kwargs):
        self.init = initializations.get(init)
        self.quadratic_filters = quadratic_filters
        self.input_dim = input_dim

        self.W_quad_regularizer = regularizers.get(W_quad_regularizer)
        self.W_lin_regularizer = regularizers.get(W_lin_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_quad_constraint = constraints.get(W_quad_constraint)
        self.W_lin_constraint = constraints.get(W_lin_constraint)

        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim=5)]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(GQM_4D, self).__init__(**kwargs)
core.py (project: kfs, author: the-moliver)
def __init__(self, units,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=constraints.NonNeg(),
                 k_initializer='zeros',
                 k_regularizer=None,
                 k_constraint=None,
                 tied_k=False,
                 activity_regularizer=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(SoftMinMax, self).__init__(**kwargs)

        self.units = units
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.k_initializer = initializers.get(k_initializer)
        self.k_regularizer = regularizers.get(k_regularizer)
        self.k_constraint = constraints.get(k_constraint)
        self.tied_k = tied_k
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True
core.py (project: kfs, author: the-moliver)
def __init__(self, output_dim, init='glorot_uniform',
                 activation=None, weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, input_dim=None, **kwargs):
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.output_dim = output_dim
        self.input_dim = input_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim='2+')]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(DenseNonNeg, self).__init__(**kwargs)
core.py (project: kfs, author: the-moliver)
def __init__(self, init='glorot_uniform',
                 activation=None, weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, input_dim=None, **kwargs):
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.input_dim = input_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim='2+')]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(Feedback, self).__init__(**kwargs)
core.py (project: kfs, author: the-moliver)
def __init__(self, init='glorot_uniform',
                 activation=None, weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, input_dim=None, **kwargs):
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.input_dim = input_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim='2+')]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(DivisiveNormalization, self).__init__(**kwargs)
ChainCRF.py (project: SGAITagger, author: zhiweiuu)
def __init__(self, init='glorot_uniform',
                 U_regularizer=None, b_start_regularizer=None, b_end_regularizer=None,
                 U_constraint=None, b_start_constraint=None, b_end_constraint=None,
                 weights=None,
                 **kwargs):
        self.supports_masking = True
        self.uses_learning_phase = True
        self.input_spec = [InputSpec(ndim=3)]
        self.init = initializations.get(init)

        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_start_regularizer = regularizers.get(b_start_regularizer)
        self.b_end_regularizer = regularizers.get(b_end_regularizer)
        self.U_constraint = constraints.get(U_constraint)
        self.b_start_constraint = constraints.get(b_start_constraint)
        self.b_end_constraint = constraints.get(b_end_constraint)

        self.initial_weights = weights

        super(ChainCRF, self).__init__(**kwargs)
renormalization.py (project: LIE, author: EmbraceLife)
def __init__(self, epsilon=1e-3, mode=0, axis=-1, momentum=0.99,
                 r_max_value=3., d_max_value=5., t_delta=1., weights=None, beta_init='zero',
                 gamma_init='one', gamma_regularizer=None, beta_regularizer=None,
                 **kwargs):
        self.supports_masking = True
        self.beta_init = initializers.get(beta_init)
        self.gamma_init = initializers.get(gamma_init)
        self.epsilon = epsilon
        self.mode = mode
        self.axis = axis
        self.momentum = momentum
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.initial_weights = weights
        self.r_max_value = r_max_value
        self.d_max_value = d_max_value
        self.t_delta = t_delta
        if self.mode == 0:
            self.uses_learning_phase = True
        super(BatchRenormalization, self).__init__(**kwargs)
discrimination.py (project: Keras-GAN-Animeface-Character, author: forcecore)
def __init__(self, nb_kernels, kernel_dim, init='glorot_uniform', weights=None,
                 W_regularizer=None, activity_regularizer=None,
                 W_constraint=None, input_dim=None, **kwargs):
        self.init = initializers.get(init)
        self.nb_kernels = nb_kernels
        self.kernel_dim = kernel_dim
        self.input_dim = input_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)

        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim=2)]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(MinibatchDiscrimination, self).__init__(**kwargs)
memory_layers.py (project: Keras_note, author: LibCorner)
def __init__(self, output_dim, mem_vec_dim, init='glorot_uniform', activation='linear', weights=None,
                 activity_regularizer=None, input_dim=None, **kwargs):
        '''
        Params:
            output_dim: dimension of the layer output
            mem_vec_dim: dimension of the query (memory) vector
        '''
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.output_dim = output_dim
        self.input_dim = input_dim
        self.mem_vector_dim=mem_vec_dim

        self.activity_regularizer = regularizers.get(activity_regularizer)


        self.initial_weights = weights

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(MemoryNet, self).__init__(**kwargs)
rhn.py (project: deep-models, author: LaurentMazare)
def __init__(self, output_dim, L,
             init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    self.L = L

    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(RHN, self).__init__(**kwargs)
embedding2D.py (project: NN_sentiment, author: hx364)
def __init__(self, input_dim, output_dim,
                 init='uniform', input_length=None,
                 W_regularizer=None, activity_regularizer=None,
                 W_constraint=None,
                 mask_zero=False,
                 weights=None, **kwargs):
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.input_length = input_length
        self.mask_zero = mask_zero

        self.W_constraint = constraints.get(W_constraint)
        self.constraints = [self.W_constraint]

        self.W_regularizer = regularizers.get(W_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.initial_weights = weights
        kwargs['input_shape'] = (self.input_dim,)
        super(Embedding2D, self).__init__(**kwargs)
embedding2D.py (project: NN_sentiment, author: hx364)
def __init__(self, input_dim, output_dim,
                 init='uniform', input_length=None,
                 W_regularizer=None, activity_regularizer=None,
                 W_constraint=None,
                 mask_zero=False,
                 weights=None, **kwargs):
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.input_length = input_length
        self.mask_zero = mask_zero

        self.W_constraint = constraints.get(W_constraint)
        self.constraints = [self.W_constraint]

        self.W_regularizer = regularizers.get(W_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.initial_weights = weights
        kwargs['input_shape'] = (self.input_dim,)
        super(Embedding, self).__init__(**kwargs)
layers.py (project: anago, author: Hironsan)
def __init__(self, init='glorot_uniform',
                 U_regularizer=None,
                 b_start_regularizer=None,
                 b_end_regularizer=None,
                 U_constraint=None,
                 b_start_constraint=None,
                 b_end_constraint=None,
                 weights=None,
                 **kwargs):
        super(ChainCRF, self).__init__(**kwargs)
        self.init = initializers.get(init)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_start_regularizer = regularizers.get(b_start_regularizer)
        self.b_end_regularizer = regularizers.get(b_end_regularizer)
        self.U_constraint = constraints.get(U_constraint)
        self.b_start_constraint = constraints.get(b_start_constraint)
        self.b_end_constraint = constraints.get(b_end_constraint)

        self.initial_weights = weights

        self.supports_masking = True
        self.uses_learning_phase = True
        self.input_spec = [InputSpec(ndim=3)]
eltwise_product.py (project: mlnet, author: marcellacornia)
def __init__(self, downsampling_factor=10, init='glorot_uniform', activation='linear',
                 weights=None, W_regularizer=None, activity_regularizer=None,
                 W_constraint=None, input_dim=None, **kwargs):

        self.downsampling_factor = downsampling_factor
        self.init = initializations.get(init)
        self.activation = activations.get(activation)

        self.W_regularizer = regularizers.get(W_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)

        self.initial_weights = weights

        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)

        self.input_spec = [InputSpec(ndim=4)]
        super(EltWiseProduct, self).__init__(**kwargs)
huffmax.py (project: huffmax, author: farizrahman4u)
def __init__(self, nb_classes, frequency_table=None, mode=0, init='glorot_uniform', weights=None, W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, verbose=False, **kwargs):
        '''
        # Arguments:
        nb_classes: Number of classes.
        frequency_table: list. Frequency of each class. More frequent classes get shorter Huffman codes.
        mode: integer. One of [0, 1].
        verbose: boolean. Set to True to see the progress of building the Huffman tree.
        '''
        self.nb_classes = nb_classes
        if frequency_table is None:
            frequency_table = [1] * nb_classes
        self.frequency_table = frequency_table
        self.mode = mode
        self.init = initializations.get(init)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.initial_weights = weights
        self.verbose = verbose
        super(Huffmax, self).__init__(**kwargs)
model_library.py (project: CIAN, author: yanghanxy)
def __init__(self,
                 W_regularizer=None, u_regularizer=None, b_regularizer=None,
                 W_constraint=None, u_constraint=None, b_constraint=None,
                 W_dropout=0., u_dropout=0., bias=True, **kwargs):

        self.supports_masking = True
        self.W_init = initializers.get('orthogonal')
        self.u_init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.W_dropout = min(1., max(0., W_dropout))
        self.u_dropout = min(1., max(0., u_dropout))

        self.bias = bias

        super(AttentionWithContext, self).__init__(**kwargs)
layer_normalization_RNN.py (project: New_Layers-Keras-Tensorflow, author: WeidiXie)
def __init__(self, output_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 activation='tanh', beta_init='zero', gamma_init='one',
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 gamma_regularizer=None, beta_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):
        self.output_dim = output_dim
        self.activation = activations.get(activation)
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.beta_init = initializations.get(beta_init)
        self.gamma_init = initializations.get(gamma_init)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.dropout_W = dropout_W
        self.dropout_U = dropout_U
        self.epsilon = 1e-5
        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(LN_SimpleRNN, self).__init__(**kwargs)
rtn.py (project: ikelos, author: braingineer)
def __init__(self, output_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U

        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(DualCurrent, self).__init__(**kwargs)

