Python get(): example source code
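
The snippets below are __init__ methods of custom Keras layers collected from a number of open-source projects. They all rely on the same idiom: the module-level get() helpers (initializers.get, regularizers.get, constraints.get, activations.get, or initializations.get in older Keras 1.x code) resolve whatever the caller passed in, whether a string identifier, an instance/callable, or None, into a concrete object the layer can use later in build() and call(). A hedged illustration of that resolution behavior, assuming Keras 2.x names (the variable names are only for illustration):

# Hedged sketch of how the get() helpers resolve their argument (Keras 2.x assumed).
from keras import activations, constraints, initializers, regularizers

# A string identifier is looked up and turned into a usable object.
init = initializers.get('glorot_uniform')       # -> an initializer object

# An already-constructed object (or any callable) is passed through unchanged.
reg = regularizers.get(regularizers.l2(0.01))   # -> the same L1L2 instance

# None means "no regularizer / no constraint" and stays None.
assert regularizers.get(None) is None
assert constraints.get(None) is None

# Activations resolve strings to functions; None becomes the linear pass-through.
tanh_fn = activations.get('tanh')               # -> keras.activations.tanh
linear_fn = activations.get(None)               # -> keras.activations.linear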

rtn.py (project: ikelos, author: braingineer)
def __init__(self, output_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 activation='tanh', inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 shape_key=None, dropout_W=0., dropout_U=0., **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U
        self.shape_key = shape_key or {}

        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        kwargs['consume_less'] = 'gpu'
        super(RTTN, self).__init__(**kwargs)

        self.num_actions = 4

recurrent.py (project: keras_bn_library, author: bnsnapper)
def __init__(self, output_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):

        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U

        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(DecoderVaeLSTM, self).__init__(**kwargs)

recurrent.py (project: keras_bn_library, author: bnsnapper)
def __init__(self, output_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W = dropout_W
        self.dropout_U = dropout_U
        self.stateful = False

        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(QRNN, self).__init__(**kwargs)

layers.py (project: keras-utilities, author: cbaziotis)
def __init__(self,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        """
        Keras Layer that implements an Attention mechanism for temporal data.
        Supports Masking.
        Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
        # Input shape
            3D tensor with shape: `(samples, steps, features)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.
        :param kwargs:

        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
        The dimensions are inferred based on the output shape of the RNN.
        Example:
            model.add(LSTM(64, return_sequences=True))
            model.add(Attention())
        """
        self.supports_masking = True
        self.init = initializations.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(Attention, self).__init__(**kwargs)
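
As the docstring suggests, the layer is meant to sit on top of a recurrent layer that returns the full sequence. A hedged usage sketch (the vocabulary size, sequence length, and unit counts are illustrative, and the Attention class defined above is assumed to be importable):

# Hedged usage sketch for the Attention layer above; all dimensions are illustrative.
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

model = Sequential()
model.add(Embedding(input_dim=10000, output_dim=128, input_length=100))
model.add(LSTM(64, return_sequences=True))  # 3D output: (samples, steps, features)
model.add(Attention())                      # collapses the steps axis: (samples, features)
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')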

gcnn.py (project: nn_playground, author: DingKe)
def __init__(self, output_dim, window_size=3, stride=1,
                 kernel_initializer='uniform', bias_initializer='zero',
                 activation='linear', activity_regularizer=None,
                 kernel_regularizer=None, bias_regularizer=None,
                 kernel_constraint=None, bias_constraint=None, 
                 use_bias=True, input_dim=None, input_length=None, **kwargs):
        self.output_dim = output_dim
        self.window_size = window_size
        self.strides = (stride, 1)

        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.activation = activations.get(activation)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.input_spec = [InputSpec(ndim=3)]
        self.input_dim = input_dim
        self.input_length = input_length
        if self.input_dim:
            kwargs['input_shape'] = (self.input_length, self.input_dim)
        super(GCNN, self).__init__(**kwargs)

qrnn.py (project: nn_playground, author: DingKe)
def __init__(self, units, window_size=2, stride=1,
                 return_sequences=False, go_backwards=False, 
                 stateful=False, unroll=False, activation='tanh',
                 kernel_initializer='uniform', bias_initializer='zero',
                 kernel_regularizer=None, bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None, bias_constraint=None, 
                 dropout=0, use_bias=True, input_dim=None, input_length=None,
                 **kwargs):
        self.return_sequences = return_sequences
        self.go_backwards = go_backwards
        self.stateful = stateful
        self.unroll = unroll

        self.units = units 
        self.window_size = window_size
        self.strides = (stride, 1)

        self.use_bias = use_bias
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = dropout
        self.supports_masking = True
        self.input_spec = [InputSpec(ndim=3)]
        self.input_dim = input_dim
        self.input_length = input_length
        if self.input_dim:
            kwargs['input_shape'] = (self.input_length, self.input_dim)
        super(QRNN, self).__init__(**kwargs)

layer_norm_layers.py (project: nn_playground, author: DingKe)
def __init__(self, axis=-1,
                 gamma_init='one', beta_init='zero',
                 gamma_regularizer=None, beta_regularizer=None,
                 epsilon=1e-6, **kwargs): 
        super(LayerNormalization, self).__init__(**kwargs)

        self.axis = to_list(axis)
        self.gamma_init = initializers.get(gamma_init)
        self.beta_init = initializers.get(beta_init)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.epsilon = epsilon

        self.supports_masking = True

layers.py (project: nn_playground, author: DingKe)
def __init__(self, 
                 ratio, 
                 data_format=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(SE, self).__init__(**kwargs)

        self.ratio = ratio
        self.data_format = conv_utils.normalize_data_format(data_format)

        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = True

FixedBatchNormalization.py (project: AerialCrackDetection_Keras, author: TTMRonald)
def __init__(self, epsilon=1e-3, axis=-1,
                 weights=None, beta_init='zero', gamma_init='one',
                 gamma_regularizer=None, beta_regularizer=None, **kwargs):

        self.supports_masking = True
        self.beta_init = initializers.get(beta_init)
        self.gamma_init = initializers.get(gamma_init)
        self.epsilon = epsilon
        self.axis = axis
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.initial_weights = weights
        super(FixedBatchNormalization, self).__init__(**kwargs)

FixedBatchNormalization.py (project: keras-frcnn, author: yhenon)
def __init__(self, epsilon=1e-3, axis=-1,
                 weights=None, beta_init='zero', gamma_init='one',
                 gamma_regularizer=None, beta_regularizer=None, **kwargs):

        self.supports_masking = True
        self.beta_init = initializers.get(beta_init)
        self.gamma_init = initializers.get(gamma_init)
        self.epsilon = epsilon
        self.axis = axis
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.initial_weights = weights
        super(FixedBatchNormalization, self).__init__(**kwargs)

mobilenet.py (project: deep-learning-models, author: fchollet)
def __init__(self,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 depth_multiplier=1,
                 data_format=None,
                 activation=None,
                 use_bias=True,
                 depthwise_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 depthwise_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 depthwise_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(DepthwiseConv2D, self).__init__(
            filters=None,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            activation=activation,
            use_bias=use_bias,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            bias_constraint=bias_constraint,
            **kwargs)
        self.depth_multiplier = depth_multiplier
        self.depthwise_initializer = initializers.get(depthwise_initializer)
        self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
        self.depthwise_constraint = constraints.get(depthwise_constraint)
        self.bias_initializer = initializers.get(bias_initializer)

convolutional.py (project: keras-contrib, author: farizrahman4u)
def __init__(self, filters, kernel_size,
                 kernel_initializer='glorot_uniform', activation=None, weights=None,
                 padding='valid', strides=(1, 1), data_format=None,
                 kernel_regularizer=None, bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None, bias_constraint=None,
                 use_bias=True, **kwargs):
        if data_format is None:
            data_format = K.image_data_format()
        if padding not in {'valid', 'same', 'full'}:
            raise ValueError('Invalid border mode for CosineConvolution2D:', padding)
        self.filters = filters
        self.kernel_size = kernel_size
        self.nb_row, self.nb_col = self.kernel_size
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.activation = activations.get(activation)
        self.padding = padding
        self.strides = tuple(strides)
        self.data_format = normalize_data_format(data_format)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.use_bias = use_bias
        self.input_spec = [InputSpec(ndim=4)]
        self.initial_weights = weights
        super(CosineConvolution2D, self).__init__(**kwargs)

depthwise_conv.py (project: MobileNetworks, author: titu1994)
def __init__(self,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 depth_multiplier=1,
                 data_format=None,
                 activation=None,
                 use_bias=True,
                 depthwise_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 depthwise_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 depthwise_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(DepthwiseConv2D, self).__init__(
            filters=None,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            activation=activation,
            use_bias=use_bias,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            bias_constraint=bias_constraint,
            **kwargs)
        self.depth_multiplier = depth_multiplier
        self.depthwise_initializer = initializers.get(depthwise_initializer)
        self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
        self.depthwise_constraint = constraints.get(depthwise_constraint)
        self.bias_initializer = initializers.get(bias_initializer)

        self._padding = _preprocess_padding(self.padding)
        self._strides = (1,) + self.strides + (1,)
        self._data_format = "NHWC"

dense_tensor.py (project: dense_tensor, author: bstriner)
def __init__(self, units,
                 activation='linear',
                 weights=None,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 bias_initializer='uniform',
                 bias_regularizer=None,
                 bias_constraint=None,
                 activity_regularizer=None,
                 bias=True,
                 input_dim=None,
                 factorization=simple_tensor_factorization(),
                 **kwargs):
        self.activation = activations.get(activation)
        self.units = units
        self.input_dim = input_dim
        self.factorization = factorization

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_initializer = get_initializer(kernel_initializer)
        self.bias_initializer = get_initializer(bias_initializer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.bias = bias
        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim=2)]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(DenseTensor, self).__init__(**kwargs)

convolutional.py (project: kfs, author: the-moliver)
def __init__(self, filters_simple, filters_complex, nb_row, nb_col,
                 init='glorot_uniform', activation='relu', weights=None,
                 padding='valid', strides=(1, 1), data_format=K.image_data_format(),
                 kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
                 W_constraint=None, bias_constraint=None,
                 bias=True, **kwargs):

        if padding not in {'valid', 'same'}:
            raise Exception('Invalid border mode for Convolution2DEnergy:', padding)
        self.filters_simple = filters_simple
        self.filters_complex = filters_complex
        self.nb_row = nb_row
        self.nb_col = nb_col
        self.init = initializers.get(init, data_format=data_format)
        self.activation = activations.get(activation)
        assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
        self.padding = padding
        self.strides = tuple(strides)
        assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
        self.data_format = data_format

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.UnitNormOrthogonal(filters_complex, data_format)
        self.bias_constraint = constraints.get(bias_constraint)

        self.bias = bias
        self.input_spec = [InputSpec(ndim=4)]
        self.initial_weights = weights
        super(Convolution2DEnergy, self).__init__(**kwargs)

convolutional.py (project: kfs, author: the-moliver)
def __init__(self, rank,
                 kernel_size=3,
                 data_format=None,
                 kernel_initialization=.1,
                 bias_initialization=1,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(_ConvGDN, self).__init__(**kwargs)
        self.rank = rank
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(1, rank, 'strides')
        self.padding = conv_utils.normalize_padding('same')
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(1, rank, 'dilation_rate')
        self.kernel_initializer = initializers.Constant(kernel_initialization)
        self.bias_initializer = initializers.Constant(bias_initialization)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=self.rank + 2)

convolutional.py (project: kfs, author: the-moliver)
def __init__(self, filters,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=kconstraints.NonNeg(),
                 k_initializer='zeros',
                 k_regularizer=None,
                 k_constraint=None,
                 tied_k=False,
                 activity_regularizer=None,
                 strides=1,
                 padding='valid',
                 dilation_rate=1,
                 data_format=K.image_data_format(),
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(Conv2DSoftMinMax, self).__init__(**kwargs)

        self.filters = filters
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.k_initializer = initializers.get(k_initializer)
        self.k_regularizer = regularizers.get(k_regularizer)
        self.k_constraint = constraints.get(k_constraint)
        self.tied_k = tied_k
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
        self.padding = conv_utils.normalize_padding(padding)
        self.input_spec = InputSpec(min_ndim=2)
        self.data_format = data_format
        self.supports_masking = True

advanced_activations.py (project: kfs, author: the-moliver)
def __init__(self, init='one', power_init=1, weights=None, axis=-1, fit=True, **kwargs):
        self.supports_masking = True
        self.init = initializations.get(init)
        self.initial_weights = weights
        self.axis = axis
        self.power_init = power_init
        self.fit = fit
        super(PowerReLU, self).__init__(**kwargs)

neuro.py (project: kfs, author: the-moliver)
def __init__(self, quadratic_filters_ex=2, quadratic_filters_sup=2, W_quad_ex_initializer='glorot_uniform',
                 W_quad_sup_initializer='glorot_uniform', W_lin_initializer='glorot_uniform',
                 W_quad_ex_regularizer=None, W_quad_sup_regularizer=None, W_lin_regularizer=None,
                 W_quad_ex_constraint=None, W_quad_sup_constraint=None, W_lin_constraint=None,
                 **kwargs):

        self.quadratic_filters_ex = quadratic_filters_ex
        self.quadratic_filters_sup = quadratic_filters_sup

        self.W_quad_ex_initializer = initializers.get(W_quad_ex_initializer)
        self.W_quad_sup_initializer = initializers.get(W_quad_sup_initializer)
        self.W_lin_initializer = initializers.get(W_lin_initializer)

        self.W_quad_ex_constraint = constraints.get(W_quad_ex_constraint)
        self.W_quad_sup_constraint = constraints.get(W_quad_sup_constraint)
        self.W_lin_constraint = constraints.get(W_lin_constraint)

        self.W_quad_ex_regularizer = regularizers.get(W_quad_ex_regularizer)
        self.W_quad_sup_regularizer = regularizers.get(W_quad_sup_regularizer)
        self.W_lin_regularizer = regularizers.get(W_lin_regularizer)

        self.input_spec = [InputSpec(ndim=2)]

        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(RustSTC, self).__init__(**kwargs)

neuro.py (project: kfs, author: the-moliver)
def __init__(self, weights=None, kernel_initializer='glorot_uniform',
                 alpha_initializer='ones', alpha_regularizer=None, alpha_constraint=None,
                 beta_delta_initializer='ones', beta_delta_regularizer=None, beta_delta_constraint=None,
                 gamma_eta_initializer='ones', gamma_eta_regularizer=None, gamma_eta_constraint=None,
                 rho_initializer='ones', rho_regularizer=None, rho_constraint=None,
                 **kwargs):

        self.alpha_initializer = initializers.get(alpha_initializer)
        self.beta_delta_initializer = initializers.get(beta_delta_initializer)
        self.gamma_eta_initializer = initializers.get(gamma_eta_initializer)
        self.rho_initializer = initializers.get(rho_initializer)

        self.alpha_constraint = constraints.get(alpha_constraint)
        self.beta_delta_constraint = constraints.get(beta_delta_constraint)
        self.gamma_eta_constraint = constraints.get(gamma_eta_constraint)
        self.rho_constraint = constraints.get(rho_constraint)

        self.alpha_regularizer = regularizers.get(alpha_regularizer)
        self.beta_delta_regularizer = regularizers.get(beta_delta_regularizer)
        self.gamma_eta_regularizer = regularizers.get(gamma_eta_regularizer)
        self.rho_regularizer = regularizers.get(rho_regularizer)

        self.input_spec = [InputSpec(ndim=2)]

        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(NakaRushton, self).__init__(**kwargs)
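
Taken together, the snippets follow one recipe: resolve every configurable argument with a get() call in __init__, then consume the resolved objects in build() when the weights are created. A minimal, self-contained sketch of that recipe, assuming Keras 2.x (ScaledDense and its parameters are illustrative, not taken from any of the projects above):

# Minimal sketch of the shared pattern (Keras 2.x assumed).
# ScaledDense is an illustrative layer, not from the projects above.
import keras.backend as K
from keras import activations, constraints, initializers, regularizers
from keras.layers import Layer


class ScaledDense(Layer):
    def __init__(self, units,
                 activation='linear',
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):
        super(ScaledDense, self).__init__(**kwargs)
        self.units = units
        # get() resolves strings, instances/callables, or None into usable objects.
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)

    def build(self, input_shape):
        # The resolved objects are consumed here when the weight is created.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[-1], self.units),
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        super(ScaledDense, self).build(input_shape)

    def call(self, inputs):
        return self.activation(K.dot(inputs, self.kernel))

    def compute_output_shape(self, input_shape):
        return input_shape[:-1] + (self.units,)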

