Python l2() usage examples
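
The snippets below collect real-world uses of Keras's l2() weight regularizer from open-source projects. As a baseline, here is a minimal sketch of the call itself (Keras 2 API; the 0.01 factor is an arbitrary example value):

from keras import regularizers
from keras.layers import Dense

# l2(alpha) returns a regularizer that adds alpha * sum(W ** 2) to the loss
dense = Dense(64, kernel_regularizer=regularizers.l2(0.01))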

rnn.py (project: Hotpot, author: Liang-Qiu)
def prep_model(inputs, N, s0pad, s1pad, c):
    outputs = B.rnn_input(inputs, N, s0pad,
                dropout=c['dropout'], dropoutfix_inp=c['dropoutfix_inp'], dropoutfix_rec=c['dropoutfix_rec'],
                sdim=c['sdim'],
                rnnbidi=c['rnnbidi'], rnn=c['rnn'], rnnact=c['rnnact'], rnninit=c['rnninit'],
                rnnbidi_mode=c['rnnbidi_mode'], rnnlevels=c['rnnlevels'])

    # Projection
    if c['project']:
        proj = Dense(int(N*c['pdim']), activation=c['pact'], kernel_regularizer=l2(c['l2reg']), name='proj')
        e0p = proj(outputs[0])
        e1p = proj(outputs[1])
        N = N*c['pdim']
        return [e0p, e1p], N 
    else:
        return [outputs[0], outputs[1]], N

    #input_dim=int(N*c['sdim'])
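
The l2() call above regularizes a projection layer shared between both sentence representations. A standalone sketch of that idiom (the 300/150 sizes and the 4e-4 penalty are illustrative, not taken from the Hotpot config):

from keras.layers import Input, Dense
from keras.regularizers import l2

e0 = Input(shape=(300,))
e1 = Input(shape=(300,))
proj = Dense(150, activation='relu', kernel_regularizer=l2(4e-4), name='proj')
e0p, e1p = proj(e0), proj(e1)  # one weight matrix, one L2 penalty, both inputs
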
resnet.py (project: NetworkCompress, author: luzai)
def _conv_bn_relu(**conv_params):
    """Helper to build a conv -> BN -> relu block
    """
    nb_filter = conv_params["nb_filter"]
    nb_row = conv_params["nb_row"]
    nb_col = conv_params["nb_col"]
    subsample = conv_params.setdefault("subsample", (1, 1))
    init = conv_params.setdefault("init", "he_normal")
    border_mode = conv_params.setdefault("border_mode", "same")
    W_regularizer = conv_params.setdefault("W_regularizer", l2(1.e-4))

    def f(input):
        conv = Convolution2D(nb_filter=nb_filter, nb_row=nb_row, nb_col=nb_col, subsample=subsample,
                             init=init, border_mode=border_mode, W_regularizer=W_regularizer)(input)
        return _bn_relu(conv)

    return f
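
A hedged usage sketch of the helper above (Keras 1 API to match the snippet; the 7x7 stride-2 stem and the `img` tensor are illustrative):

# img = Input(shape=(3, 224, 224))  # assuming Theano dim ordering
x = _conv_bn_relu(nb_filter=64, nb_row=7, nb_col=7, subsample=(2, 2))(img)
# -> Convolution2D with W_regularizer=l2(1e-4), then batch norm and ReLU
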
resnet.py (project: NetworkCompress, author: luzai)
def _bn_relu_conv(**conv_params):
    """Helper to build a BN -> relu -> conv block.
    This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
    """
    nb_filter = conv_params["nb_filter"]
    nb_row = conv_params["nb_row"]
    nb_col = conv_params["nb_col"]
    subsample = conv_params.setdefault("subsample", (1, 1))
    init = conv_params.setdefault("init", "he_normal")
    border_mode = conv_params.setdefault("border_mode", "same")
    W_regularizer = conv_params.setdefault("W_regularizer", l2(1.e-4))

    def f(input):
        activation = _bn_relu(input)
        return Convolution2D(nb_filter=nb_filter, nb_row=nb_row, nb_col=nb_col, subsample=subsample,
                             init=init, border_mode=border_mode, W_regularizer=W_regularizer)(activation)

    return f
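
For comparison, a minimal Keras 2 rendering of the same pre-activation pattern (my sketch, not from the project; W_regularizer became kernel_regularizer):

from keras.layers import Conv2D, BatchNormalization, Activation
from keras.regularizers import l2

def bn_relu_conv(filters, kernel_size, strides=(1, 1)):
    def f(x):
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        return Conv2D(filters, kernel_size, strides=strides, padding='same',
                      kernel_initializer='he_normal',
                      kernel_regularizer=l2(1e-4))(x)
    return f
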
resnet.py (project: NetworkCompress, author: luzai)
def _shortcut(input, residual):
    """Adds a shortcut between input and residual block and merges them with "sum"
    """
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    shortcut = input
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Convolution2D(nb_filter=residual_shape[CHANNEL_AXIS],
                                 nb_row=1, nb_col=1,
                                 subsample=(stride_width, stride_height),
                                 init="he_normal", border_mode="valid",
                                 W_regularizer=l2(0.0001))(input)

    return merge([shortcut, residual], mode="sum")
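
A worked example of the shape arithmetic, as comments (sizes are illustrative):

# input 56x56x64 merged with residual 28x28x128:
#   stride_width = stride_height = round(56 / 28) = 2, equal_channels is False,
#   so the shortcut becomes a stride-2 1x1 Convolution2D projecting to 128
#   channels with W_regularizer=l2(0.0001); identical shapes keep the identity.
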
malwaresnet.py (project: youarespecial, author: endgameinc)
def ResidualBlock1D_helper(layers, kernel_size, filters, final_stride=1):
    def f(_input):
        basic = _input
        for ln in range(layers):
            #basic = BatchNormalization()( basic ) # triggers known keras bug w/ TimeDistributed: https://github.com/fchollet/keras/issues/5221
            basic = ELU()(basic)  
            basic = Conv1D(filters, kernel_size, kernel_initializer='he_normal',
                           kernel_regularizer=l2(1.e-4), padding='same')(basic)

        # note that this strides without averaging
        return AveragePooling1D(pool_size=1, strides=final_stride)(Add()([_input, basic]))

    return f
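
A hedged usage sketch. Note that filters must equal the channel count of _input, since Add() needs matching shapes, and that AveragePooling1D with pool_size=1 is a pure strided downsample (the "strides without averaging" comment above):

from keras.layers import Input

seq = Input(shape=(1000, 64))
x = ResidualBlock1D_helper(layers=2, kernel_size=3, filters=64, final_stride=2)(seq)
# -> shape (None, 500, 64)
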
hintbot.py (project: hintbot, author: madebyollin)
def createModel(w=None,h=None):
    # Input placeholder
    original = Input(shape=(w, h, 4), name='icon_goes_here')

    # Model layer stack
    x = original
    x = Convolution2D(64, 4, 4, activation='relu', border_mode='same', b_regularizer=l2(0.1))(x)
    x = Convolution2D(64, 4, 4, activation='relu', border_mode='same', b_regularizer=l2(0.1))(x)
    x = Convolution2D(64, 4, 4, activation='relu', border_mode='same', b_regularizer=l2(0.1))(x)
    x = Convolution2D(64, 4, 4, activation='relu', border_mode='same', b_regularizer=l2(0.1))(x)
    x = AveragePooling2D((2, 2), border_mode='valid')(x)
    x = Convolution2D(16, 4, 4, activation='relu', border_mode='same', b_regularizer=l2(0.1))(x)
    x = Convolution2D(4, 4, 4, activation='relu', border_mode='same',  b_regularizer=l2(0.1))(x)
    downscaled = x

    # Compile model
    hintbot = Model(input=original, output=downscaled)
    hintbot.compile(optimizer='adam', loss='mean_squared_error')
    # Train
    if (os.path.isfile(load_weights_filepath)):
        hintbot.load_weights(load_weights_filepath)
    return hintbot
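
One detail worth flagging: every l2(0.1) above is passed as b_regularizer, so it penalizes only the biases, not the convolution kernels (those would take W_regularizer in the Keras 1 API). A hedged usage sketch; load_weights_filepath is a module-level global assumed to be defined elsewhere in hintbot.py:

model = createModel(32, 32)  # or createModel() for a fully convolutional (None, None) input
model.summary()
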
mlp_classifier.py (project: main, author: rmkemker)
def _model(self, input_shape):

        self.model.add(Dense(self.hidden[0], 
                        input_shape=(input_shape[1],), 
                        kernel_regularizer=l2(self.wd),
                        kernel_initializer=self.ki))
        if self.bn:
            self.model.add(BatchNormalization(axis=1))
        self.model.add(Activation(self.activation))

        for i in self.hidden[1:]:
            self.model.add(Dense(i, kernel_regularizer=l2(self.wd),
                                 kernel_initializer=self.ki))
            if self.bn:
                self.model.add(BatchNormalization(axis=1))
            self.model.add(Activation(self.activation))


        self.model.add(Dense(self.N, activation='softmax',
                             kernel_regularizer=l2(self.wd),
                             kernel_initializer=self.ki))
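
The same L2-everywhere MLP pattern as a standalone sketch (hyperparameters are illustrative; the class is assumed to initialize self.model as a Sequential elsewhere):

from keras.models import Sequential
from keras.layers import Dense, Activation, BatchNormalization
from keras.regularizers import l2

model = Sequential()
model.add(Dense(128, input_shape=(30,), kernel_regularizer=l2(1e-4),
                kernel_initializer='he_normal'))
model.add(BatchNormalization(axis=1))
model.add(Activation('relu'))
model.add(Dense(10, activation='softmax', kernel_regularizer=l2(1e-4),
                kernel_initializer='he_normal'))
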
dm_resnet.py (project: dream2016_dm, author: lishen)
def _shortcut(input, residual, weight_decay=.0001, dropout=.0):
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # Should be int if network architecture is correctly configured.
    # !!! The dropout argument is just a placeholder;
    # !!! it must not be applied to the identity mapping.
    stride_width = input._keras_shape[ROW_AXIS] // residual._keras_shape[ROW_AXIS]
    stride_height = input._keras_shape[COL_AXIS] // residual._keras_shape[COL_AXIS]
    equal_channels = residual._keras_shape[CHANNEL_AXIS] == input._keras_shape[CHANNEL_AXIS]

    shortcut = input
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Convolution2D(nb_filter=residual._keras_shape[CHANNEL_AXIS],
                                 nb_row=1, nb_col=1,
                                 subsample=(stride_width, stride_height),
                                 init="he_normal", border_mode="valid", 
                                 W_regularizer=l2(weight_decay))(input)

    return merge([shortcut, residual], mode="sum")


# Builds a residual block with repeating bottleneck blocks.
test_core.py (project: keras-customized, author: ambrite)
def test_activity_regularization():
    from keras.engine import Input, Model

    layer = core.ActivityRegularization(l1=0.01, l2=0.01)

    # test in functional API
    x = Input(shape=(3,))
    z = core.Dense(2)(x)
    y = layer(z)
    model = Model(input=x, output=y)
    model.compile('rmsprop', 'mse', mode='FAST_COMPILE')

    model.predict(np.random.random((2, 3)))

    # test serialization
    model_config = model.get_config()
    model = Model.from_config(model_config)
    model.compile('rmsprop', 'mse')
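
For reference, the same test under the Keras 2 functional API (a sketch; mode='FAST_COMPILE' was a Theano-era compile flag with no Keras 2 equivalent):

import numpy as np
from keras.layers import Input, Dense, ActivityRegularization
from keras.models import Model

x = Input(shape=(3,))
y = ActivityRegularization(l1=0.01, l2=0.01)(Dense(2)(x))
model = Model(inputs=x, outputs=y)
model.compile('rmsprop', 'mse')
model.predict(np.random.random((2, 3)))
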
test_core.py (project: keras-customized, author: ambrite)
def test_maxout_dense():
    from keras import regularizers
    from keras import constraints

    layer_test(core.MaxoutDense,
               kwargs={'output_dim': 3},
               input_shape=(3, 2))

    layer_test(core.MaxoutDense,
               kwargs={'output_dim': 3,
                       'W_regularizer': regularizers.l2(0.01),
                       'b_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.activity_l2(0.01),
                       'W_constraint': constraints.MaxNorm(1),
                       'b_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2))
test_core.py (project: keras-customized, author: ambrite)
def test_timedistributeddense():
    from keras import regularizers
    from keras import constraints

    layer_test(core.TimeDistributedDense,
               kwargs={'output_dim': 2, 'input_length': 2},
               input_shape=(3, 2, 3))

    layer_test(core.TimeDistributedDense,
               kwargs={'output_dim': 3,
                       'W_regularizer': regularizers.l2(0.01),
                       'b_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.activity_l2(0.01),
                       'W_constraint': constraints.MaxNorm(1),
                       'b_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2, 3))
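
Both MaxoutDense and TimeDistributedDense are Keras 1 layers that were removed in Keras 2; the replacement for the latter keeps the same regularizer hooks (a sketch):

from keras import regularizers
from keras.layers import Dense, TimeDistributed

td = TimeDistributed(Dense(3, kernel_regularizer=regularizers.l2(0.01),
                           bias_regularizer=regularizers.l1(0.01)))
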
resnet.py (project: keras-mxnet-benchmarks, author: sandeep-krishnamurthy): this file's _conv_bn_relu, _bn_relu_conv and _shortcut helpers are identical to the NetworkCompress versions shown above and are omitted here.
resnet.py (project: keras-mxnet-benchmarks, author: sandeep-krishnamurthy)
def basic_block(nb_filter, init_subsample=(1, 1), is_first_block_of_first_layer=False):
    """Basic 3 X 3 convolution blocks for use on resnets with layers <= 34.
    Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
    """
    def f(input):

        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            conv1 = Convolution2D(nb_filter=nb_filter,
                                 nb_row=3, nb_col=3,
                                 subsample=init_subsample,
                                 init="he_normal", border_mode="same",
                                 W_regularizer=l2(0.0001))(input)
        else:
            conv1 = _bn_relu_conv(nb_filter=nb_filter, nb_row=3, nb_col=3,
                                  subsample=init_subsample)(input)

        residual = _bn_relu_conv(nb_filter=nb_filter, nb_row=3, nb_col=3)(conv1)
        return _shortcut(input, residual)

    return f
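
A hedged usage sketch: the first block of the first stage skips the leading BN -> ReLU (the stem just applied one) and keeps stride 1, while the first block of each later stage downsamples:

x = basic_block(nb_filter=64, is_first_block_of_first_layer=True)(x)
x = basic_block(nb_filter=128, init_subsample=(2, 2))(x)  # halves spatial dims; _shortcut projects
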
resnet.py (project: keras-mxnet-benchmarks, author: sandeep-krishnamurthy)
def bottleneck(nb_filter, init_subsample=(1, 1), is_first_block_of_first_layer=False):
    """Bottleneck architecture for > 34 layer resnet.
    Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf

    Returns:
        A final conv layer of nb_filter * 4
    """
    def f(input):

        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            conv_1_1 = Convolution2D(nb_filter=nb_filter,
                                 nb_row=1, nb_col=1,
                                 subsample=init_subsample,
                                 init="he_normal", border_mode="same",
                                 W_regularizer=l2(0.0001))(input)
        else:
            conv_1_1 = _bn_relu_conv(nb_filter=nb_filter, nb_row=1, nb_col=1,
                                     subsample=init_subsample)(input)

        conv_3_3 = _bn_relu_conv(nb_filter=nb_filter, nb_row=3, nb_col=3)(conv_1_1)
        residual = _bn_relu_conv(nb_filter=nb_filter * 4, nb_row=1, nb_col=1)(conv_3_3)
        return _shortcut(input, residual)

    return f
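
Since this block emits nb_filter * 4 channels, the first bottleneck of every stage forces _shortcut into its 1x1 projection even at stride 1. A hedged usage sketch:

x = bottleneck(nb_filter=64)(x)  # 1x1/64 -> 3x3/64 -> 1x1/256; shortcut projects to 256 channels
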
densenet.py (project: DenseNet, author: titu1994)
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, ReLU and a 1x1 Conv2D with optional compression, then AveragePooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                    in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor after applying batch_norm, relu-conv and average pooling
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    return x
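
A worked example of the compression arithmetic, as comments (values are illustrative):

# __transition_block(x, nb_filter=256, compression=0.5)
#   -> 1x1 Conv2D with int(256 * 0.5) = 128 filters, kernel_regularizer=l2(1e-4)
#   -> 2x2 AveragePooling2D then halves the spatial dimensions
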
densenet_fast.py (project: DenseNet, author: titu1994)
def conv_block(ip, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply ReLU, then a 3x3 Conv2D and optional dropout

    Args:
        ip: Input keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns: keras tensor with relu, convolution2d and optional dropout added

    '''

    x = Activation('relu')(ip)
    x = Convolution2D(nb_filter, 3, 3, init="he_uniform", border_mode="same", bias=False,
                      W_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
densenet_fast.py (project: DenseNet, author: titu1994)
def transition_block(ip, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply a 1x1 Conv2D, optional dropout and AveragePooling2D, followed by BatchNorm

    Args:
        ip: keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns: keras tensor after conv, optional dropout, average pooling and batch_norm

    '''

    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    x = Convolution2D(nb_filter, 1, 1, init="he_uniform", border_mode="same", bias=False,
                      W_regularizer=l2(weight_decay))(ip)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)

    return x
model.py (project: BetaStock, author: qweraqq)
def build(self):
        dim_data = self.size_of_input_data_dim
        nb_time_step = self.size_of_input_timesteps
        financial_time_series_input = Input(shape=(nb_time_step, dim_data))

        lstm_layer_1 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout, inner_activation='sigmoid',
                            W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                            return_sequences=True)
        lstm_layer_2 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout, inner_activation='sigmoid',
                            W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                            return_sequences=True)

        h1 = lstm_layer_1(financial_time_series_input)
        h2 = lstm_layer_2(h1)
        time_series_predictions = TimeDistributedDense(1)(h2)
        self.model = Model(financial_time_series_input, time_series_predictions,
                           name="deep rnn for financial time series forecasting")

