Example source code for the Python class PReLU()
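PReLU (Parametric ReLU) learns the slope of the negative half of the activation instead of fixing it: f(x) = x for x > 0 and f(x) = alpha * x otherwise, where alpha is a trainable weight (one per unit by default, or shared across axes via shared_axes). Before the project snippets below, a minimal standalone sketch; it is written against the tf.keras API, while most snippets target older standalone Keras releases:

import numpy as np
from tensorflow import keras
from tensorflow.keras import layers

# PReLU keeps one trainable alpha per unit by default; alpha starts at zero,
# so the layer behaves like plain ReLU until training moves it.
model = keras.Sequential([
    layers.Dense(8, input_shape=(4,)),
    layers.PReLU(alpha_initializer='zeros'),
])

x = np.random.rand(2, 4).astype('float32')
print(model(x).shape)  # (2, 8)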

ikki_NN_1.py (project: stacking, author: ikki407)
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=110, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=110,output_dim=350, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=350,output_dim=150, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=150,output_dim=20, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))    
            sgd = SGD(lr=0.02, decay=1e-10, momentum=0.9, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
ikki_NN_1.py (project: stacking, author: ikki407)
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=110, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.3))
            model.add(Dense(input_dim=110,output_dim=200, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.5))
            model.add(Dense(input_dim=200,output_dim=60, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=60,output_dim=80, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.3))
            model.add(Dense(input_dim=80,output_dim=2, init='he_normal', activation='softmax'))    
            sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
util.py (project: chess-deep-rl, author: rajpurkar)
def conv_wrap(params, conv_out, i):
    from keras.layers.normalization import BatchNormalization
    from keras.layers.advanced_activations import PReLU
    from keras.layers.convolutional import Convolution2D
    from keras.layers import Dropout

    # use filter_width_K if it is there, otherwise use 3
    filter_key = "filter_width_%d" % i
    filter_width = params.get(filter_key, 3)
    num_filters = params["num_filters"]
    conv_out = Convolution2D(
        nb_filter=num_filters,
        nb_row=filter_width,
        nb_col=filter_width,
        init='he_normal',
        border_mode='same')(conv_out)
    conv_out = BatchNormalization()(conv_out)
    conv_out = PReLU()(conv_out)
    if params["dropout"] > 0:
        conv_out = Dropout(params["dropout"])(conv_out)
    return conv_out
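A hypothetical usage sketch for conv_wrap (names and shapes invented; assumes the same Keras 1.x-era functional API as the snippet):

from keras.layers import Input

# filter_width_0 overrides the default width of 3 for block index 0
params = {"num_filters": 32, "dropout": 0.25, "filter_width_0": 5}
board = Input(shape=(8, 8, 12))
conv_out = conv_wrap(params, board, 0)  # Conv2D(5x5) -> BatchNorm -> PReLU -> Dropout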
model_zoo.py (project: visual_turing_test-tutorial, author: mateuszmalinowski)
def deep_mlp(self):
        """
        Deep Multilayer Perceptron.
        """
        if self._config.num_mlp_layers == 0:
            self.add(Dropout(0.5))
        else:
            for j in range(self._config.num_mlp_layers):
                self.add(Dense(self._config.mlp_hidden_dim))
                if self._config.mlp_activation == 'elu':
                    self.add(ELU())
                elif self._config.mlp_activation == 'leaky_relu':
                    self.add(LeakyReLU())
                elif self._config.mlp_activation == 'prelu':
                    self.add(PReLU())
                else:
                    self.add(Activation(self._config.mlp_activation))
                self.add(Dropout(0.5))
MTCNN.py (project: keras-mtcnn, author: xiangrufan)
def create_Kao_Pnet( weight_path = 'model12old.h5'):
    input = Input(shape=[None, None, 3])
    x = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1,2],name='PReLU1')(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1,2],name='PReLU2')(x)
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1,2],name='PReLU3')(x)
    classifier = Conv2D(2, (1, 1), activation='softmax', name='conv4-1')(x)
    bbox_regress = Conv2D(4, (1, 1), name='conv4-2')(x)
    model = Model([input], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model
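The shared_axes=[1, 2] argument ties the learned alpha across both spatial axes, so each PReLU keeps one parameter per channel; that is what lets this fully convolutional PNet run on inputs of any height and width. A hypothetical smoke test, assuming the pretrained weight file 'model12old.h5' is actually present:

import numpy as np

pnet = create_Kao_Pnet('model12old.h5')
scores, boxes = pnet.predict(np.zeros((1, 48, 48, 3)))  # any HxW works; outputs are per-location maps
print(scores.shape, boxes.shape)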
n12_pepe_zoo.py (project: kaggle_yt8m, author: N01Z3)
def fc_inception(input_tensor, n=3000, d=0.5):

    br1 = Dense(n)(input_tensor)
    br1 = LeakyReLU()(br1)
    br1 = BatchNormalization()(br1)
    br1 = Dropout(d)(br1)
    br1 = Dense(int(n/3.0))(br1)

    br2 = Dense(n)(input_tensor)
    br2 = BatchNormalization()(br2)
    br2 = ELU()(br2)
    br2 = Dropout(d)(br2)
    br2 = Dense(int(n/3.0))(br2)

    br3 = Dense(int(n/3.0))(input_tensor)
    br3 = BatchNormalization()(br3)
    br3 = PReLU()(br3)
    br3 = Dropout(d)(br3)
    br3 = Dense(int(n/3.0))(br3)
    br3 = BatchNormalization()(br3)
    br3 = PReLU()(br3)
    br3 = Dropout(d)(br3)
    br3 = Dense(int(n/3.0))(br3)
    br3 = BatchNormalization()(br3)
    br3 = PReLU()(br3)
    br3 = Dropout(d)(br3)

    x = merge([br1, br2, br3], mode='concat', concat_axis=1)
    return x
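A hypothetical call (feature width invented), assuming the Keras 1.x functional API the snippet uses (merge with mode='concat'); each branch ends in n/3 units, so the concatenated output has roughly n features:

from keras.layers import Input

video_features = Input(shape=(1024,))
x = fc_inception(video_features, n=3000, d=0.5)  # three parallel fully connected branches, concatenated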
utils_models.py (project: auto_ml, author: doordash)
def make_deep_learning_model(hidden_layers=None, num_cols=None, optimizer='adam', dropout_rate=0.2, weight_constraint=0, feature_learning=False):

    if feature_learning == True and hidden_layers is None:
        hidden_layers = [1, 1, 0.5]

    if hidden_layers is None:
        hidden_layers = [1, 1, 1]

    # The hidden_layers argument only describes a shape: it does not know num_cols; its values (e.g. 0.5, 1, 2) are multipliers that get scaled by num_cols
    scaled_layers = []
    for layer in hidden_layers:
        scaled_layers.append(int(num_cols * layer))

    # If we're training this model for feature_learning, our penultimate layer (our final hidden layer before the "output" layer) will always have 10 neurons, meaning that we always output 10 features from our feature_learning model
    if feature_learning == True:
        scaled_layers.append(10)

    model = Sequential()

    model.add(Dense(scaled_layers[0], input_dim=num_cols, kernel_initializer='normal', kernel_regularizer=regularizers.l2(0.01)))
    model.add(PReLU())

    for layer_size in scaled_layers[1:-1]:
        model.add(Dense(layer_size, kernel_initializer='normal', kernel_regularizer=regularizers.l2(0.01)))
        model.add(PReLU())

    # There are times we will want the output from our penultimate layer, not the final layer, so give it a name that makes the penultimate layer easy to find
    model.add(Dense(scaled_layers[-1], kernel_initializer='normal', name='penultimate_layer', kernel_regularizer=regularizers.l2(0.01)))
    model.add(PReLU())

    # For regressors, we want an output layer with a single node
    model.add(Dense(1, kernel_initializer='normal'))

    # The final step is to compile the model
    model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['mean_absolute_error', 'mean_absolute_percentage_error'])

    return model
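A hypothetical call with invented hyperparameters: the multipliers in hidden_layers are scaled by num_cols, so [1, 0.75, 0.5] with num_cols=100 gives hidden sizes 100, 75 and 50 ahead of the single-node regression output:

regressor = make_deep_learning_model(hidden_layers=[1, 0.75, 0.5], num_cols=100)
regressor.summary()  # Dense(100) -> PReLU -> Dense(75) -> PReLU -> Dense(50, 'penultimate_layer') -> PReLU -> Dense(1)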
utils_models.py (project: auto_ml, author: doordash)
def make_deep_learning_classifier(hidden_layers=None, num_cols=None, optimizer='adam', dropout_rate=0.2, weight_constraint=0, final_activation='sigmoid', feature_learning=False):

    if feature_learning == True and hidden_layers is None:
        hidden_layers = [1, 1, 0.5]

    if hidden_layers is None:
        hidden_layers = [1, 1, 1]

    # The hidden_layers argument only describes a shape: it does not know num_cols; its values (e.g. 0.5, 1, 2) are multipliers that get scaled by num_cols
    scaled_layers = []
    for layer in hidden_layers:
        scaled_layers.append(int(num_cols * layer))

    # If we're training this model for feature_learning, our penultimate layer (our final hidden layer before the "output" layer) will always have 10 neurons, meaning that we always output 10 features from our feature_learning model
    if feature_learning == True:
        scaled_layers.append(10)


    model = Sequential()

    model.add(Dense(scaled_layers[0], input_dim=num_cols, kernel_initializer='normal', kernel_regularizer=regularizers.l2(0.01)))
    model.add(PReLU())

    for layer_size in scaled_layers[1:-1]:
        model.add(Dense(layer_size, kernel_initializer='normal', kernel_regularizer=regularizers.l2(0.01)))
        model.add(PReLU())

    # There are times we will want the output from our penultimate layer, not the final layer, so give it a name that makes the penultimate layer easy to find
    model.add(Dense(scaled_layers[-1], kernel_initializer='normal', name='penultimate_layer', kernel_regularizer=regularizers.l2(0.01)))
    model.add(PReLU())

    model.add(Dense(1, kernel_initializer='normal', activation=final_activation))
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy', 'poisson'])
    return model
decoder.py (project: enet-keras, author: PavlosMelissinos)
def bottleneck(encoder, output, upsample=False, reverse_module=False):
    internal = output // 4

    x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    # x = Activation('relu')(x)
    x = PReLU(shared_axes=[1, 2])(x)
    if not upsample:
        x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
    else:
        x = Conv2DTranspose(filters=internal, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.1)(x)
    # x = Activation('relu')(x)
    x = PReLU(shared_axes=[1, 2])(x)

    x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)

    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module is not False:
            other = MaxUnpooling2D()([other, reverse_module])

    if upsample and reverse_module is False:
        decoder = x
    else:
        x = BatchNormalization(momentum=0.1)(x)
        decoder = add([x, other])
        # decoder = Activation('relu')(decoder)
        decoder = PReLU(shared_axes=[1, 2])(decoder)

    return decoder
test_advanced_activations.py (project: keras, author: GeekLiB)
def test_prelu():
    from keras.layers.advanced_activations import PReLU
    layer_test(PReLU, kwargs={},
               input_shape=(2, 3, 4))
wavenet.py (project: eva, author: israelg99)
def Wavenet(input_shape, filters, depth, stacks, last=0, h=None, build=True):
    # TODO: Soft targets? A float to make targets a gaussian with stdev.
    # TODO: Train only receptive field. The temporal-first outputs are computed from zero-padding.
    # TODO: Global conditioning?
    # TODO: Local conditioning?

    _, nb_bins = input_shape

    input_audio = Input(input_shape, name='audio_input')

    model = CausalAtrousConvolution1D(filters, 2, mask_type='A', atrous_rate=1, border_mode='valid')(input_audio)

    out, skip_connections = WavenetBlocks(filters, depth, stacks)(model)

    out = Merge(mode='sum', name='merging_skips')(skip_connections)
    out = PReLU()(out)

    out = Convolution1D(nb_bins, 1, border_mode='same')(out)
    out = PReLU()(out)

    out = Convolution1D(nb_bins, 1, border_mode='same')(out)

    # https://storage.googleapis.com/deepmind-live-cms/documents/BlogPost-Fig2-Anim-160908-r01.gif
    if last > 0:
        out = Lambda(lambda x: x[:, -last:], output_shape=(last, out._keras_shape[2]), name='last_out')(out)

    out = Activation('softmax')(out)

    if build:
        model = Model(input_audio, out)
        model.compile(Nadam(), 'sparse_categorical_crossentropy')

    return model
residual_block.py (project: eva, author: israelg99)
def __call__(self, model):
        # 2h -> h
        block = PReLU()(model)
        block = MaskedConvolution2D(self.filters//2, 1, 1)(block)

        # h 3x3 -> h
        block = PReLU()(block)
        block = MaskedConvolution2D(self.filters//2, 3, 3, border_mode='same')(block)

        # h -> 2h
        block = PReLU()(block)
        block = MaskedConvolution2D(self.filters, 1, 1)(block)

        return Merge(mode='sum')([model, block])
test_views.py (project: Fabrik, author: Cloud-CV)
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['PReLU']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'PReLU')
layers_export.py (project: Fabrik, author: Cloud-CV)
def activation(layer, layer_in, layerId):
    out = {}
    if (layer['info']['type'] == 'ReLU'):
        if (layer['params']['negative_slope'] != 0):
            out[layerId] = LeakyReLU(alpha=layer['params']['negative_slope'])(*layer_in)
        else:
            out[layerId] = Activation('relu')(*layer_in)
    elif (layer['info']['type'] == 'PReLU'):
        out[layerId] = PReLU()(*layer_in)
    elif (layer['info']['type'] == 'ELU'):
        out[layerId] = ELU(alpha=layer['params']['alpha'])(*layer_in)
    elif (layer['info']['type'] == 'ThresholdedReLU'):
        out[layerId] = ThresholdedReLU(theta=layer['params']['theta'])(*layer_in)
    elif (layer['info']['type'] == 'Sigmoid'):
        out[layerId] = Activation('sigmoid')(*layer_in)
    elif (layer['info']['type'] == 'TanH'):
        out[layerId] = Activation('tanh')(*layer_in)
    elif (layer['info']['type'] == 'Softmax'):
        out[layerId] = Activation('softmax')(*layer_in)
    elif (layer['info']['type'] == 'SELU'):
        out[layerId] = Activation('selu')(*layer_in)
    elif (layer['info']['type'] == 'Softplus'):
        out[layerId] = Activation('softplus')(*layer_in)
    elif (layer['info']['type'] == 'Softsign'):
        out[layerId] = Activation('softsign')(*layer_in)
    elif (layer['info']['type'] == 'HardSigmoid'):
        out[layerId] = Activation('hard_sigmoid')(*layer_in)
    return out
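A hypothetical call, mirroring the nested dict layout the function reads (the keys follow the lookups above; the values are invented):

from keras.layers import Input

layer_spec = {'info': {'type': 'PReLU'}, 'params': {}}
inp = Input(shape=(64,))
out = activation(layer_spec, [inp], 'l1')['l1']  # a tensor passed through PReLU()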
Model.py (project: NetworkCompress, author: luzai)
def conv_pooling_layer(self, name, kernel_size, filters, kernel_regularizer_l2):
        def f(input):
            layer = Conv2D(kernel_size=kernel_size, filters=filters, name=name, padding='same',
                           kernel_regularizer=regularizers.l2(kernel_regularizer_l2))(input)
            layer = PReLU()(layer)
            layer = keras.layers.MaxPooling2D(name=name + '_maxpooling')(layer)
            return layer

        return f
Model.py (project: NetworkCompress, author: luzai)
def group_layer(self, group_num, filters, name, kernel_regularizer_l2):
        def f(input):
            if group_num == 1:
                tower = Conv2D(filters, (1, 1), name=name + '_conv2d_0_1', padding='same',
                               kernel_initializer=IdentityConv())(input)
                tower = Conv2D(filters, (3, 3), name=name + '_conv2d_0_2', padding='same',
                               kernel_initializer=IdentityConv(),
                               kernel_regularizer=regularizers.l2(kernel_regularizer_l2))(tower)
                tower = PReLU()(tower)
                return tower
            else:
                group_output = []
                for i in range(group_num):
                    filter_num = filters // group_num  # integer division so Conv2D gets an int
                    # if filters = 201, group_num = 4, make sure last group filters num = 51
                    if i == group_num - 1:  # last group
                        filter_num = filters - i * (filters // group_num)

                    tower = Conv2D(filter_num, (1, 1), name=name + '_conv2d_' + str(i) + '_1', padding='same',
                                   kernel_initializer=GroupIdentityConv(i, group_num))(input)
                    tower = Conv2D(filter_num, (3, 3), name=name + '_conv2d_' + str(i) + '_2', padding='same',
                                   kernel_initializer=IdentityConv(),
                                   kernel_regularizer=regularizers.l2(kernel_regularizer_l2))(tower)
                    tower = PReLU()(tower)
                    group_output.append(tower)

                if K.image_data_format() == 'channels_first':
                    axis = 1
                elif K.image_data_format() == 'channels_last':
                    axis = 3
                output = Concatenate(axis=axis)(group_output)

                return output

        return f
GA.py (project: NetworkCompress, author: luzai)
def make_init_model(self):
        models = []

        input_data = Input(shape=self.gl_config.input_shape)
        import random
        init_model_index = random.randint(1, 4)
        init_model_index = 1
        if init_model_index == 1:  # one conv layer with kernel num = 64
            stem_conv_1 = Conv2D(128, 3, padding='same', name='conv2d1' )(input_data)
            stem_conv_1 = PReLU()(stem_conv_1)

        elif init_model_index == 2:  # two conv layers with kernel num = 64
            stem_conv_0 = Conv2D(128, 3, padding='same', name='conv2d1')(input_data)
            stem_conv_0 = PReLU()(stem_conv_0)
            stem_conv_1 = Conv2D(128, 3, padding='same', name='conv2d2')(stem_conv_0)
            stem_conv_1 = PReLU()(stem_conv_1)

        elif init_model_index == 3:  # one conv layer with a wider kernel num = 128
            stem_conv_1 = Conv2D(256, 3, padding='same', name='conv2d1')(input_data)
            stem_conv_1 = PReLU()(stem_conv_1)

        elif init_model_index == 4:  # two conv layers with a wider kernel_num = 128
            stem_conv_0 = Conv2D(256, 3, padding='same', name='conv2d1')(input_data)
            stem_conv_0 = PReLU()(stem_conv_0)
            stem_conv_1 = Conv2D(256, 3, padding='same', name='conv2d2')(stem_conv_0)
            stem_conv_1 = PReLU()(stem_conv_1)
        import keras
        stem_conv_1 = keras.layers.MaxPooling2D(name='maxpooling2d1')(stem_conv_1)
        stem_conv_1 = Conv2D(self.gl_config.nb_class, 3, padding='same', name='conv2d3')(stem_conv_1)
        stem_global_pooling_1 = GlobalMaxPooling2D(name='globalmaxpooling2d1')(stem_conv_1)
        stem_softmax_1 = Activation('softmax', name='activation1')(stem_global_pooling_1)

        model = Model(inputs=input_data, outputs=stem_softmax_1)

        return model
test_keras2_numeric.py (project: coremltools, author: apple)
def test_tiny_conv_prelu_random(self,
                                    model_precision=_MLMODEL_FULL_PRECISION):
        np.random.seed(1988)

        # Define a model
        from keras.layers.advanced_activations import PReLU
        model = Sequential()
        model.add(Conv2D(input_shape = (10, 10, 3),
            filters = 3, kernel_size = (5,5), padding = 'same'))
        model.add(PReLU(shared_axes=[1, 2]))

        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Get the coreml model
        self._test_keras_model(model, model_precision=model_precision)
test_keras_numeric.py (project: coremltools, author: apple)
def test_tiny_conv_prelu_random(self):
        np.random.seed(1988)

        # Define a model
        from keras.layers.advanced_activations import PReLU
        model = Sequential()
        model.add(Convolution2D(input_shape = (10, 10, 3),
            nb_filter = 3, nb_row = 5, nb_col = 5, border_mode = 'same'))
        model.add(PReLU(shared_axes=[1, 2]))

        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Get the coreml model
        self._test_keras_model(model)
utils_models.py (project: auto_ml, author: ClimbsRocks)
def get_activation_layer(activation):
    if activation == 'LeakyReLU':
        return LeakyReLU()
    if activation == 'PReLU':
        return PReLU()
    if activation == 'ELU':
        return ELU()
    if activation == 'ThresholdedReLU':
        return ThresholdedReLU()

    return Activation(activation)

# TODO: same for optimizers, including clipnorm
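A hypothetical usage sketch showing how a config string turns into a layer instance:

from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(64, input_dim=32))
model.add(get_activation_layer('PReLU'))  # unknown names fall through to Activation(name)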

