Example source code for the Python maxnorm() class

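maxnorm is the max-norm weight constraint from keras.constraints (newer Keras releases expose it as max_norm): after each gradient update it rescales a layer's incoming weight vectors so their L2 norm does not exceed a given ceiling, and it is often combined with dropout as a regularizer. The snippets below come from open-source projects; as a warm-up, here is a minimal, self-contained sketch of attaching the constraint to a Dense layer (the layer sizes, input dimension, and the 3.0 ceiling are illustrative assumptions, not values from the projects below).

from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.constraints import maxnorm

# Toy binary classifier: each hidden unit's incoming weight vector is rescaled
# so its L2 norm never exceeds 3.0; dropout complements the constraint.
model = Sequential()
model.add(Dense(64, input_dim=20, activation='relu',
                kernel_constraint=maxnorm(3.0)))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy',
              metrics=['accuracy'])
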
model.py (project: textfool, author: bogdan-kulynych)

# Imports needed to run this snippet; `embeddings` below is a pre-trained
# word-embedding matrix defined elsewhere in the original module.
import keras.backend as K
from keras.models import Sequential
from keras.layers import Embedding, Conv1D, MaxPooling1D, Dropout, GlobalAveragePooling1D, Dense
from keras.regularizers import l2
from keras.constraints import maxnorm

def build_model(max_length=1000,
                nb_filters=64,
                kernel_size=3,
                pool_size=2,
                regularization=0.01,
                weight_constraint=2.,
                dropout_prob=0.4,
                clear_session=True):
    if clear_session:
        K.clear_session()

    model = Sequential()
    model.add(Embedding(
        embeddings.shape[0],
        embeddings.shape[1],
        input_length=max_length,
        trainable=False,
        weights=[embeddings]))

    model.add(Conv1D(nb_filters, kernel_size, activation='relu'))
    model.add(Conv1D(nb_filters, kernel_size, activation='relu'))
    model.add(MaxPooling1D(pool_size))

    model.add(Dropout(dropout_prob))

    model.add(Conv1D(nb_filters * 2, kernel_size, activation='relu'))
    model.add(Conv1D(nb_filters * 2, kernel_size, activation='relu'))
    model.add(MaxPooling1D(pool_size))

    model.add(Dropout(dropout_prob))

    model.add(GlobalAveragePooling1D())
    model.add(Dense(1,
        kernel_regularizer=l2(regularization),
        kernel_constraint=maxnorm(weight_constraint),
        activation='sigmoid'))

    model.compile(
        loss='binary_crossentropy',
        optimizer='rmsprop',
        metrics=['accuracy'])

    return model
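
A hypothetical call sketch for build_model, assuming it lives in the same module as a module-level `embeddings` array (the vocabulary size, embedding dimension, and random data below are illustrative assumptions, not values from the project):

import numpy as np

# Stand-in for the pre-trained embedding matrix the original project loads.
embeddings = np.random.rand(5000, 100)

model = build_model(max_length=1000)
X = np.random.randint(0, 5000, size=(32, 1000))   # padded token-id sequences
y = np.random.randint(0, 2, size=(32, 1))
model.fit(X, y, epochs=1, batch_size=16)
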
bnn.py (project: dac-training, author: jlonij)

# Imports needed to run this snippet.
from keras.models import Model
from keras.layers import Input, Dense, Dropout, concatenate
from keras.constraints import maxnorm

def create_model(data):
    '''
    Build and compile the Keras model.
    '''
    # Entity branch
    entity_inputs = Input(shape=(data[0].shape[1],))
    entity_x = Dense(data[0].shape[1], activation='relu',
            kernel_constraint=maxnorm(3))(entity_inputs)
    entity_x = Dropout(0.25)(entity_x)
    #entity_x = Dense(50, activation='relu',
    #        kernel_constraint=maxnorm(3))(entity_x)
    #entity_x = Dropout(0.25)(entity_x)

    # Candidate branch
    candidate_inputs = Input(shape=(data[1].shape[1],))
    candidate_x = Dense(data[1].shape[1], activation='relu',
            kernel_constraint=maxnorm(3))(candidate_inputs)
    candidate_x = Dropout(0.25)(candidate_x)
    #candidate_x = Dense(50, activation='relu',
    #        kernel_constraint=maxnorm(3))(candidate_x)
    #candidate_x = Dropout(0.25)(candidate_x)

    # Cosine proximity
    # cos_x = dot([entity_x, candidate_x], axes=1, normalize=False)
    # cos_x = concatenate([entity_x, candidate_x])
    # cos_output = Dense(1, activation='sigmoid')(cos_x)

    # Match branch
    match_inputs = Input(shape=(data[2].shape[1],))
    match_x = Dense(data[1].shape[1], activation='relu',
            kernel_constraint=maxnorm(3))(match_inputs)
    match_x = Dropout(0.25)(match_x)

    # Merge
    x = concatenate([entity_x, candidate_x, match_x])
    x = Dense(32, activation='relu', kernel_constraint=maxnorm(3))(x)
    x = Dropout(0.25)(x)
    x = Dense(16, activation='relu', kernel_constraint=maxnorm(3))(x)
    x = Dropout(0.25)(x)
    x = Dense(8, activation='relu', kernel_constraint=maxnorm(3))(x)
    x = Dropout(0.25)(x)

    predictions = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=[entity_inputs, candidate_inputs, match_inputs],
        outputs=predictions)
    model.compile(optimizer='RMSprop', loss='binary_crossentropy',
        metrics=['accuracy'])

    return model
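
A hypothetical call sketch for create_model, with random arrays standing in for the project's entity, candidate, and match feature matrices (all shapes and names below are illustrative assumptions):

import numpy as np

entity_feats = np.random.rand(200, 20)      # plays the role of data[0]
candidate_feats = np.random.rand(200, 30)   # plays the role of data[1]
match_feats = np.random.rand(200, 10)       # plays the role of data[2]
labels = np.random.randint(0, 2, size=(200, 1))

model = create_model([entity_feats, candidate_feats, match_feats])
model.fit([entity_feats, candidate_feats, match_feats], labels,
          epochs=2, batch_size=32, validation_split=0.1)
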
with_locations_lstm.py (project: neural-CWS, author: Akuchi612)

# Imports needed to run this snippet.
import numpy as np
from keras.models import Model
from keras.layers import Input, Embedding, LSTM, Bidirectional, Dropout, Dense, concatenate
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils

def build_model(data, word_weights, max_len, tag_window=5, embed_dim=100, location_dim=10):
    batch_size = 2048
    nb_epoch = 16
    nb_class = 4
    hidden_dim = 128

    train_x = np.array(list(data['x']))
    train_l = np.array(list(data['l']))
    train_y = np.array(list(data['y']))
    train_y = np_utils.to_categorical(train_y, nb_class)

    print(train_x.shape)
    print(train_l.shape)
    print(train_y.shape)
    input_x = Input(shape=(tag_window, ), dtype='float32', name='input_x')
    input_l = Input(shape=(tag_window, ), dtype='float32', name='input_l')

    embed_x = Embedding(output_dim=embed_dim, 
            input_dim=word_weights.shape[0],
            input_length=tag_window,
            weights=[word_weights],
            name='embed_x')(input_x)
    embed_l = Embedding(output_dim=location_dim, 
            input_dim=max_len,
            input_length=tag_window,
            name='embed_l')(input_l)

    # Concatenate word and location embeddings along the feature axis.
    merge_embed = concatenate([embed_x, embed_l], axis=2)
    bi_lstm = Bidirectional(LSTM(hidden_dim, return_sequences=False), merge_mode='sum')(merge_embed)
    x_dropout = Dropout(0.5)(bi_lstm)
    x_output = Dense(nb_class,
        # kernel_regularizer=regularizers.l2(0.01),
        # kernel_constraint=maxnorm(3.0),
        # activity_regularizer=regularizers.l2(0.01),
        activation='softmax')(x_dropout)
    model = Model(inputs=[input_x, input_l], outputs=[x_output])
    model.compile(optimizer='adamax', loss='categorical_crossentropy', metrics=['accuracy'])
    print('Train...')
    model_path = './model/location_128hidden_2048batch'
    modelcheckpoint = ModelCheckpoint(model_path, verbose=1, save_best_only=True)
    model.fit([train_x, train_l], [train_y], validation_split=0.2,
            batch_size=batch_size, epochs=nb_epoch, shuffle=True,
            callbacks=[modelcheckpoint])

