Python SGD class: example source code
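Every snippet on this page constructs or swaps in Keras's SGD optimizer. As a quick reference, here is a minimal sketch of the constructor with the keyword names used throughout these examples (the Keras 1.x-era API; newer releases renamed lr to learning_rate):

from keras.optimizers import SGD

# Stochastic gradient descent with momentum, per-update learning-rate
# decay, and Nesterov acceleration, as used in the snippets below.
sgd = SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True)
# model.compile(loss='categorical_crossentropy', optimizer=sgd)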

gru.py (project: LSTM-GRU-CNN-MLP, author: ansleliu)
def build_model(layers):
    model = Sequential()

    model.add(GRU(input_dim=layers[0], output_dim=layers[1], activation='tanh', return_sequences=True))
    model.add(Dropout(0.15))  # dropout to reduce overfitting

    # model.add(GRU(layers[2],activation='tanh', return_sequences=True))
    # model.add(Dropout(0.2))  # Dropout overfitting

    model.add(GRU(layers[2], activation='tanh', return_sequences=False))
    model.add(Dropout(0.15))  # dropout to reduce overfitting

    model.add(Dense(output_dim=layers[3]))
    model.add(Activation("linear"))

    start = time.time()
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss="mse", optimizer=sgd)
    model.compile(loss="mse", optimizer="rmsprop") # Nadam rmsprop
    print "Compilation Time : ", time.time() - start
    return model
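A hedged usage sketch: from the indices above, layers packs the sizes as [input_dim, units of the first GRU, units of the second GRU, output_dim]; the concrete numbers below are illustrative, not taken from the project.

model = build_model([1, 50, 100, 1])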
cifar100_fractal.py (project: keras-fractalnet, author: snf)
def build_network(deepest=False):
    dropout = [0., 0.1, 0.2, 0.3, 0.4]
    conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
    input = Input(shape=(3, 32, 32))
    output = fractal_net(
        c=3, b=5, conv=conv,
        drop_path=0.15, dropout=dropout,
        deepest=deepest)(input)
    output = Flatten()(output)
    output = Dense(NB_CLASSES, init='he_normal')(output)
    output = Activation('softmax')(output)
    model = Model(input=input, output=output)
    optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
    #optimizer = RMSprop(lr=LEARN_START)
    #optimizer = Adam()
    #optimizer = Nadam()
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    plot(model, to_file='model.png')
    return model
mnist_net2net.py (project: keras, author: GeekLiB)
def make_teacher_model(train_data, validation_data, nb_epoch=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, 3, input_shape=input_shape,
                     border_mode='same', name='conv1'))
    model.add(MaxPooling2D(name='pool1'))
    model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2'))
    model.add(MaxPooling2D(name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(nb_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y, nb_epoch=nb_epoch,
                        validation_data=validation_data)
    return model, history
cifar10_fractal.py (project: keras-fractalnet, author: snf)
def build_network(deepest=False):
    dropout = [0., 0.1, 0.2, 0.3, 0.4]
    conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
    input = Input(shape=(3, 32, 32) if K._BACKEND == 'theano' else (32, 32, 3))
    output = fractal_net(
        c=3, b=5, conv=conv,
        drop_path=0.15, dropout=dropout,
        deepest=deepest)(input)
    output = Flatten()(output)
    output = Dense(NB_CLASSES, init='he_normal')(output)
    output = Activation('softmax')(output)
    model = Model(input=input, output=output)
    #optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
    #optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM, nesterov=True)
    optimizer = Adam()
    #optimizer = Nadam()
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    plot(model, to_file='model.png', show_shapes=True)
    return model
train_cnn.py (project: five-video-classification-methods, author: harvitronix)
def freeze_all_but_mid_and_top(model):
    """After we fine-tune the dense layers, train deeper."""
    # we chose to train the top 2 inception blocks, i.e. we will freeze
    # the first 172 layers and unfreeze the rest:
    for layer in model.layers[:172]:
        layer.trainable = False
    for layer in model.layers[172:]:
        layer.trainable = True

    # we need to recompile the model for these modifications to take effect
    # we use SGD with a low learning rate
    model.compile(
        optimizer=SGD(lr=0.0001, momentum=0.9),
        loss='categorical_crossentropy',
        metrics=['accuracy', 'top_k_categorical_accuracy'])

    return model
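A hedged usage sketch of the fine-tuning flow (the 172-layer cut-off corresponds to keras.applications' InceptionV3; the classifier head below is illustrative, not copied from the original file):

from keras.applications.inception_v3 import InceptionV3
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

base = InceptionV3(weights='imagenet', include_top=False)
x = GlobalAveragePooling2D()(base.output)
predictions = Dense(5, activation='softmax')(x)  # class count is illustrative
model = Model(base.input, predictions)
# train the fresh top layers first, then unfreeze the top two inception blocks:
model = freeze_all_but_mid_and_top(model)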
mnist_net2net.py (project: pCVR, author: xjtushilei)
def make_teacher_model(train_data, validation_data, epochs=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, input_shape=input_shape,
                     padding='same', name='conv1'))
    model.add(MaxPooling2D(2, name='pool1'))
    model.add(Conv2D(64, 3, padding='same', name='conv2'))
    model.add(MaxPooling2D(2, name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(num_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y,
                        epochs=epochs,
                        validation_data=validation_data)
    return model, history
cnn.py (project: dogsVScats, author: prajwalkr)
def runner(model, epochs):
    initial_LR = 0.001
    if not use_multiscale and not use_multicrop: training_gen, val_gen = DataGen()
    else: training_gen, val_gen = ms_traingen(), ms_valgen()

    model.compile(optimizer=SGD(initial_LR, momentum=0.9, nesterov=True), loss='binary_crossentropy')

    val_checkpoint = ModelCheckpoint('bestval.h5', 'val_loss', 1, True)
    cur_checkpoint = ModelCheckpoint('current.h5')
    # def lrForEpoch(i): return initial_LR
    lrScheduler = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, cooldown=1, verbose=1)
    print('Model compiled.')

    try:
        model.fit_generator(training_gen, samples_per_epoch, epochs,
                            verbose=1, validation_data=val_gen, nb_val_samples=nb_val_samples,
                            callbacks=[val_checkpoint, cur_checkpoint, lrScheduler])
    except Exception as e:
        print(e)
    finally:
        fname = dumper(model,'cnn')
        print('Model saved to disk at {}'.format(fname))
        return model
importance_sampling.py (project: importance-sampling, author: idiap)
def build_model(model, wrapper, dataset, hyperparams, reweighting):
    def build_optimizer(opt, hyperparams):
        return {
            "sgd": SGD(
                lr=hyperparams.get("lr", 0.001),
                momentum=hyperparams.get("momentum", 0.0)
            ),
            "adam": Adam(lr=hyperparams.get("lr", 0.001))
        }[opt]

    model = models.get(model)(dataset.shape, dataset.output_size)
    model.compile(
        optimizer=build_optimizer(
            hyperparams.get("opt", "adam"),
            hyperparams
        ),
        loss=model.loss,
        metrics=model.metrics
    )

    return get_models_dictionary(hyperparams, reweighting)[wrapper](model)
model.py (project: pydl, author: rafaeltg)
def get_optimizer(self):

        if self.opt == 'sgd':
            return k_opt.SGD(lr=self.learning_rate, momentum=self.momentum)

        if self.opt == 'rmsprop':
            return k_opt.RMSprop(lr=self.learning_rate)

        if self.opt == 'adagrad':
            return k_opt.Adagrad(lr=self.learning_rate)

        if self.opt == 'adadelta':
            return k_opt.Adadelta(lr=self.learning_rate)

        if self.opt == 'adam':
            return k_opt.Adam(lr=self.learning_rate)

        raise Exception('Invalid optimization function - %s' % self.opt)
mnist_net2net.py (project: NetworkCompress, author: luzai)
def make_teacher_model(train_data, validation_data, epochs=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, input_shape=input_shape,
                     padding='same', name='conv1'))
    model.add(MaxPooling2D(2, name='pool1'))
    model.add(Conv2D(64, 3, padding='same', name='conv2'))
    model.add(MaxPooling2D(2, name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(num_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y,
                        epochs=epochs,
                        validation_data=validation_data)
    return model, history
load_deepmodels.py (project: Youtube8mdataset_kagglechallenge, author: jasonlee27)
def train(self, model, saveto_path=''):
        x_train, y_train = get_data(self.train_data_path, "train", "frame", self.feature_type)
        print('%d training frame level samples.' % len(x_train))
        x_valid, y_valid = get_data(self.valid_data_path, "valid", "frame", self.feature_type)
        print('%d validation frame level samples.' % len(x_valid))

        sgd = SGD(lr=0.01,
                  decay=1e-6,
                  momentum=0.9,
                  nesterov=True)
        model.compile(loss='categorical_crossentropy',
                      optimizer=sgd,
                      metrics=['accuracy'])

        callbacks = list()
        callbacks.append(CSVLogger(LOG_FILE))
        callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, min_lr=0.0001))

        if saveto_path:
            callbacks.append(ModelCheckpoint(filepath=MODEL_WEIGHTS, verbose=1))

        model.fit(x_train,
                  y_train,
                  epochs=5,
                  callbacks=callbacks,
                  validation_data=(x_valid, y_valid))

        # Save the weights on completion.
        if saveto_path:
            model.save_weights(saveto_path)
inception_flowers_tune.py (project: keras-surgeon, author: BenWhetton)
def train_top_model():
    # Load the bottleneck features and labels
    train_features = np.load(open(output_dir+'bottleneck_features_train.npy', 'rb'))
    train_labels = np.load(open(output_dir+'bottleneck_labels_train.npy', 'rb'))
    validation_features = np.load(open(output_dir+'bottleneck_features_validation.npy', 'rb'))
    validation_labels = np.load(open(output_dir+'bottleneck_labels_validation.npy', 'rb'))

    # Create the top model for the inception V3 network, a single Dense layer
    # with softmax activation.
    top_input = Input(shape=train_features.shape[1:])
    top_output = Dense(5, activation='softmax')(top_input)
    model = Model(top_input, top_output)

    # Train the model using the bottleneck features and save the weights.
    model.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    csv_logger = CSVLogger(output_dir + 'top_model_training.csv')
    model.fit(train_features, train_labels,
              epochs=top_epochs,
              batch_size=batch_size,
              validation_data=(validation_features, validation_labels),
              callbacks=[csv_logger])
    model.save_weights(top_model_weights_path)
rnn-cnn-gan-enhancer.py (project: cnn-lstm-gan-music-generation, author: MarkSeygan)
def generate(SONG_LENGTH, nb):

    generator = generator_model()
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    generator.load_weights('generator')

    print "loading_latent_music"
    latent_music = trainLoadMusic.loadMusic("lstm_outputs", SONG_LENGTH)

    for i in range(nb):

        latent = random.choice(latent_music)

        song = generator.predict(latent, verbose=1)

        song = song.reshape((SONG_LENGTH, note_span_with_ligatures // 2, 2))
        song_0 = generate_from_probabilities(song)
        matrixToMidi(song_0, 'outputs/example {}'.format(i))
modular_neural_network.py (project: deep-learning-with-Keras, author: decordoba)
def __init__(self):
        filters1 = [16, 32, 64]  # filters1 = [4, 8, 16, 32, 64, 128, 256]
        filters2 = [16, 32, 64]  # filters2 = [4, 8, 16, 32, 64, 128, 256]
        losses1 = [losses.MSE, losses.MAE, losses.hinge, losses.categorical_crossentropy]  # losses1 = [losses.MSE, losses.MAE, losses.hinge, losses.categorical_crossentropy]
        optimizers1 = [optimizers.Adam()]  # optimizers1 = [optimizers.Adadelta(), optimizers.Adagrad(), optimizers.Adam(), optimizers.Adamax(), optimizers.SGD(), optimizers.RMSprop()]
        units1 = [16, 32, 64]  # units1 = [4, 8, 16, 32, 64, 128, 256]
        kernel_sizes1 = [(3, 3)]  # kernel_sizes = [(3, 3), (5, 5)]
        dropouts1 = [0.25]  # dropouts1 = [0.25, 0.5, 0.75]
        dropouts2 = [0.5]  # dropouts2 = [0.25, 0.5, 0.75]
        pool_sizes1 = [(2, 2)]  # pool_sizes1 = [(2, 2)]

        # create standard experiments structure
        self.experiments = {"filters1": filters1,
                            "filters2": filters2,
                            "losses1": losses1,
                            "units1": units1,
                            "optimizers1": optimizers1,
                            "kernel_sizes1": kernel_sizes1,
                            "dropouts1": dropouts1,
                            "dropouts2": dropouts2,
                            "pool_sizes1": pool_sizes1}
utils_models.py (project: auto_ml, author: ClimbsRocks)
def get_optimizer(name='Adadelta'):
    if name == 'SGD':
        return optimizers.SGD(clipnorm=1.)
    if name == 'RMSprop':
        return optimizers.RMSprop(clipnorm=1.)
    if name == 'Adagrad':
        return optimizers.Adagrad(clipnorm=1.)
    if name == 'Adadelta':
        return optimizers.Adadelta(clipnorm=1.)
    if name == 'Adam':
        return optimizers.Adam(clipnorm=1.)
    if name == 'Adamax':
        return optimizers.Adamax(clipnorm=1.)
    if name == 'Nadam':
        return optimizers.Nadam(clipnorm=1.)

    return optimizers.Adam(clipnorm=1.)
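A brief usage sketch (the model is a stand-in to show the call, not part of the original file); note that every optimizer returned here clips gradients to an L2 norm of 1:

from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(10, input_dim=20, activation='softmax'))
model.compile(optimizer=get_optimizer('SGD'),
              loss='categorical_crossentropy',
              metrics=['accuracy'])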
mnist_net2net.py (project: keras-customized, author: ambrite)
def make_teacher_model(train_data, validation_data, nb_epoch=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, 3, input_shape=input_shape,
                     border_mode='same', name='conv1'))
    model.add(MaxPooling2D(name='pool1'))
    model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2'))
    model.add(MaxPooling2D(name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(nb_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y, nb_epoch=nb_epoch,
                        validation_data=validation_data)
    return model, history
luna2016_3d_predict.py (project: huaat_ml_dl, author: ieee820)
def predict_by_one(cube):
    # load json and create model
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("model.hdf5")
    print("Loaded model from disk")
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    loaded_model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    x = cube.reshape(-1, 1, 6, 20, 20)
    print(x.shape)
    result = loaded_model.predict(x, batch_size=10, verbose=0)
    # print(result.shape)
    # show result
    for i in result:
        print(i[0], i[1])
    return result
cnn.py (project: HSICNN, author: jamesbing)
def Net_model(lr=0.005, decay=1e-6, momentum=0.9):
    model = Sequential()
    model.add(Convolution2D(nb_filters1, nb_conv, nb_conv,
                            border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))

    model.add(Convolution2D(nb_filters2, nb_conv, nb_conv))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    #model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(1000))  # fully connected layer
    model.add(Activation('tanh'))
    #model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    return model
mlpclassification.py (project: LSTM-GRU-CNN-MLP, author: ansleliu)
def build_model(layers):
    model = Sequential()

    model.add(Dense(layers[1], input_shape=(20,), activation='relu'))
    model.add(Dropout(0.2))  # dropout to reduce overfitting

    # model.add(Dense(layers[2],activation='tanh'))
    # model.add(Dropout(0.2))  # Dropout overfitting

    model.add(Dense(layers[2], activation='relu'))
    model.add(Dropout(0.2))  # dropout to reduce overfitting

    model.add(Dense(output_dim=layers[3]))
    model.add(Activation("softmax"))

    model.summary()

    start = time.time()
    # sgd = SGD(lr=0.5, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss="mse", optimizer=sgd)
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy']) # Nadam RMSprop()
    print "Compilation Time : ", time.time() - start
    return model
mlp.py (project: LSTM-GRU-CNN-MLP, author: ansleliu)
def build_model(layers):
    model = Sequential()

    model.add(Dense(layers[1], input_shape=(20,), activation='tanh'))
    model.add(Dropout(0.2))  # dropout to reduce overfitting

    # model.add(Dense(layers[2],activation='tanh'))
    # model.add(Dropout(0.2))  # Dropout overfitting

    model.add(Dense(layers[2], activation='tanh'))
    model.add(Dropout(0.2))  # dropout to reduce overfitting

    model.add(Dense(output_dim=layers[3]))
    model.add(Activation("linear"))

    start = time.time()
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss="mse", optimizer=sgd)
    model.compile(loss="mse", optimizer="adam") # Nadam
    print "Compilation Time : ", time.time() - start
    return model
base.py (project: motion-classification, author: matthiasplappert)
def fit(self, X, y):
        assert isinstance(X, list)  # TODO: this should not be an assert
        assert len(y) > 0
        assert len(X) == len(y)

        X = pad_sequences(X)
        print(X.shape, y.shape)

        n_features = X.shape[2]
        self.n_labels_ = y.shape[1]
        print(n_features, self.n_labels_)

        model = Sequential()
        model.add(GRU(n_features, 128))
        model.add(Dropout(0.1))
        model.add(BatchNormalization(128))
        model.add(Dense(128, self.n_labels_))
        model.add(Activation('sigmoid'))

        sgd = opt.SGD(lr=0.005, decay=1e-6, momentum=0., nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=sgd, class_mode='categorical')

        model.fit(X, y, batch_size=self.n_batch_size, nb_epoch=self.n_epochs, show_accuracy=True)
        self.model_ = model
train.py (project: tartarus, author: sergiooramas)
def build_model(config):
    """Builds the cnn."""
    params = config.model_arch
    get_model = getattr(models, 'get_model_'+str(params['architecture']))
    model = get_model(params)
    #model = model_kenun.build_convnet_model(params)
    # Learning setup
    t_params = config.training_params
    sgd = SGD(lr=t_params["learning_rate"], decay=t_params["decay"],
              momentum=t_params["momentum"], nesterov=t_params["nesterov"])
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    optimizer = eval(t_params['optimizer'])
    metrics = ['mean_squared_error']
    if config.model_arch["final_activation"] == 'softmax':
        metrics.append('categorical_accuracy')
    if t_params['loss_func'] == 'cosine':
        loss_func = eval(t_params['loss_func'])
    else:
        loss_func = t_params['loss_func']
    model.compile(loss=loss_func, optimizer=optimizer, metrics=metrics)

    return model
classify_text.py (project: dsr16_nlp, author: honnibal)
def __init__(self, widths, vocab_size=5000):
        from keras.models import Sequential
        from keras.layers import Embedding, Dense, TimeDistributedMerge
        from keras.layers.advanced_activations import ELU
        from keras.preprocessing.sequence import pad_sequences
        from keras.optimizers import SGD
        self.n_classes = widths[-1]
        self.vocab_size = vocab_size
        self.word_to_int = {}
        self.int_to_word = np.ndarray(shape=(vocab_size+1,), dtype='int64')
        self.model = Sequential()
        self.model.add(Embedding(vocab_size, widths[0]))
        self.model.add(TimeDistributedMerge(mode='ave'))
        for width in widths[1:-1]:
            layer = Dense(output_dim=width, init='he_normal', activation=ELU(1.0))
            self.model.add(layer)
        self.model.add(
            Dense(
                self.n_classes,
                init='zero',
                activation='softmax'))
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(loss='categorical_crossentropy', optimizer=sgd)
use_charnet.py (project: reading-text-in-the-wild, author: mathDR)
def __init__(self, architecture_file=None, weight_file=None, optimizer=None):
        # Generate mapping for softmax layer to characters
        output_str = '0123456789abcdefghijklmnopqrstuvwxyz '
        self.output = [x for x in output_str]
        self.L = len(self.output)

        # Load model and saved weights
        from keras.models import model_from_json
        if architecture_file is None:
            self.model = model_from_json(open('char2_architecture.json').read())
        else:
            self.model = model_from_json(open(architecture_file).read())

        if weight_file is None:
            self.model.load_weights('char2_weights.h5')
        else:
            self.model.load_weights(weight_file)

        if optimizer is None:
            from keras.optimizers import SGD
            optimizer = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(loss='categorical_crossentropy', optimizer=optimizer)
mnist_net2net_gpu.py (project: keras-mxnet-benchmarks, author: sandeep-krishnamurthy)
def make_teacher_model(train_data, validation_data, nb_epoch=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, 3, input_shape=input_shape,
                     border_mode='same', name='conv1'))
    model.add(MaxPooling2D(name='pool1'))
    model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2'))
    model.add(MaxPooling2D(name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(nb_class, activation='softmax', name='fc2'))
    model = make_model(model, loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y, nb_epoch=nb_epoch,
                        validation_data=validation_data)
    return model, history
model_fit_history.py (project: Exoplanet-Artificial-Intelligence, author: pearsonkyle)
def make_wave(maxlen):
    model = Sequential()
    # dense block 1
    model.add(Dense(64, input_dim=maxlen, kernel_initializer='he_normal', bias_initializer='zeros'))
    model.add(PRELU())
    model.add(Dropout(0.25))

    model.add(Dense(32))
    model.add(PRELU())

    model.add(Dense(8))
    model.add(PRELU())

    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    SGDsolver = SGD(lr=0.1, momentum=0.25, decay=0.0001, nesterov=True)
    model.compile(loss='binary_crossentropy',
                optimizer=SGDsolver,
                metrics=['accuracy'])
    return model
agent_utils.py (project: Reinforcement_Learning_Project, author: AaronYALai)
def initAgent(neurons=512, layers=1, lr=1e-3,
              moment=0.9, width=19, alpha=0.1):
    """Initialize agent: specify num of neurons and hidden layers"""
    model = Sequential()
    model.add(Dense(2 * width**2, init='lecun_uniform',
              input_shape=(2 * width**2,)))
    model.add(LeakyReLU(alpha=alpha))

    for i in range(layers):
        model.add(Dense(neurons, init='lecun_uniform'))
        model.add(LeakyReLU(alpha=alpha))
        model.add(Dropout(0.2))

    model.add(Dense(width**2, init='lecun_uniform'))
    # use linear output layer to generate real-valued outputs
    model.add(Activation('linear'))

    # opt = RMSprop(lr=lr)
    opt = SGD(lr=lr, momentum=moment, decay=1e-18, nesterov=False)
    model.compile(loss='mse', optimizer=opt)

    return model
inceptionsvm.py (project: cancer_nn, author: tanmoyopenroot)
def topModel(optimizer='adam', init='glorot_uniform'):
    # Create Model
    model = Sequential()
    model.add(Flatten(input_shape=(7, 7, 512)))
    model.add(Dense(1024, kernel_initializer=init, activation="relu"))
    model.add(Dropout(0.7))
    # model.add(Dense(4096, activation="relu"))
    model.add(Dense(1, kernel_initializer=init, activation="sigmoid"))

    # Compile model
    model.compile(loss='binary_crossentropy', optimizer=optimizer,  metrics=['accuracy'])

    # model.compile(
    #     loss='binary_crossentropy',
    #     optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
    #     metrics=['accuracy']
    # )

    return model
vgg16svm.py (project: cancer_nn, author: tanmoyopenroot)
def topModel(optimizer='adam', init='glorot_uniform'):
    # Create Model
    model = Sequential()
    model.add(Flatten(input_shape=(7, 7, 512)))
    model.add(Dense(256, kernel_initializer=init, activation="relu"))
    model.add(Dropout(0.7))
    # model.add(Dense(4096, activation="relu"))
    model.add(Dense(1, kernel_initializer=init, activation="sigmoid"))

    # Compile model
    model.compile(loss='binary_crossentropy', optimizer=optimizer,  metrics=['accuracy'])

    # model.compile(
    #     loss='binary_crossentropy',
    #     optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
    #     metrics=['accuracy']
    # )

    return model
flowers_gridsearch.py (project: cv_ml, author: techfort)
def create_model(learning_rate=0.1, momentum=0.9):
    model = Sequential()
    model.add(Convolution2D(20, 9, 9, border_mode='same', input_shape=(3, SIZE, SIZE)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(50, 5, 5, activation="relu"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(768, input_dim=3072, init='uniform', activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(384, init='uniform', activation='relu', W_constraint=maxnorm(3)))
    model.add(Dense(4))
    model.add(Activation("softmax"))
    sgd = SGD(lr=learning_rate, momentum=momentum, nesterov=True, decay=1e-6)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=["accuracy"])
    return model
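Given the file name, create_model is presumably handed to scikit-learn's grid search through the Keras wrapper; a minimal sketch of that wiring, with an illustrative parameter grid (X_train and y_train are assumed to exist):

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

clf = KerasClassifier(build_fn=create_model, nb_epoch=10, batch_size=32, verbose=0)
param_grid = {'learning_rate': [0.05, 0.1, 0.2], 'momentum': [0.8, 0.9]}
grid = GridSearchCV(clf, param_grid=param_grid)
# grid_result = grid.fit(X_train, y_train)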

