Python examples of the Adadelta() optimizer class
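The snippets below, collected from a number of open-source projects, show how Keras's Adadelta optimizer is created and passed to model.compile(). As a point of reference, here is a minimal usage sketch; the model, layer sizes, and loss are placeholders for illustration and are not taken from any of the projects below:

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adadelta

# Toy classifier compiled with Adadelta; all sizes are illustrative only.
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=Adadelta(), loss='categorical_crossentropy', metrics=['accuracy'])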

vae.py (project: KATE, author: hugochan)
def fit(self, train_X, val_X, nb_epoch=50, batch_size=100):
        print('Training variational autoencoder')
        optimizer = Adadelta(lr=2.)
        self.vae.compile(optimizer=optimizer, loss=self.vae_loss)

        self.vae.fit(train_X[0], train_X[1],
                shuffle=True,
                epochs=nb_epoch,
                batch_size=batch_size,
                validation_data=(val_X[0], val_X[1]),
                callbacks=[ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
                            EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=1, mode='auto'),
                            CustomModelCheckpoint(self.encoder, self.save_model, monitor='val_loss', save_best_only=True, mode='auto')
                        ]
                )

        return self
models.py (project: AutoSleepScorerDev, author: skjerns)
def rcnn(input_shape, n_classes):
    """
    Input size should be [batch, 1d, ch] = (XXX, 3000, 1)
    """
    model = Sequential(name='RCNN test')
    model.add(Conv1D (kernel_size = (200), filters = 20, batch_input_shape=input_shape, activation='elu'))
    model.add(MaxPooling1D(pool_size = (20), strides=(10)))
    model.add(Conv1D (kernel_size = (20), filters = 200, activation='elu'))
    model.add(MaxPooling1D(pool_size = (10), strides=(3)))
    model.add(Conv1D (kernel_size = (20), filters = 200, activation='elu'))
    model.add(MaxPooling1D(pool_size = (10), strides=(3)))
    model.add(Dense (512, activation='elu'))
    model.add(Dense (512, activation='elu'))
    model.add(Reshape((1,model.output_shape[1])))
    model.add(LSTM(256, stateful=True, return_sequences=False))
    model.add(Dropout(0.3))
    model.add(Dense(n_classes, activation = 'sigmoid'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
models.py (project: AutoSleepScorerDev, author: skjerns)
def rnn_old(input_shape, n_classes):
    """
    Input size should be [batch, 1d, 2d, ch] = (None, 1, 15000, 1)
    """
    model = Sequential(name='Simple 1D CNN')
    model.add(keras.layers.LSTM(50, stateful=True, batch_input_shape=input_shape, return_sequences=False))
    model.add(Dense(n_classes, activation='sigmoid'))
    print(model.output_shape)
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta(), metrics=[keras.metrics.categorical_accuracy])
    return model








#%% old models
utils.py (project: deeppavlov, author: deepmipt)
def getOptimizer(optim, exp_decay, grad_norm_clip, lr = 0.001):
    """Function for setting up optimizer, combines several presets from
    published well performing models on SQuAD."""

    optimizers = {
        'Adam': Adam(lr=lr, decay=exp_decay, clipnorm=grad_norm_clip),
        'Adamax': Adamax(lr=lr, decay=exp_decay, clipnorm=grad_norm_clip),
        'Adadelta': Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, decay=exp_decay, clipnorm=grad_norm_clip)
    }

    try:
        optimizer = optimizers[optim]
    except KeyError as e:
        raise ValueError('problems with defining optimizer: {}'.format(e.args[0]))

    del (optimizers)
    return optimizer

# ------------------------------------------------------------------------------
# Data/model utilities.
# ------------------------------------------------------------------------------
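For orientation, a hedged example of how a helper like this might be invoked; the argument values are illustrative and not taken from the project:

# Hypothetical call; assumes the Adam/Adamax/Adadelta imports used above are in scope.
optimizer = getOptimizer('Adadelta', exp_decay=1e-6, grad_norm_clip=5.0, lr=0.001)
# model.compile(optimizer=optimizer, loss='categorical_crossentropy')  # 'model' is a placeholder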
deepae.py (project: KATE, author: hugochan)
def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, feature_weights=None):
        print('Training autoencoder')
        optimizer = Adadelta(lr=1.5)
        # optimizer = Adam()
        # optimizer = Adagrad()
        if feature_weights is None:
            self.autoencoder.compile(optimizer=optimizer, loss='binary_crossentropy') # kld, binary_crossentropy, mse
        else:
            print('Using weighted loss')
            self.autoencoder.compile(optimizer=optimizer, loss=weighted_binary_crossentropy(feature_weights)) # kld, binary_crossentropy, mse

        self.autoencoder.fit(train_X[0], train_X[1],
                        nb_epoch=nb_epoch,
                        batch_size=batch_size,
                        shuffle=True,
                        validation_data=(val_X[0], val_X[1]),
                        callbacks=[
                                    ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
                                    EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=1, mode='auto'),
                                    # ModelCheckpoint(self.model_save_path, monitor='val_loss', save_best_only=True, verbose=0),
                        ]
                        )

        return self
ae.py (project: KATE, author: hugochan)
def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, contractive=None):
        optimizer = Adadelta(lr=2.)
        # optimizer = Adam()
        # optimizer = Adagrad()
        if contractive:
            print('Using contractive loss, lambda: %s' % contractive)
            self.autoencoder.compile(optimizer=optimizer, loss=contractive_loss(self, contractive))
        else:
            print('Using binary crossentropy')
            self.autoencoder.compile(optimizer=optimizer, loss='binary_crossentropy') # kld, binary_crossentropy, mse

        self.autoencoder.fit(train_X[0], train_X[1],
                        epochs=nb_epoch,
                        batch_size=batch_size,
                        shuffle=True,
                        validation_data=(val_X[0], val_X[1]),
                        callbacks=[
                                    ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
                                    EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=1, mode='auto'),
                                    CustomModelCheckpoint(self.encoder, self.save_model, monitor='val_loss', save_best_only=True, mode='auto')
                        ]
                        )

        return self
model.py (project: pydl, author: rafaeltg)
def get_optimizer(self):

        if self.opt == 'sgd':
            return k_opt.SGD(lr=self.learning_rate, momentum=self.momentum)

        if self.opt == 'rmsprop':
            return k_opt.RMSprop(lr=self.learning_rate)

        if self.opt == 'adagrad':
            return k_opt.Adagrad(lr=self.learning_rate)

        if self.opt == 'adadelta':
            return k_opt.Adadelta(lr=self.learning_rate)

        if self.opt == 'adam':
            return k_opt.Adam(lr=self.learning_rate)

        raise Exception('Invalid optimization function - %s' % self.opt)
modular_neural_network.py (project: deep-learning-with-Keras, author: decordoba)
def __init__(self):
        filters1 = [16, 32, 64]  # filters1 = [4, 8, 16, 32, 64, 128, 256]
        filters2 = [16, 32, 64]  # filters2 = [4, 8, 16, 32, 64, 128, 256]
        losses1 = [losses.MSE, losses.MAE, losses.hinge, losses.categorical_crossentropy]  # losses1 = [losses.MSE, losses.MAE, losses.hinge, losses.categorical_crossentropy]
        optimizers1 = [optimizers.Adam()]  # optimizers1 = [optimizers.Adadelta(), optimizers.Adagrad(), optimizers.Adam(), optimizers.Adamax(), optimizers.SGD(), optimizers.RMSprop()]
        units1 = [16, 32, 64]  # units1 = [4, 8, 16, 32, 64, 128, 256]
        kernel_sizes1 = [(3, 3)]  # kernel_sizes = [(3, 3), (5, 5)]
        dropouts1 = [0.25]  # dropouts1 = [0.25, 0.5, 0.75]
        dropouts2 = [0.5]  # dropouts2 = [0.25, 0.5, 0.75]
        pool_sizes1 = [(2, 2)]  # pool_sizes1 = [(2, 2)]

        # create standard experiments structure
        self.experiments = {"filters1": filters1,
                            "filters2": filters2,
                            "losses1": losses1,
                            "units1": units1,
                            "optimizers1": optimizers1,
                            "kernel_sizes1": kernel_sizes1,
                            "dropouts1": dropouts1,
                            "dropouts2": dropouts2,
                            "pool_sizes1": pool_sizes1}
utils_models.py (project: auto_ml, author: ClimbsRocks)
def get_optimizer(name='Adadelta'):
    if name == 'SGD':
        return optimizers.SGD(clipnorm=1.)
    if name == 'RMSprop':
        return optimizers.RMSprop(clipnorm=1.)
    if name == 'Adagrad':
        return optimizers.Adagrad(clipnorm=1.)
    if name == 'Adadelta':
        return optimizers.Adadelta(clipnorm=1.)
    if name == 'Adam':
        return optimizers.Adam(clipnorm=1.)
    if name == 'Adamax':
        return optimizers.Adamax(clipnorm=1.)
    if name == 'Nadam':
        return optimizers.Nadam(clipnorm=1.)

    return optimizers.Adam(clipnorm=1.)
categorical_crossentropy_example.py (project: keras-semantic-segmentation-example, author: mrgloom)
def get_model():

    inputs = Input((IMAGE_H, IMAGE_W, INPUT_CHANNELS))

    base = models.get_fcn_vgg16_32s(inputs, NUMBER_OF_CLASSES)
    #base = models.get_fcn_vgg16_16s(inputs, NUMBER_OF_CLASSES)
    #base = models.get_fcn_vgg16_8s(inputs, NUMBER_OF_CLASSES)
    #base = models.get_unet(inputs, NUMBER_OF_CLASSES)
    #base = models.get_segnet_vgg16(inputs, NUMBER_OF_CLASSES)

    # softmax
    reshape= Reshape((-1,NUMBER_OF_CLASSES))(base)
    act = Activation('softmax')(reshape)

    model = Model(inputs=inputs, outputs=act)
    model.compile(optimizer=Adadelta(), loss='categorical_crossentropy')

    #print(model.summary())
    #sys.exit()

    return model
binary_crossentropy_example.py (project: keras-semantic-segmentation-example, author: mrgloom)
def get_model():

    inputs = Input((IMAGE_H, IMAGE_W, INPUT_CHANNELS))

    base = models.get_fcn_vgg16_32s(inputs, NUMBER_OF_CLASSES)
    #base = models.get_fcn_vgg16_16s(inputs, NUMBER_OF_CLASSES)
    #base = models.get_fcn_vgg16_8s(inputs, NUMBER_OF_CLASSES)
    #base = models.get_unet(inputs, NUMBER_OF_CLASSES)
    #base = models.get_segnet_vgg16(inputs, NUMBER_OF_CLASSES)

    # sigmoid
    reshape= Reshape((-1,NUMBER_OF_CLASSES))(base)
    act = Activation('sigmoid')(reshape)

    model = Model(inputs=inputs, outputs=act)
    model.compile(optimizer=Adadelta(), loss='binary_crossentropy')

    #print(model.summary())
    #sys.exit()

    return model
optimizers.py (project: nea, author: nusnlp)
def get_optimizer(args):

    clipvalue = 0
    clipnorm = 10

    if args.algorithm == 'rmsprop':
        optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'sgd':
        optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adagrad':
        optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adadelta':
        optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adam':
        optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adamax':
        optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)

    return optimizer
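A hedged call example for the function above; SimpleNamespace stands in for the project's parsed command-line arguments:

# Hypothetical invocation; in the project, 'args' comes from the argument parser.
from types import SimpleNamespace
args = SimpleNamespace(algorithm='adadelta')
optimizer = get_optimizer(args)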
viddesc_model.py (project: ABiViRNet, author: lvapeab)
def setOptimizer(self, **kwargs):

        """
        Sets a new optimizer for the Translation_Model.
        :param **kwargs:
        """

        # compile differently depending if our model is 'Sequential' or 'Graph'
        if self.verbose > 0:
            logging.info("Preparing optimizer and compiling.")
        if self.params['OPTIMIZER'].lower() == 'adam':
            optimizer = Adam(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        elif self.params['OPTIMIZER'].lower() == 'rmsprop':
            optimizer = RMSprop(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        elif self.params['OPTIMIZER'].lower() == 'nadam':
            optimizer = Nadam(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        elif self.params['OPTIMIZER'].lower() == 'adadelta':
            optimizer = Adadelta(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        elif self.params['OPTIMIZER'].lower() == 'sgd':
            optimizer = SGD(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        else:
            logging.info('\tWARNING: The modification of the LR is not implemented for the chosen optimizer.')
            optimizer = eval(self.params['OPTIMIZER'])
        self.model.compile(optimizer=optimizer, loss=self.params['LOSS'],
                           sample_weight_mode='temporal' if self.params['SAMPLE_WEIGHTS'] else None)
models.py (project: AutoSleepScorerDev, author: skjerns)
def cnn1d(input_shape, n_classes ):
    """
    Input size should be [batch, length, ch] = (None, 3000, 1)
    """
    model = Sequential(name='1D CNN')
    model.add(Conv1D (kernel_size = (50), filters = 150, strides=5, input_shape=input_shape, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(Conv1D (kernel_size = (8), filters = 200, strides=2, input_shape=input_shape, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size = (10), strides=(2)))
    print(model.output_shape)

    model.add(Conv1D (kernel_size = (8), filters = 400, strides=2, input_shape=input_shape, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(Flatten())
    model.add(Dense (700, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense (700, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta(), metrics=[keras.metrics.categorical_accuracy])
    return model
models.py (project: AutoSleepScorerDev, author: skjerns)
def cnn1(input_shape, n_classes):
    """
    Input size should be [batch, length, ch] = (None, 3000, 3)
    """
    model = Sequential(name='no_MP_small_filters')
    model.add(Conv1D (kernel_size = (10), filters = 64, strides=2, input_shape=input_shape, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (10), filters = 64, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (10), filters = 128, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (10), filters = 128, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (10), filters = 150, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Flatten())
    model.add(Dense (1024, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense (1024, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
models.py (project: AutoSleepScorerDev, author: skjerns)
def cnn2(input_shape, n_classes):
    """
    Input size should be [batch, length, ch] = (None, 3000, 3)
    """
    model = Sequential(name='MP_small_filters')
    model.add(Conv1D (kernel_size = (10), filters = 64, strides=2, input_shape=input_shape, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())

    model.add(Conv1D (kernel_size = (10), filters = 64, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())

    model.add(Conv1D (kernel_size = (10), filters = 128, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())

    model.add(Flatten())
    model.add(Dense (500, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense (500, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
models.py (project: AutoSleepScorerDev, author: skjerns)
def cnn4(input_shape, n_classes):
    """
    Input size should be [batch, length, ch] = (None, 3000, 3)
    """
    model = Sequential(name='large_kernel')
    model.add(Conv1D (kernel_size = (100), filters = 128, strides=10, input_shape=input_shape, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D (kernel_size = (100), filters = 128, strides=1, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (100), filters = 128, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Flatten())
    model.add(Dense (768, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense (768, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
models.py (project: AutoSleepScorerDev, author: skjerns)
def cnn5(input_shape, n_classes):
    """
    Input size should be [batch, length, ch] = (None, 3000, 3)
    """
    model = Sequential(name='very_large_kernel')
    model.add(Conv1D (kernel_size = (200), filters = 128, strides=3, input_shape=input_shape, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D (kernel_size = (200), filters = 128, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (200), filters = 128, strides=1, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (10), filters = 128, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Flatten())
    model.add(Dense (768, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense (768, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
test_optimizers.py (project: keras, author: GeekLiB)
def test_adadelta():
    _test_optimizer(Adadelta(), target=0.83)
    _test_optimizer(Adadelta(decay=1e-3), target=0.83)
KerasCallback.py (project: aetros-cli, author: aetros)
def get_learning_rate(self):

        if hasattr(self.model, 'optimizer'):
            config = self.model.optimizer.get_config()

            from keras.optimizers import Adadelta, Adam, Adamax, Adagrad, RMSprop, SGD

            if isinstance(self.model.optimizer, Adadelta) or isinstance(self.model.optimizer, Adam) \
                    or isinstance(self.model.optimizer, Adamax) or isinstance(self.model.optimizer, Adagrad)\
                    or isinstance(self.model.optimizer, RMSprop) or isinstance(self.model.optimizer, SGD):
                return config['lr'] * (1. / (1. + config['decay'] * float(K.get_value(self.model.optimizer.iterations))))

            elif 'lr' in config:
                return config['lr']
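The expression above is Keras's time-based decay, lr_t = lr / (1 + decay * iterations). A standalone sketch of the same arithmetic with made-up values:

# Illustrative values only; reproduces the decayed-learning-rate formula used above.
lr, decay, iterations = 0.001, 1e-6, 10000
decayed_lr = lr * (1. / (1. + decay * float(iterations)))
print(decayed_lr)  # roughly 0.00099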
train_val_FCN_DA.py (project: AdaptationSeg, author: YangZhang4065)
def on_batch_end(self, batch, logs={}):
        if np.isnan(logs.get('loss')): #Model contain NaN
            print('NaN detected, reloading model')
            self.model.compile(optimizer=Adadelta(),
              loss={'output': SP_pixelwise_loss, 'output_2': layout_loss_hard},
              loss_weights={'output': 1.,'output_2':0.1})
            self.model.load_weights(output_name)
utils_models.py (project: auto_ml, author: ClimbsRocks)
def make_deep_learning_model(hidden_layers=None, num_cols=None, optimizer='Adadelta', dropout_rate=0.2, weight_constraint=0, feature_learning=False, kernel_initializer='normal', activation='elu'):

    if feature_learning == True and hidden_layers is None:
        hidden_layers = [1, 0.75, 0.25]

    if hidden_layers is None:
        hidden_layers = [1, 0.75, 0.25]

    # The hidden_layers passed to us is simply describing a shape. it does not know the num_cols we are dealing with, it is simply values of 0.5, 1, and 2, which need to be multiplied by the num_cols
    scaled_layers = []
    for layer in hidden_layers:
        scaled_layers.append(min(int(num_cols * layer), 10))

    # If we're training this model for feature_learning, our penultimate layer (our final hidden layer before the "output" layer) will always have 10 neurons, meaning that we always output 10 features from our feature_learning model
    if feature_learning == True:
        scaled_layers.append(10)

    model = Sequential()

    model.add(Dense(scaled_layers[0], input_dim=num_cols, kernel_initializer=kernel_initializer, kernel_regularizer=regularizers.l2(0.01)))
    model.add(get_activation_layer(activation))

    for layer_size in scaled_layers[1:-1]:
        model.add(Dense(layer_size, kernel_initializer=kernel_initializer, kernel_regularizer=regularizers.l2(0.01)))
        model.add(get_activation_layer(activation))

    # There are times we will want the output from our penultimate layer, not the final layer, so give it a name that makes the penultimate layer easy to find
    model.add(Dense(scaled_layers[-1], kernel_initializer=kernel_initializer, name='penultimate_layer', kernel_regularizer=regularizers.l2(0.01)))
    model.add(get_activation_layer(activation))

    # For regressors, we want an output layer with a single node
    model.add(Dense(1, kernel_initializer=kernel_initializer))


    # The final step is to compile the model
    model.compile(loss='mean_squared_error', optimizer=get_optimizer(optimizer), metrics=['mean_absolute_error', 'mean_absolute_percentage_error'])

    return model
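A hedged usage sketch for the builder above; the column count and the commented training call are placeholders:

# Hypothetical usage: a regressor for a dataset with 40 input columns.
regressor = make_deep_learning_model(num_cols=40, optimizer='Adadelta')
# regressor.fit(X_train, y_train, epochs=10, batch_size=32)  # placeholder data names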
utils_models.py (project: auto_ml, author: ClimbsRocks)
def make_deep_learning_classifier(hidden_layers=None, num_cols=None, optimizer='Adadelta', dropout_rate=0.2, weight_constraint=0, final_activation='sigmoid', feature_learning=False, activation='elu', kernel_initializer='normal'):

    if feature_learning == True and hidden_layers is None:
        hidden_layers = [1, 0.75, 0.25]

    if hidden_layers is None:
        hidden_layers = [1, 0.75, 0.25]

    # The hidden_layers passed to us is simply describing a shape. it does not know the num_cols we are dealing with, it is simply values of 0.5, 1, and 2, which need to be multiplied by the num_cols
    scaled_layers = []
    for layer in hidden_layers:
        scaled_layers.append(min(int(num_cols * layer), 10))

    # If we're training this model for feature_learning, our penultimate layer (our final hidden layer before the "output" layer) will always have 10 neurons, meaning that we always output 10 features from our feature_learning model
    if feature_learning == True:
        scaled_layers.append(10)


    model = Sequential()

    # There are times we will want the output from our penultimate layer, not the final layer, so give it a name that makes the penultimate layer easy to find
    model.add(Dense(scaled_layers[0], input_dim=num_cols, kernel_initializer=kernel_initializer, kernel_regularizer=regularizers.l2(0.01)))
    model.add(get_activation_layer(activation))

    for layer_size in scaled_layers[1:-1]:
        model.add(Dense(layer_size, kernel_initializer=kernel_initializer, kernel_regularizer=regularizers.l2(0.01)))
        model.add(get_activation_layer(activation))

    model.add(Dense(scaled_layers[-1], kernel_initializer=kernel_initializer, name='penultimate_layer', kernel_regularizer=regularizers.l2(0.01)))
    model.add(get_activation_layer(activation))

    model.add(Dense(1, kernel_initializer=kernel_initializer, activation=final_activation))
    model.compile(loss='binary_crossentropy', optimizer=get_optimizer(optimizer), metrics=['accuracy', 'poisson'])
    return model
test_optimizers.py (project: keras-customized, author: ambrite)
def test_adadelta():
    _test_optimizer(Adadelta(), target=0.83)
    _test_optimizer(Adadelta(decay=1e-3), target=0.83)
model.py (project: CNN-Sentence-Classifier, author: shagunsodhani)
def _param_selector(args):
    '''Method to select parameters for models defined in Convolutional Neural Networks for
        Sentence Classification paper by Yoon Kim'''
    filtersize_list = [3, 4, 5]
    number_of_filters_per_filtersize = [100, 100, 100]
    pool_length_list = [2, 2, 2]
    dropout_list = [0.5, 0.5]
    optimizer = Adadelta(clipvalue=3)
    use_embeddings = True
    embeddings_trainable = False

    if (args.model_name.lower() == 'cnn-rand'):
        use_embeddings = False
        embeddings_trainable = True
    elif (args.model_name.lower() == 'cnn-static'):
        pass
    elif (args.model_name.lower() == 'cnn-non-static'):
        embeddings_trainable = True
    else:
        filtersize_list = [3, 4, 5]
        number_of_filters_per_filtersize = [150, 150, 150]
        pool_length_list = [2, 2, 2]
        dropout_list = [0.25, 0.5]
        optimizer = RMSprop(lr=args.learning_rate, decay=args.decay_rate,
                            clipvalue=args.grad_clip)
        use_embeddings = True
        embeddings_trainable = True
    return (filtersize_list, number_of_filters_per_filtersize, pool_length_list,
            dropout_list, optimizer, use_embeddings, embeddings_trainable)
run_utils.py (project: deep-mlsa, author: spinningbytes)
def get_optimizer(config_data):
    options = config_data['optimizer']
    name = options['name']

    if name == 'adadelta':
        return optimizers.Adadelta(lr=options['lr'], rho=options['rho'], epsilon=options['epsilon'])
    else:
        return optimizers.SGD()
test_optimizers.py (project: keras, author: NVIDIA)
def test_adadelta():
    _test_optimizer(Adadelta(), target=0.83)
    _test_optimizer(Adadelta(decay=1e-3), target=0.83)
AlexNet.py (project: Papers2Code, author: rainer85ah)
def compile(self, optimizer='sgd'):

        optimizer_dicc = {'sgd': optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
                          'rmsprop': optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0),
                          'adagrad': optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0),
                          'adadelta': optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
                          'adam': optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)}

        self.model.compile(optimizer=optimizer_dicc[optimizer], loss='categorical_crossentropy', metrics=['accuracy'])
        return self.model
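A hedged usage sketch, assuming an AlexNet instance that exposes the compile method above (the constructor arguments are not shown in this excerpt):

# Hypothetical usage; 'net' stands in for a constructed AlexNet object.
net = AlexNet()
model = net.compile(optimizer='adadelta')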

