Python Adam() class: example source code

cifar100_fractal.py (project: keras-fractalnet, author: snf)
def build_network(deepest=False):
    dropout = [0., 0.1, 0.2, 0.3, 0.4]
    conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
    input = Input(shape=(3, 32, 32))
    output = fractal_net(
        c=3, b=5, conv=conv,
        drop_path=0.15, dropout=dropout,
        deepest=deepest)(input)
    output = Flatten()(output)
    output = Dense(NB_CLASSES, init='he_normal')(output)
    output = Activation('softmax')(output)
    model = Model(input=input, output=output)
    optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
    #optimizer = RMSprop(lr=LEARN_START)
    #optimizer = Adam()
    #optimizer = Nadam()
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    plot(model, to_file='model.png')
    return model
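
For reference, a bare Adam() falls back to the library defaults; in the Keras 1.x-era API these snippets target, that is roughly equivalent to:

from keras.optimizers import Adam

# Keras 1.x-era defaults (beta/epsilon values follow the Adam paper)
optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0.0)
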
genderclassifier.py (project: namegenderclassifier, author: joaoalvarenga)
def train(self, dataset, train_split=0.8, dense_size=32, learning_rate=0.001, batch_size=32, epochs=50, activation='relu'):
        self.__load_dataset(dataset, train_split)

        train_x = np.array(self.__train_data[:, 0].tolist())
        train_y = to_categorical(self.__train_data[:, 1], 2)

        test_x = np.array(self.__test_data[:, 0].tolist())
        test_y = to_categorical(self.__test_data[:, 1], 2)

        print(train_x.shape)
        self.__model = Sequential()
        self.__model.add(Dense(dense_size, input_dim=train_x.shape[1], activation=activation, init='glorot_uniform'))
        self.__model.add(Dense(train_y.shape[1], activation='softmax', init='glorot_uniform'))
        self.__model.compile(optimizer=Adam(lr=learning_rate), loss='categorical_crossentropy', metrics=['categorical_accuracy'])

        self.__model.fit(train_x, train_y, batch_size=batch_size, nb_epoch=epochs, validation_data=(test_x, test_y), verbose=2)
cifar10_fractal.py (project: keras-fractalnet, author: snf)
def build_network(deepest=False):
    dropout = [0., 0.1, 0.2, 0.3, 0.4]
    conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
    input = Input(shape=(3, 32, 32) if K._BACKEND == 'theano' else (32, 32, 3))
    output = fractal_net(
        c=3, b=5, conv=conv,
        drop_path=0.15, dropout=dropout,
        deepest=deepest)(input)
    output = Flatten()(output)
    output = Dense(NB_CLASSES, init='he_normal')(output)
    output = Activation('softmax')(output)
    model = Model(input=input, output=output)
    #optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
    #optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM, nesterov=True)
    optimizer = Adam()
    #optimizer = Nadam()
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    plot(model, to_file='model.png', show_shapes=True)
    return model
models.py (project: segmentation_DLMI, author: imatge-upc)
def compile_scae(model, lr=None):
        '''
        Compile the model
        '''

        # Optimizer values
        lr = 0.02 if lr is None else lr
        beta_1 = 0.9
        beta_2 = 0.999
        epsilon = 10 ** (-8)
        optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, clipnorm=1.)

        model.compile(
            optimizer=optimizer,
            loss=[lambda y_true, y_pred: y_pred],
        )

        return model
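
The loss above just returns y_pred, a common pattern when the model computes its own loss as an output tensor. A minimal self-contained sketch of the pattern (Keras 2-style, names hypothetical):

from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import backend as K

# hypothetical autoencoder whose output *is* its reconstruction loss
x = Input(shape=(784,))
x_hat = Dense(784, activation='sigmoid')(Dense(64, activation='relu')(x))
loss_out = Lambda(lambda t: K.mean(K.square(t[0] - t[1]), axis=-1, keepdims=True))([x, x_hat])
model = Model(x, loss_out)
model.compile(optimizer='adam', loss=lambda y_true, y_pred: y_pred)
# train against dummy targets, e.g. model.fit(X, np.zeros((len(X), 1)))
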
models.py (project: AutoSleepScorerDev, author: skjerns)
def largeann(input_shape, n_classes, layers=3, neurons=2000, dropout=0.35 ):
    """
    for working with extracted features
    """
#    gpu = switch_gpu()
#    with K.tf.device('/gpu:{}'.format(gpu)):
#        K.set_session(K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)))
    model = Sequential(name='ann')
#    model.gpu = gpu
    for l in range(layers):
        # input_shape is only used by the first layer; Keras ignores it on the rest
        model.add(Dense(neurons, input_shape=input_shape, activation='elu', kernel_initializer='he_normal'))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=[keras.metrics.categorical_accuracy])
    return model

#%% everything recurrent for ANN
model.py (project: deeppavlov, author: deepmipt)
def _init_from_saved(self, fname):

        with open(fname + '_opt.json', 'r') as opt_file:
            self.opt = json.load(opt_file)

        if self.model_type == 'nn':
            if self.model_name == 'cnn_word':
                self.model = self.cnn_word_model()
            if self.model_name == 'lstm_word':
                self.model = self.lstm_word_model()

            optimizer = Adam(lr=self.opt['learning_rate'], decay=self.opt['learning_decay'])
            self.model.compile(loss='binary_crossentropy',
                               optimizer=optimizer,
                               metrics=['binary_accuracy'])
            print('[ Loading model weights %s ]' % fname)
            self.model.load_weights(fname + '.h5')

        if self.model_type == 'ngrams':
            with open(fname + '_cls.pkl', 'rb') as model_file:
                self.model = pickle.load(model_file)
            print('CLS:', self.model)
utils.py (project: deeppavlov, author: deepmipt)
def getOptimizer(optim, exp_decay, grad_norm_clip, lr=0.001):
    """Function for setting up optimizer, combines several presets from
    published well performing models on SQuAD."""

    optimizers = {
        'Adam': Adam(lr=lr, decay=exp_decay, clipnorm=grad_norm_clip),
        'Adamax': Adamax(lr=lr, decay=exp_decay, clipnorm=grad_norm_clip),
        'Adadelta': Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, decay=exp_decay, clipnorm=grad_norm_clip)
    }

    try:
        optimizer = optimizers[optim]
    except KeyError as e:
        raise ValueError('problems with defining optimizer: {}'.format(e.args[0]))

    return optimizer
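
Usage is then a single lookup, e.g. (values hypothetical):

optimizer = getOptimizer('Adam', exp_decay=1e-6, grad_norm_clip=5.0, lr=0.001)
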

# ------------------------------------------------------------------------------
# Data/model utilities.
# ------------------------------------------------------------------------------
model.py (project: deeppavlov, author: deepmipt)
def _init_from_scratch(self):
        """Initialize a model from scratch."""

        if self.model_name == 'bmwacor':
            self.model = self.bmwacor_model()
        if self.model_name == 'bilstm_split':
            self.model = self.bilstm_split_model()
        if self.model_name == 'full_match':
            self.model = self.full_match_model()
        if self.model_name == 'maxpool_match':
            self.model = self.maxpool_match_model()
        if self.model_name == 'att_match':
            self.model = self.att_match_model()
        if self.model_name == 'maxatt_match':
            self.model = self.maxatt_match_model()
        if self.model_name == 'bilstm_woatt':
            self.model = self.bilstm_woatt_model()
        optimizer = Adam(lr=self.learning_rate)
        self.model.compile(loss='binary_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy', fbeta_score])
deepae.py (project: KATE, author: hugochan)
def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, feature_weights=None):
        print('Training autoencoder')
        optimizer = Adadelta(lr=1.5)
        # optimizer = Adam()
        # optimizer = Adagrad()
        if feature_weights is None:
            self.autoencoder.compile(optimizer=optimizer, loss='binary_crossentropy') # kld, binary_crossentropy, mse
        else:
            print('Using weighted loss')
            self.autoencoder.compile(optimizer=optimizer, loss=weighted_binary_crossentropy(feature_weights)) # kld, binary_crossentropy, mse

        self.autoencoder.fit(train_X[0], train_X[1],
                        nb_epoch=nb_epoch,
                        batch_size=batch_size,
                        shuffle=True,
                        validation_data=(val_X[0], val_X[1]),
                        callbacks=[
                                    ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
                                    EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=1, mode='auto'),
                                    # ModelCheckpoint(self.model_save_path, monitor='val_loss', save_best_only=True, verbose=0),
                        ]
                        )

        return self
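
weighted_binary_crossentropy is defined elsewhere in the project; a plausible sketch of such a loss factory, assuming feature_weights broadcasts against the last axis:

from keras import backend as K

def weighted_binary_crossentropy(feature_weights):
    # hypothetical reconstruction: binary cross-entropy scaled per output feature
    def loss(y_true, y_pred):
        y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
        bce = -(y_true * K.log(y_pred) + (1.0 - y_true) * K.log(1.0 - y_pred))
        return K.mean(bce * feature_weights, axis=-1)
    return loss
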
model.py (project: Sentiment-Analysis, author: jasonwu0731)
def __init__(self, n_classes, vocab_size, max_len, num_units=128,
                 useBiDirection=False, useAttention=False, learning_rate=0.001, dropout=0, embedding_size=300):
        self.model = Sequential()
        self.model.add(Embedding(input_dim=vocab_size,
                                 output_dim=embedding_size, input_length=max_len))
        lstm_model = LSTM(num_units, dropout=dropout)
        if useBiDirection:
            lstm_model = Bidirectional(lstm_model)
        if useAttention:
            # attention is not implemented yet; fall back to the plain LSTM
            print("Attention not implemented yet ... ")
        self.model.add(lstm_model)
        self.model.add(Dense(n_classes, activation='softmax'))

        self.model.summary()
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=Adam(lr=learning_rate),
                           metrics=['accuracy'])
models.py (project: keras-image-captioning, author: danieljl)
def build(self, vocabs=None):
        if self._keras_model:
            return
        if vocabs is None and self._word_vector_init is not None:
            raise ValueError('If word_vector_init is not None, build method '
                             'must be called with vocabs that are not None!')

        image_input, image_embedding = self._build_image_embedding()
        sentence_input, word_embedding = self._build_word_embedding(vocabs)
        sequence_input = Concatenate(axis=1)([image_embedding, word_embedding])
        sequence_output = self._build_sequence_model(sequence_input)

        model = Model(inputs=[image_input, sentence_input],
                      outputs=sequence_output)
        model.compile(optimizer=Adam(lr=self._learning_rate, clipnorm=5.0),
                      loss=categorical_crossentropy_from_logits,
                      metrics=[categorical_accuracy_with_variable_timestep])

        self._keras_model = model
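
categorical_crossentropy_from_logits and categorical_accuracy_with_variable_timestep are project helpers; the loss is presumably a thin wrapper over the backend, along these lines (the real helper also handles the variable timestep):

from keras import backend as K

def categorical_crossentropy_from_logits(y_true, y_pred):
    # hypothetical sketch: softmax cross-entropy computed on raw logits
    return K.categorical_crossentropy(y_true, y_pred, from_logits=True)
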
model.py (project: latplan, author: guicho271828)
def report(self, train_data,
               epoch=200, batch_size=1000, optimizer=Adam(0.001),
               test_data=None,
               train_data_to=None,
               test_data_to=None):
        test_data     = train_data if test_data is None else test_data
        train_data_to = train_data if train_data_to is None else train_data_to
        test_data_to  = test_data  if test_data_to is None else test_data_to
        opts = {'verbose': 0, 'batch_size': batch_size}
        def test_both(msg, fn):
            print(msg.format(fn(train_data)))
            if test_data is not None:
                print((msg+" (validation)").format(fn(test_data)))
        self.autoencoder.compile(optimizer=optimizer, loss=bce)
        test_both("Reconstruction BCE: {}",
                  lambda data: self.autoencoder.evaluate(data,data,**opts))
        return self
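
bce is presumably the project's binary cross-entropy helper, something like (assuming a Keras 2-style backend):

from keras import backend as K

def bce(y_true, y_pred):
    # hypothetical: mean binary cross-entropy over all elements
    return K.mean(K.binary_crossentropy(y_true, y_pred))
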
models.py (project: keras-tf-Super-Resolution, author: olgaliak)
def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        """
            Creates a model to be used to scale images of specific height and width.
        """
        init = super(ImageSuperResolutionModel, self).create_model(height, width, channels, load_weights, batch_size)

        x = Convolution2D(self.n1, self.f1, self.f1, activation='relu', border_mode='same', name='level1')(init)
        x = Convolution2D(self.n2, self.f2, self.f2, activation='relu', border_mode='same', name='level2')(x)

        out = Convolution2D(channels, self.f3, self.f3, border_mode='same', name='output')(x)

        model = Model(init, out)

        adam = optimizers.Adam(lr=1e-3)
        model.compile(optimizer=adam, loss='mse', metrics=[PSNRLoss])
        if load_weights: model.load_weights(self.weight_path)

        self.model = model
        return model
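
PSNRLoss is a project metric; for images scaled to [0, 1], a standard implementation is:

import numpy as np
from keras import backend as K

def PSNRLoss(y_true, y_pred):
    # PSNR in dB for inputs in [0, 1]; K.log is natural log, hence the division by log(10)
    return -10.0 * K.log(K.mean(K.square(y_pred - y_true))) / np.log(10.0)
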
models.py (project: keras-tf-Super-Resolution, author: olgaliak)
def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        """
            Creates a model to be used to scale images of specific height and width.
        """
        init = super(ExpantionSuperResolution, self).create_model(height, width, channels, load_weights, batch_size)

        x = Convolution2D(self.n1, self.f1, self.f1, activation='relu', border_mode='same', name='level1')(init)

        x1 = Convolution2D(self.n2, self.f2_1, self.f2_1, activation='relu', border_mode='same', name='lavel1_1')(x)
        x2 = Convolution2D(self.n2, self.f2_2, self.f2_2, activation='relu', border_mode='same', name='lavel1_2')(x)
        x3 = Convolution2D(self.n2, self.f2_3, self.f2_3, activation='relu', border_mode='same', name='lavel1_3')(x)

        x = merge([x1, x2, x3], mode='ave')

        out = Convolution2D(channels, self.f3, self.f3, activation='relu', border_mode='same', name='output')(x)

        model = Model(init, out)
        adam = optimizers.Adam(lr=1e-3)
        model.compile(optimizer=adam, loss='mse', metrics=[PSNRLoss])
        if load_weights: model.load_weights(self.weight_path)

        self.model = model
        return model
triplet_movielens.py (project: CCIR, author: xiaogang00)
def get_graph(num_users, num_items, latent_dim):

    model = Graph()
    model.add_input(name='user_input', input_shape=(num_users,))
    model.add_input(name='positive_item_input', input_shape=(num_items,))
    model.add_input(name='negative_item_input', input_shape=(num_items,))

    model.add_node(layer=Dense(latent_dim, input_shape=(num_users,)),
                   name='user_latent',
                   input='user_input')
    model.add_shared_node(layer=Dense(latent_dim, input_shape=(num_items,)),
                          name='item_latent',
                          inputs=['positive_item_input', 'negative_item_input'],
                          merge_mode=None,
                          outputs=['positive_item_latent', 'negative_item_latent'])

    model.add_node(layer=Activation('linear'), name='user_pos', inputs=['user_latent', 'positive_item_latent'], merge_mode='dot', dot_axes=1)
    model.add_node(layer=Activation('linear'), name='user_neg', inputs=['user_latent', 'negative_item_latent'], merge_mode='dot', dot_axes=1)

    model.add_output(name='triplet_loss_out', inputs=['user_pos', 'user_neg'])
    model.compile(loss={'triplet_loss_out': ranking_loss}, optimizer=Adam())  # alternative: Adagrad(lr=0.1, epsilon=1e-06)

    return model
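
ranking_loss is defined elsewhere; assuming the output node concatenates the positive and negative scores, a BPR-style sketch would be:

from keras import backend as K

def ranking_loss(y_true, y_pred):
    # hypothetical: each row of y_pred is [positive_score, negative_score]
    pos, neg = y_pred[:, 0], y_pred[:, 1]
    return K.mean(1.0 - K.sigmoid(pos - neg))
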
CNNModel3.py (project: CCIR, author: xiaogang00)
def cnn(height, width):
    question_input = Input(shape=(height, width, 1), name='question_input')
    conv1_Q = Conv2D(512, (2, 320), activation='sigmoid', padding='valid',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.02))(question_input)
    Max1_Q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(conv1_Q)
    F1_Q = Flatten()(Max1_Q)
    Drop1_Q = Dropout(0.25)(F1_Q)
    predictQ = Dense(32, activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.02))(Drop1_Q)
    prediction2 = Dropout(0.25)(predictQ)
    predictions = Dense(1, activation='relu')(prediction2)
    model = Model(inputs=[question_input],
                  outputs=predictions)

    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0))
    # model.compile(loss='mean_squared_error',
    #             optimizer='nadam')
    return model
importance_sampling.py (project: importance-sampling, author: idiap)
def build_model(model, wrapper, dataset, hyperparams, reweighting):
    def build_optimizer(opt, hyperparams):
        return {
            "sgd": SGD(
                lr=hyperparams.get("lr", 0.001),
                momentum=hyperparams.get("momentum", 0.0)
            ),
            "adam": Adam(lr=hyperparams.get("lr", 0.001))
        }[opt]

    model = models.get(model)(dataset.shape, dataset.output_size)
    model.compile(
        optimizer=build_optimizer(
            hyperparams.get("opt", "adam"),
            hyperparams
        ),
        loss=model.loss,
        metrics=model.metrics
    )

    return get_models_dictionary(hyperparams, reweighting)[wrapper](model)
a02_zoo.py (project: KAGGLE_CERVICAL_CANCER_2017, author: ZFTurbo)
def VGG_16_KERAS(classes_number, optim_name='Adam', learning_rate=-1):
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.applications.vgg16 import VGG16
    from keras.models import Model

    base_model = VGG16(include_top=True, weights='imagenet')
    # take the penultimate layer's output and attach a new classifier head
    x = base_model.layers[-2].output
    del base_model.layers[-1:]
    x = Dense(classes_number, activation='softmax', name='predictions')(x)
    vgg16 = Model(input=base_model.input, output=x)

    optim = get_optim('VGG16_KERAS', optim_name, learning_rate)
    vgg16.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])
    # print(vgg16.summary())
    return vgg16
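
get_optim is a project helper; a hypothetical sketch consistent with how it is called here (a per-model default rate when learning_rate == -1):

from keras.optimizers import SGD, Adam

def get_optim(cnn_name, optim_name, learning_rate=-1):
    # hypothetical reconstruction, not the project's actual defaults
    lr = 0.001 if learning_rate == -1 else learning_rate
    if optim_name == 'SGD':
        return SGD(lr=lr, momentum=0.9, nesterov=True)
    return Adam(lr=lr)
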


# MIN: 1.00 Fast: 60 sec
a02_zoo.py (project: KAGGLE_CERVICAL_CANCER_2017, author: ZFTurbo)
def VGG_16_2_v2(classes_number, optim_name='Adam', learning_rate=-1):
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.applications.vgg16 import VGG16
    from keras.models import Model
    from keras.layers import Input

    input_tensor = Input(shape=(3, 224, 224))
    base_model = VGG16(input_tensor=input_tensor, include_top=False, weights='imagenet')
    x = base_model.output
    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(classes_number, activation='softmax', name='predictions')(x)
    vgg16 = Model(input=base_model.input, output=x)

    optim = get_optim('VGG16_KERAS', optim_name, learning_rate)
    vgg16.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])
    # print(vgg16.summary())
    return vgg16
a02_zoo.py (project: KAGGLE_CERVICAL_CANCER_2017, author: ZFTurbo)
def Xception_wrapper(classes_number, optim_name='Adam', learning_rate=-1):
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.applications.xception import Xception
    from keras.models import Model

    # Only tensorflow
    base_model = Xception(include_top=True, weights='imagenet')
    x = base_model.layers[-2].output
    del base_model.layers[-1:]
    x = Dense(classes_number, activation='softmax', name='predictions')(x)
    model = Model(input=base_model.input, output=x)

    optim = get_optim('Xception_wrapper', optim_name, learning_rate)
    model.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])
    print(model.summary())
    return model
train_policy_gradient.py (project: strategy, author: kanghua309)
def build_model(self):
        model = Sequential()
        model.add(Dense(self.hidden1, input_dim=self.state_size, activation='relu', kernel_initializer='glorot_uniform'))
        model.add(Dense(self.hidden2, activation='relu', kernel_initializer='glorot_uniform'))
        model.add(Dense(self.action_size, activation='softmax', kernel_initializer='glorot_uniform'))
        model.summary()
        # Using categorical crossentropy as a loss is a trick to easily
        # implement the policy gradient. Categorical cross entropy is defined
        # as H(p, q) = -sum(p_i * log(q_i)). For the action taken, a, you set
        # p_a = advantage. q_a is the output of the policy network, which is
        # the probability of taking the action a, i.e. policy(s, a).
        # All other p_i are zero, so H(p, q) = -A * log(policy(s, a)):
        # minimizing this loss maximizes the advantage-weighted log-probability.
        model.compile(loss="categorical_crossentropy", optimizer=Adam(lr=self.learning_rate))
        return model

    # using the output of policy network, pick action stochastically
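
As that comment suggests, a typical policy-gradient step samples an action from the network's output distribution and then fits an advantage-weighted one-hot target (state, advantage, and model are hypothetical here):

import numpy as np

probs = model.predict(state[np.newaxis, :])[0]      # policy(s, .)
action = np.random.choice(len(probs), p=probs)      # sample stochastically

# ... step the environment and compute the advantage A for this action ...

target = np.zeros((1, len(probs)))
target[0, action] = advantage                       # p_a = A, all other p_i = 0
model.train_on_batch(state[np.newaxis, :], target)  # minimizes -A * log(policy(s, a))
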
model.py (project: pydl, author: rafaeltg)
def get_optimizer(self):

        if self.opt == 'sgd':
            return k_opt.SGD(lr=self.learning_rate, momentum=self.momentum)

        if self.opt == 'rmsprop':
            return k_opt.RMSprop(lr=self.learning_rate)

        if self.opt == 'adagrad':
            return k_opt.Adagrad(lr=self.learning_rate)

        if self.opt == 'adadelta':
            return k_opt.Adadelta(lr=self.learning_rate)

        if self.opt == 'adam':
            return k_opt.Adam(lr=self.learning_rate)

        raise Exception('Invalid optimization function - %s' % self.opt)
models.py (project: rogueinabox, author: rogueinabox)
def build_model(self):
        initializer = initializers.random_normal(stddev=0.02)
        model = Sequential()
        if self.padding:
            model.add(ZeroPadding2D(padding=(1, 0), data_format="channels_first", input_shape=(self.layers, self.rows, self.columns)))
        model.add(Conv2D(32, (8, 8), activation="relu", data_format="channels_first",
                         strides=(4, 4), kernel_initializer=initializer, padding='same',
                         input_shape=(self.layers, self.rows, self.columns)))
        model.add(Conv2D(64, (4, 4), activation="relu", data_format="channels_first", strides=(2, 2),
                         kernel_initializer=initializer, padding='same'))
        model.add(Conv2D(64, (3, 3), activation="relu", data_format="channels_first", strides=(1, 1),
                         kernel_initializer=initializer, padding='same'))
        model.add(Flatten())
        model.add(Dense(512, activation="relu", kernel_initializer=initializer))
        model.add(Dense(self.actions_num, kernel_initializer=initializer))

        adam = Adam(lr=1e-6)
        model.compile(loss='mse', optimizer=adam)
        return model
convnet_model.py (project: data-science-bowl-2017, author: tondonia)
def create_model_2():
    inputs = Input((32, 32, 32, 1))

    #noise = GaussianNoise(sigma=0.1)(x)

    conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = SpatialDropout3D(0.1)(conv1)
    conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)

    x = Flatten()(pool1)
    x = Dense(64, init='normal')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(1, init='normal', activation='sigmoid')(x)

    model = Model(input=inputs, output=predictions)
    model.summary()
    optimizer = Adam(lr=1e-5)
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])

    return model
convnet_model.py (project: data-science-bowl-2017, author: tondonia)
def create_model_1():
    inputs = Input((32, 32, 32, 1))

    #noise = GaussianNoise(sigma=0.1)(x)

    conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = SpatialDropout3D(0.1)(conv1)
    conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)

    x = Flatten()(pool1)
    x = Dense(64, init='normal')(x)
    predictions = Dense(1, init='normal', activation='sigmoid')(x)

    model = Model(input=inputs, output=predictions)
    model.summary()
    optimizer = Adam(lr=1e-5)
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])

    return model
deep_q.py (project: Snake-Game-AI, author: elvisun)
def _build_model(self):
        # Neural Net for Deep-Q learning Model
        model = Sequential()
        #model.add(Conv2D(256, kernel_size = (2,2), activation='relu', input_shape=(self.state_size.shape[0], self.state_size.shape[1],1), padding="same"))
        #model.add(Conv2D(712, kernel_size = (2,2), activation='relu', padding="same"))
        #model.add(Conv2D(128, kernel_size = (2,2), activation='relu', padding="same"))
        model.add(Dense(2048, input_dim=5, activation='relu'))  # self.state_size.shape[0] * self.state_size.shape[1]
        #model.add(Flatten())
        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(512, activation='relu'))
        model.add(Dense(256, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(128, activation='relu'))
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(32, activation='relu'))
        model.add(Dense(16, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(8, activation='relu'))
        model.add(Dense(4, activation='linear'))
        model.compile(loss='mse',
                      optimizer=Adam(lr=self.learning_rate))
        return model
deep_mlp.py (project: RIDDLE, author: jisungk)
def create_base_model(nb_features, nb_classes, learning_rate=0.02):
    model = Sequential() 

    # input layer + first hidden layer 
    model.add(Dense(512, kernel_initializer='lecun_uniform', input_shape=(nb_features,)))
    model.add(PReLU()) 
    model.add(Dropout(0.5)) 

    # additional hidden layer
    model.add(Dense(512, kernel_initializer='lecun_uniform')) 
    model.add(PReLU()) 
    model.add(Dropout(0.75)) 

    # output layer 
    model.add(Dense(nb_classes, kernel_initializer='lecun_uniform')) 
    model.add(Activation('softmax')) 

    model.compile(loss='categorical_crossentropy', 
        optimizer=Adam(lr=learning_rate), metrics=['accuracy'])  

    return model
modular_neural_network.py (project: deep-learning-with-Keras, author: decordoba)
def __init__(self):
        filters1 = [16, 32, 64]  # filters1 = [4, 8, 16, 32, 64, 128, 256]
        filters2 = [16, 32, 64]  # filters2 = [4, 8, 16, 32, 64, 128, 256]
        losses1 = [losses.MSE, losses.MAE, losses.hinge, losses.categorical_crossentropy]  # losses1 = [losses.MSE, losses.MAE, losses.hinge, losses.categorical_crossentropy]
        optimizers1 = [optimizers.Adam()]  # optimizers1 = [optimizers.Adadelta(), optimizers.Adagrad(), optimizers.Adam(), optimizers.Adamax(), optimizers.SGD(), optimizers.RMSprop()]
        units1 = [16, 32, 64]  # units1 = [4, 8, 16, 32, 64, 128, 256]
        kernel_sizes1 = [(3, 3)]  # kernel_sizes = [(3, 3), (5, 5)]
        dropouts1 = [0.25]  # dropouts1 = [0.25, 0.5, 0.75]
        dropouts2 = [0.5]  # dropouts2 = [0.25, 0.5, 0.75]
        pool_sizes1 = [(2, 2)]  # pool_sizes1 = [(2, 2)]

        # create standard experiments structure
        self.experiments = {"filters1": filters1,
                            "filters2": filters2,
                            "losses1": losses1,
                            "units1": units1,
                            "optimizers1": optimizers1,
                            "kernel_sizes1": kernel_sizes1,
                            "dropouts1": dropouts1,
                            "dropouts2": dropouts2,
                            "pool_sizes1": pool_sizes1}
modular_neural_network.py (project: deep-learning-with-Keras, author: decordoba)
def run_experiment(self, input_shape, labels, comb):
        # comb holds values like (32, (2,2), optimizers-Adam()). We need to use self.keys_mapper
        # which maps a name ("units", "kernel_sizes", "optimizers") to the position where it is
        # in comb. I wonder if it would be more comprehensible with a function like
        # get_element_from_comb(self, comb, key) { return comb[self.keys_mapper[key]] }
        opt = comb[self.keys_mapper["optimizers1"]]
        loss = comb[self.keys_mapper["losses1"]]
        f1 = comb[self.keys_mapper["filters1"]]
        f2 = comb[self.keys_mapper["filters2"]]
        u1 = comb[self.keys_mapper["units1"]]
        ks = comb[self.keys_mapper["kernel_sizes1"]]
        ps = comb[self.keys_mapper["pool_sizes1"]]
        d1 = comb[self.keys_mapper["dropouts1"]]
        d2 = comb[self.keys_mapper["dropouts2"]]
        return (opt, loss,
                Conv2D(f1, kernel_size=ks, activation='relu', input_shape=input_shape),
                Conv2D(f2, kernel_size=ks, activation='relu'),
                MaxPooling2D(pool_size=ps),
                Dropout(d1),
                Flatten(),
                Dense(u1, activation='relu'),
                Dropout(d2),
                Dense(len(labels), activation='softmax'))
test_discrete.py (project: keras-rl, author: matthiasplappert)
def test_dqn():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=50,
                   target_model_update=1e-1, policy=policy, enable_double_dqn=False)
    dqn.compile(Adam(lr=1e-3))

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=0)
    policy.eps = 0.
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)

