Example source code for the Python Dropout() class

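Dropout randomly sets a fraction of its input units to zero during training, which helps prevent overfitting; at inference time it passes activations through unchanged. Before the collected project snippets, here is a minimal, self-contained sketch of the typical placement of Dropout between fully-connected layers (the model and shapes are illustrative only, not taken from any of the projects below):

from keras.models import Sequential
from keras.layers import Dense, Dropout

model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(20,)))
model.add(Dropout(0.5))   # randomly drop 50% of the activations during training
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])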
cnn.py (project: Nature-Conservancy-Fish-Image-Prediction, author: Brok-Bucholtz)
def train(img_shape):
    classes = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']

    # Model
    model = Sequential()
    model.add(Convolution2D(
        32, 3, 3, input_shape=img_shape, activation='relu', W_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Convolution2D(32, 3, 3, activation='relu', W_constraint=maxnorm(3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512, activation='relu', W_constraint=maxnorm(3)))
    model.add(Dropout(0.5))
    model.add(Dense(len(classes), activation='softmax'))

    features, labels = get_featurs_labels(img_shape)

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(features, labels, nb_epoch=10, batch_size=32, validation_split=0.2, verbose=1)
    return model
msgp_mlp_kin40k.py (project: keras-gp, author: alshedivat)
def assemble_mlp(input_shape, output_shape, batch_size, nb_train_samples):
    """Assemble a simple MLP model.
    """
    inputs = Input(shape=input_shape)
    hidden = Dense(1024, activation='relu', name='dense1')(inputs)
    hidden = Dropout(0.5)(hidden)
    hidden = Dense(512, activation='relu', name='dense2')(hidden)
    hidden = Dropout(0.5)(hidden)
    hidden = Dense(64, activation='relu', name='dense3')(hidden)
    hidden = Dropout(0.25)(hidden)
    hidden = Dense(2, activation='relu', name='dense4')(hidden)
    gp = GP(hyp={
                'lik': np.log(0.3),
                'mean': [],
                'cov': [[0.5], [1.0]],
            },
            inf='infGrid', dlik='dlikGrid',
            opt={'cg_maxit': 2000, 'cg_tol': 1e-6},
            mean='meanZero', cov='covSEiso',
            update_grid=1,
            grid_kwargs={'eq': 1, 'k': 70.},
            batch_size=batch_size,
            nb_train_samples=nb_train_samples)
    outputs = [gp(hidden)]
    return Model(inputs=inputs, outputs=outputs)
train_bts.py (project: Msc_Multi_label_ZeroShot, author: thomasSve)
def define_network(vector_size, loss):
    base_model = InceptionV3(weights='imagenet', include_top=True)

    for layer in base_model.layers: # Freeze layers in pretrained model
        layer.trainable = False

    # Fully-connected layers on top of the frozen base model to produce the prediction vector
    x = Dense(4096, activation='relu', name='fc1')(base_model.layers[-2].output)
    x = Dense(8096, activation='relu', name='fc2')(x)
    x = Dropout(0.5)(x)
    x = Dense(2048,activation='relu', name='fc3')(x)
    predictions = Dense(vector_size, activation='relu')(x)
    l2 = Lambda(lambda x: K.l2_normalize(x, axis=1))(predictions)
    model = Model(inputs=base_model.inputs, outputs=l2)

    optimizer = 'adam'
    if loss == 'euclidean':
        model.compile(optimizer = optimizer, loss = euclidean_distance)
    else:
        model.compile(optimizer = optimizer, loss = loss)

    return model
mlp.py (project: pydl, author: rafaeltg)
def _create_layers(self, input_shape, n_output):

        """ Create the network layers
        :param input_shape:
        :param n_output:
        :return: self
        """

        # Hidden layers
        for i, l in enumerate(self.layers):
            self._model.add(Dense(units=l,
                                  input_shape=[input_shape[-1] if i == 0 else None],
                                  activation=self.activation[i],
                                  kernel_regularizer=l1_l2(self.l1_reg[i], self.l2_reg[i]),
                                  bias_regularizer=l1_l2(self.l1_reg[i], self.l2_reg[i])))

            if self.dropout[i] > 0:
                self._model.add(Dropout(rate=self.dropout[i]))

        # Output layer
        self._model.add(Dense(units=n_output, activation=self.out_activation))
stacked_autoencoder.py (project: pydl, author: rafaeltg)
def _create_layers(self, input_shape, n_output):

        """ Create the finetuning model
        :param input_shape:
        :param n_output:
        :return: self
        """

        # Hidden layers
        for i, l in enumerate(self.layers):
            self._model.add(Dense(input_shape=[input_shape[1] if i == 0 else None],
                                  units=l.n_hidden,
                                  weights=l.get_model_parameters()['enc'],
                                  activation=l.enc_activation,
                                  kernel_regularizer=l1_l2(l.l1_reg, l.l2_reg),
                                  bias_regularizer=l1_l2(l.l1_reg, l.l2_reg)))

            if self.dropout[i] > 0:
                self._model.add(Dropout(rate=self.dropout[i]))

        # Output layer
        self._model.add(Dense(units=n_output, activation=self.out_activation))
cnn_train.py (project: nuts-ml, author: maet3608)
def create_network():
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Flatten
    from keras.layers import Conv2D, MaxPooling2D

    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=INPUT_SHAPE))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return KerasNetwork(model, 'cnn_weights.hd5')
models.py (project: Word2Vec, author: hashbangCoder)
def BiDi(input_shape,vocabSize,veclen,wordWeights,nLayers,nHidden,lr):
    assert len(nHidden) == nLayers, '#Neurons for each layer does not match #Layers'
    r_flag = True
    _Input = Input(shape = (input_shape,),dtype = 'int32')
    E = keras.layers.embeddings.Embedding(vocabSize,veclen,weights=(wordWeights,),mask_zero = True)(_Input)
    for ind in range(nLayers):
        if ind == (nLayers-1):
            r_flag = False
        fwd_layer = keras.layers.recurrent.GRU(nHidden[ind],init='glorot_uniform',inner_init='orthogonal',activation='tanh',inner_activation='hard_sigmoid',return_sequences = r_flag)(E)
        bkwd_layer = keras.layers.recurrent.GRU(nHidden[ind],init='glorot_uniform',inner_init='orthogonal',activation='tanh',inner_activation='hard_sigmoid',return_sequences = r_flag,go_backwards = True)(E)
        E = merge([fwd_layer,bkwd_layer],mode = 'ave')
        #nHidden/= 2

    Output = Dense(1,activation = 'sigmoid')(Dropout(0.5)(E))
    model = Model(input = _Input, output = Output)

    opt = keras.optimizers.Adam(lr)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,  # use the Adam instance created above so the lr argument takes effect
                  metrics=['accuracy'])
    return model
dnn.py (project: Hotpot, author: Liang-Qiu)
def prep_model(inputs, N, s0pad, s1pad, c):
    # Word-level projection before averaging
    inputs[0] = TimeDistributed(Dense(N, activation='relu'))(inputs[0])
    inputs[0] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[0])
    inputs[1] = TimeDistributed(Dense(N, activation='relu'))(inputs[1])
    inputs[1] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[1])
    merged = concatenate([inputs[0], inputs[1]])

    # Deep
    for i in range(c['deep']):
        merged = Dense(c['nndim'], activation=c['nnact'])(merged)
        merged = Dropout(c['nndropout'])(merged)
        merged = BatchNormalization()(merged)

    is_duplicate = Dense(1, activation='sigmoid')(merged)
    return [is_duplicate], N
cnn.py (project: Hotpot, author: Liang-Qiu)
def prep_model(inputs, N, s0pad, s1pad, c):
    Nc, outputs = B.cnnsum_input(inputs, N, s0pad, siamese=c['cnnsiamese'],
                        dropout=c['dropout'], l2reg=c['l2reg'],
                        cnninit=c['cnninit'], cnnact=c['cnnact'], cdim=c['cdim'])

    # Projection
    if c['project']:
        outputs = Dense(int(N*c['pdim']), kernel_regularizer=l2(c['l2reg']), activation=c['pact'])(outputs)
        # model.add_shared_node(name='proj', inputs=['e0s_', 'e1s_'], outputs=['e0p', 'e1p'],
        #                       layer=Dense(input_dim=Nc, output_dim=int(N*c['pdim']),
        #                                   W_regularizer=l2(c['l2reg']), activation=c['pact']))
        # This dropout is controversial; it might be harmful to apply,
        # or at least isn't a clear win.
        # model.add_shared_node(name='projdrop', inputs=['e0p', 'e1p'], outputs=['e0p_', 'e1p_'],
        #                       layer=Dropout(c['dropout'], input_shape=(N,)))
        # return ('e0p_', 'e1p_')
    return outputs, N
densenet.py (project: DenseNetKeras, author: SulemanKazi)
def addLayer(previousLayer, nChannels, nOutChannels, dropRate, blockNum):

    bn = BatchNormalization(name = 'denseb_BatchNorm_{}'.format(blockNum) , axis = 1)(previousLayer)

    relu = Activation('relu', name ='denseb_relu_{}'.format(blockNum))(bn)

    conv = Convolution2D(nOutChannels, 3, 3, border_mode='same', name='denseb_conv_{}'.format(blockNum))(relu)

    if dropRate is not None:

        dp = Dropout(dropRate, name='denseb_dropout_{}'.format(blockNum))(conv)

        return merge([dp, previousLayer], mode='concat', concat_axis=1)

    else:

        return merge([conv, previousLayer], mode='concat', concat_axis=1)
densenet.py (project: DenseNetKeras, author: SulemanKazi)
def addTransition(previousLayer, nChannels, nOutChannels, dropRate, blockNum):

    bn = BatchNormalization(name = 'tr_BatchNorm_{}'.format(blockNum), axis = 1)(previousLayer)

    relu = Activation('relu', name ='tr_relu_{}'.format(blockNum))(bn)

    conv = Convolution2D(nOutChannels, 1, 1, border_mode='same', name='tr_conv_{}'.format(blockNum))(relu)

    if dropRate is not None:

        dp = Dropout(dropRate, name='tr_dropout_{}'.format(blockNum))(conv)

        avgPool = AveragePooling2D(pool_size=(2, 2))(dp)

    else:
        avgPool = AveragePooling2D(pool_size=(2, 2))(conv)

    return avgPool
train_nets.py (project: subtitle-synchronization, author: AlbertoSabater)
def model_cnn(net_layers, input_shape):

    inp = Input(shape=input_shape)
    model = inp

    for cl in net_layers['conv_layers']:
        model = Conv2D(filters=cl[0], kernel_size=cl[1], activation='relu')(model)
        if cl[4]:
            model = MaxPooling2D()(model)
        if cl[2]:
            model = BatchNormalization()(model)
        if cl[3]:
            model = Dropout(0.2)(model)

    model = Flatten()(model)

    for dl in net_layers['dense_layers']:
        model = Dense(dl[0])(model)
        model = Activation('relu')(model)
        if dl[1]:
            model = BatchNormalization()(model)
        if dl[2]:
            model = Dropout(0.2)(model)

    model = Dense(1)(model)
    model = Activation('sigmoid')(model)

    model = Model(inp, model)
    return model



# %%

# LSTM architecture
# conv_layers -> [(filters, kernel_size, BatchNormalization, Dropout, MaxPooling)]
# dense_layers -> [(num_neurons, BatchNormalization, Dropout)]
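The tuple layout described above can be illustrated with a hypothetical net_layers dictionary for the model_cnn function shown earlier (the concrete values and the input shape are illustrative only, not taken from the project):

net_layers = {
    'conv_layers': [
        (32, (3, 3), True, False, True),   # (filters, kernel_size, batch_norm, dropout, max_pooling)
        (64, (3, 3), True, True, True),
    ],
    'dense_layers': [
        (128, True, True),                 # (num_neurons, batch_norm, dropout)
    ],
}
model = model_cnn(net_layers, input_shape=(64, 64, 1))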
train_nets.py (project: subtitle-synchronization, author: AlbertoSabater)
def model_lstm(input_shape):

    inp = Input(shape=input_shape)
    model = inp

    if input_shape[0] > 2: model = Conv1D(filters=24, kernel_size=(3), activation='relu')(model)
#    if input_shape[0] > 0: model = TimeDistributed(Conv1D(filters=24, kernel_size=3, activation='relu'))(model)
    model = LSTM(16)(model)
    model = Activation('relu')(model)
    model = Dropout(0.2)(model)
    model = Dense(16)(model)
    model = Activation('relu')(model)
    model = BatchNormalization()(model)

    model = Dense(1)(model)
    model = Activation('sigmoid')(model)

    model = Model(inp, model)
    return model

# %% 

# Conv-1D architecture. Just one sample as input
model_entailment.py (project: onto-lstm, author: pdasigi)
def _get_encoded_sentence_variables(self, sent1_input_layer, sent2_input_layer, dropout,
                                        embedding_file, tune_embedding):
        if embedding_file is None:
            if not tune_embedding:
                print >>sys.stderr, "Pretrained embedding is not given. Setting tune_embedding to True."
                tune_embedding = True
            embedding = None
        else:
            # Put the embedding in a list so Keras treats it as the initial weights of the embedding layer.
            embedding = [self.data_processor.get_embedding_matrix(embedding_file, onto_aware=False)]
        vocab_size = self.data_processor.get_vocab_size(onto_aware=False)
        embedding_layer = Embedding(input_dim=vocab_size, output_dim=self.embed_dim, weights=embedding,
                                    trainable=tune_embedding, mask_zero=True, name="embedding")
        embedded_sent1 = embedding_layer(sent1_input_layer)
        embedded_sent2 = embedding_layer(sent2_input_layer)
        if "embedding" in dropout:
            embedded_sent1 = Dropout(dropout["embedding"])(embedded_sent1)
            embedded_sent2 = Dropout(dropout["embedding"])(embedded_sent2)
        if self.shared_memory:
            encoder = MultipleMemoryAccessNSE(output_dim=self.embed_dim, return_mode="output_and_memory",
                                              name="encoder")
            mmanse_sent1_input = InputMemoryMerger(name="merge_sent1_input")([embedded_sent1, embedded_sent2])
            encoded_sent1_and_memory = encoder(mmanse_sent1_input)
            encoded_sent1 = OutputSplitter("output", name="get_sent1_output")(encoded_sent1_and_memory)
            shared_memory = OutputSplitter("memory", name="get_shared_memory")(encoded_sent1_and_memory)
            mmanse_sent2_input = InputMemoryMerger(name="merge_sent2_input")([embedded_sent2, shared_memory])
            encoded_sent2_and_memory = encoder(mmanse_sent2_input)
            encoded_sent2 = OutputSplitter("output", name="get_sent2_output")(encoded_sent2_and_memory)
        else:
            encoder = NSE(output_dim=self.embed_dim, name="encoder")
            encoded_sent1 = encoder(embedded_sent1)
            encoded_sent2 = encoder(embedded_sent2)
        if "encoder" in dropout:
            encoded_sent1 = Dropout(dropout["encoder"])(encoded_sent1)
            encoded_sent2 = Dropout(dropout["encoder"])(encoded_sent2)
        return encoded_sent1, encoded_sent2
model_sentences.py (project: onto-lstm, author: pdasigi)
def train(self, S_ind, C_ind, use_onto_lstm=True, use_attention=True, num_epochs=20,  hierarchical=False, base=2):
    # Predict next word from current synsets
    X = C_ind[:,:-1] if use_onto_lstm else S_ind[:,:-1] # remove the last words' hyps in all sentences
    Y_inds = S_ind[:,1:] # remove the first words in all sentences
    if hierarchical:
      train_targets = self._factor_target_indices(Y_inds, base=base)
    else:
      train_targets = [self._make_one_hot(Y_inds, Y_inds.max() + 1)]
    length = Y_inds.shape[1]
    lstm_outdim = self.word_dim

    num_words = len(self.dp.word_index)
    num_syns = len(self.dp.synset_index)
    input = Input(shape=X.shape[1:], dtype='int32')
    embed_input_dim = num_syns if use_onto_lstm else num_words
    embed_layer = HigherOrderEmbedding(name='embedding', input_dim=embed_input_dim, output_dim=self.word_dim, input_shape=X.shape[1:], mask_zero=True)
    sent_rep = embed_layer(input)
    reg_sent_rep = Dropout(0.5)(sent_rep)
    if use_onto_lstm:
      lstm_out = OntoAttentionLSTM(name='sent_lstm', input_dim=self.word_dim, output_dim=lstm_outdim, input_length=length, num_senses=self.num_senses, num_hyps=self.num_hyps, return_sequences=True, use_attention=use_attention)(reg_sent_rep)
    else:
      lstm_out = LSTM(name='sent_lstm', input_dim=self.word_dim, output_dim=lstm_outdim, input_length=length, return_sequences=True)(reg_sent_rep)
    output_nodes = []
    # Make one node for each factored target
    for target in train_targets:
      node = TimeDistributed(Dense(input_dim=lstm_outdim, output_dim=target.shape[-1], activation='softmax'))(lstm_out)
      output_nodes.append(node)

    model = Model(input=input, output=output_nodes)
    print >>sys.stderr, model.summary()
    early_stopping = EarlyStopping()
    precompile_time = time.time()
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    postcompile_time = time.time()
    print >>sys.stderr, "Model compilation took %d s"%(postcompile_time - precompile_time)
    model.fit(X, train_targets, nb_epoch=num_epochs, validation_split=0.1, callbacks=[early_stopping])
    posttrain_time = time.time()
    print >>sys.stderr, "Training took %d s"%(posttrain_time - postcompile_time)
    concept_reps = model.layers[1].get_weights()
    self.model = model
    return concept_reps
model.py (project: image_caption, author: MaticsL)
def image_caption_model(vocab_size=2500, embedding_matrix=None, lang_dim=100,
            max_caplen=28, img_dim=2048, clipnorm=1):
    print('generating vocab_history model v5')
    # text: current word
    lang_input = Input(shape=(1,))
    img_input = Input(shape=(img_dim,))
    seq_input = Input(shape=(max_caplen,))
    vhist_input = Input(shape=(vocab_size,))

    if embedding_matrix is not None:
        x = Embedding(output_dim=lang_dim, input_dim=vocab_size, init='glorot_uniform', input_length=1, weights=[embedding_matrix])(lang_input)
    else:
        x = Embedding(output_dim=lang_dim, input_dim=vocab_size, init='glorot_uniform', input_length=1)(lang_input)

    lang_embed = Reshape((lang_dim,))(x)
    lang_embed = merge([lang_embed, seq_input], mode='concat', concat_axis=-1)
    lang_embed = Dense(lang_dim)(lang_embed)
    lang_embed = Dropout(0.25)(lang_embed)

    merge_layer = merge([img_input, lang_embed, vhist_input], mode='concat', concat_axis=-1)
    merge_layer = Reshape((1, lang_dim+img_dim+vocab_size))(merge_layer)

    gru_1 = GRU(img_dim)(merge_layer)
    gru_1 = Dropout(0.25)(gru_1)
    gru_1 = Dense(img_dim)(gru_1)
    gru_1 = BatchNormalization()(gru_1)
    gru_1 = Activation('softmax')(gru_1)

    attention_1 = merge([img_input, gru_1], mode='mul', concat_axis=-1)
    attention_1 = merge([attention_1, lang_embed, vhist_input], mode='concat', concat_axis=-1)
    attention_1 = Reshape((1, lang_dim + img_dim + vocab_size))(attention_1)
    gru_2 = GRU(1024)(attention_1)
    gru_2 = Dropout(0.25)(gru_2)
    gru_2 = Dense(vocab_size)(gru_2)
    gru_2 = BatchNormalization()(gru_2)
    out = Activation('softmax')(gru_2)

    model = Model(input=[img_input, lang_input, seq_input, vhist_input], output=out)
    model.compile(loss='categorical_crossentropy', optimizer=RMSprop(lr=0.0001, clipnorm=1.))
    return model
mnist_mlp_benchmark.py (project: benchmarks, author: tensorflow)
def run_benchmark(self, gpus=0):
        num_classes = 10

        # Generate random input data
        input_shape = (self.num_samples, 28, 28)
        x_train, y_train = generate_img_input_data(input_shape)

        x_train = x_train.reshape(self.num_samples, 784)
        x_train = x_train.astype('float32')
        x_train /= 255

        # convert class vectors to binary class matrices
        y_train = keras.utils.to_categorical(y_train, num_classes)

        model = Sequential()
        model.add(Dense(512, activation='relu', input_shape=(784,)))
        model.add(Dropout(0.2))
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(num_classes, activation='softmax'))

        if keras.backend.backend() is "tensorflow" and gpus > 1:
            model = multi_gpu_model(model, gpus=gpus)

        model.compile(loss='categorical_crossentropy',
                      optimizer=RMSprop(),
                      metrics=['accuracy'])

        # create a distributed trainer for cntk
        if keras.backend.backend() is "cntk" and gpus > 1:
            start, end = cntk_gpu_mode_config(model, x_train.shape[0])
            x_train = x_train[start: end]
            y_train = y_train[start: end]

        time_callback = timehistory.TimeHistory()
        model.fit(x_train, y_train, batch_size=self.batch_size,
                  epochs=self.epochs, verbose=1, callbacks=[time_callback])

        self.total_time = 0
        for i in range(1, self.epochs):
            self.total_time += time_callback.times[i]
models.py (project: AutoSleepScorerDev, author: skjerns)
def cnn3adam_slim(input_shape, n_classes):
    """
    Input shape should be [batch, length, channels] = (None, 3000, 3)
    """
    model = Sequential(name='cnn3adam')
    model.add(Conv1D (kernel_size = (50), filters = 32, strides=5, input_shape=input_shape, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (5), filters = 64, strides=1, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D (kernel_size = (5), filters = 64, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense (250, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense (250, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam())
    return model

