Python Dense() class usage examples

VGG_16.py (project: AerialCrackDetection_Keras, author: TTMRonald)
def classifier(base_layers, input_rois, batch_size, nb_classes = 3, trainable=False):

    # compile times tend to be very high, so we use smaller ROI pooling regions as a workaround
    if K.backend() == 'tensorflow':
        pooling_regions = 14
        input_shape = (batch_size,14,14,1024)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (batch_size,1024,7,7)

    out_roi_pool = RoiPoolingConv(pooling_regions, batch_size)([base_layers, input_rois])
    out = TimeDistributed(Flatten())(out_roi_pool)
    out = TimeDistributed(Dense(4096,activation='relu'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out = TimeDistributed(Dense(4096,activation='relu'))(out)
    out = TimeDistributed(Dropout(0.5))(out)

    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * nb_classes, activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
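
A wiring sketch for this head, assuming the project's RoiPoolingConv layer plus a shared VGG-16 feature extractor (nn_base here is a hypothetical stand-in); it is not runnable on its own:

from keras.layers import Input
from keras.models import Model

img_input = Input(shape=(None, None, 3))
roi_input = Input(shape=(None, 4))
base_layers = nn_base(img_input, trainable=True)   # hypothetical shared conv base
out_class, out_regr = classifier(base_layers, roi_input, batch_size=32, nb_classes=3)
model_classifier = Model([img_input, roi_input], [out_class, out_regr])
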
model.py (project: Sentiment-Analysis, author: jasonwu0731)
def __init__(self, n_classes, vocab_size, max_len, num_units=128,
                 useBiDirection=False, useAttention=False, learning_rate=0.001, dropout=0, embedding_size=300):
        self.model = Sequential()
        self.model.add(Embedding(input_dim=vocab_size,
                                 output_dim=embedding_size, input_length=max_len))
        lstm_model = LSTM(num_units, dropout=dropout)
        if useBiDirection:
            lstm_model = Bidirectional(lstm_model)
        if useAttention:
            # attention is not wired in yet; fall through to the plain (Bi)LSTM
            print("Attention not implemented yet ...")
        self.model.add(lstm_model)
        self.model.add(Dense(n_classes, activation='softmax'))

        self.model.summary()
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=Adam(lr=learning_rate),
                           metrics=['accuracy'])
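
A minimal smoke test for the class above (its name is not shown in the snippet; SentimentLSTM is assumed), with random token ids standing in for padded sequences:

import numpy as np
from keras.utils import to_categorical

vocab_size, max_len, n_classes = 5000, 40, 2
clf = SentimentLSTM(n_classes=n_classes, vocab_size=vocab_size, max_len=max_len)
x = np.random.randint(0, vocab_size, size=(32, max_len))            # padded token ids
y = to_categorical(np.random.randint(0, n_classes, size=32), n_classes)
clf.model.fit(x, y, epochs=1, batch_size=8)
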
n12_pepe_zoo.py (project: kaggle_yt8m, author: N01Z3)
def build_mod5(opt=adam()):
    n = 3 * 1024
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_identity(x1, n)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_identity(x2, n)

    x = merge([x1, x2], mode='concat', concat_axis=1)

    x = fc_identity(x, n)

    out = Dense(4716, activation='sigmoid', name='output')(x)

    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')

    # model.summary()
    # plot(model=model, show_shapes=True)
    return model
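
fc_block1 and fc_identity are project helpers not shown here; a plausible minimal sketch in the same Keras 1 functional style (an assumption, not the project's actual code):

from keras.layers import Dense, BatchNormalization, Activation, Dropout, merge

def fc_block1(x, n, d=0.2):
    # Dense -> BatchNorm -> ReLU -> Dropout
    x = Dense(n)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return Dropout(d)(x)

def fc_identity(inp, n):
    # residual-style block: transform, then add the input back
    x = Dense(n, activation='relu')(inp)
    x = merge([x, inp], mode='sum')
    return Activation('relu')(x)
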
regression.py (project: autonomio, author: autonomio)
def regression(X, Y, epochs, reg_mode):

    x, y = np.array(X),np.array(Y)

    model = Sequential()

    if reg_mode == 'linear':
        model.add(Dense(1, input_dim=x.shape[1]))
        model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='mse')

    elif reg_mode == 'logistic':
        model.add(Dense(1, activation='sigmoid', input_dim=x.shape[1]))
        model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='binary_crossentropy')

    elif reg_mode == 'regularized':
        reg = l1_l2(l1=0.01, l2=0.01)
        model.add(Dense(1, activation='sigmoid', W_regularizer=reg, input_dim=x.shape[1]))
        model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='binary_crossentropy')

    out = model.fit(x, y, nb_epoch=epochs, verbose=0, validation_split=.33)

    return model, out
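
A minimal smoke test with synthetic data (this snippet targets the Keras 1-era API; note W_regularizer and nb_epoch):

import numpy as np

X = np.random.random((100, 5))
Y = (X.sum(axis=1) > 2.5).astype(int).reshape(-1, 1)   # binary target
model, history = regression(X, Y, epochs=5, reg_mode='logistic')
print(history.history['val_loss'][-1])
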
vgg.py (project: keras-frcnn, author: yhenon)
def classifier(base_layers, input_rois, num_rois, nb_classes = 21, trainable=False):

    # compile times on theano tend to be very high, so we use smaller ROI pooling regions as a workaround

    if K.backend() == 'tensorflow':
        pooling_regions = 7
        input_shape = (num_rois,7,7,512)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (num_rois,512,7,7)

    out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])

    out = TimeDistributed(Flatten(name='flatten'))(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc1'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc2'))(out)
    out = TimeDistributed(Dropout(0.5))(out)

    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * (nb_classes-1), activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)

    return [out_class, out_regr]
main.py (project: keras-timeseries-prediction, author: gcarq)
def build_model(look_back: int, batch_size: int=1) -> Sequential:
    """
    The function builds a keras Sequential model
    :param look_back: number of previous time steps as int
    :param batch_size: batch_size as int, defaults to 1
    :return: keras Sequential model
    """
    model = Sequential()
    model.add(LSTM(64,
                   activation='relu',
                   batch_input_shape=(batch_size, look_back, 1),
                   stateful=True,
                   return_sequences=False))
    model.add(Dense(1, activation='linear'))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
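
Because the LSTM is stateful, the batch size is fixed at build time and the internal state must be cleared manually between epochs; a typical loop over synthetic windows:

import numpy as np

look_back, batch_size = 3, 1
model = build_model(look_back, batch_size)
X = np.random.random((100, look_back, 1))
y = np.random.random((100, 1))
for epoch in range(10):
    model.fit(X, y, epochs=1, batch_size=batch_size, shuffle=False, verbose=0)
    model.reset_states()   # clear LSTM state between passes over the series
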
model.py (project: latplan, author: guicho271828)
def build_encoder(self,input_shape):
        return [GaussianNoise(self.parameters['noise']),
                BN(),
                Dense(self.parameters['layer'], activation='relu', use_bias=False),
                BN(),
                Dropout(self.parameters['dropout']),
                Dense(self.parameters['layer'], activation='relu', use_bias=False),
                BN(),
                Dropout(self.parameters['dropout']),
                Dense(self.parameters['layer'], activation='relu', use_bias=False),
                BN(),
                Dropout(self.parameters['dropout']),
                Dense(self.parameters['N']*self.parameters['M']),]
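
build_encoder returns a list of layers rather than a wired model; a sketch of composing such a list functionally, assuming an instance ae of the class above with its parameters dict already set:

from keras.layers import Input

x = Input(shape=(100,))
h = x
for layer in ae.build_encoder((100,)):
    h = layer(h)   # h ends as the N*M-dimensional pre-activation code
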
model.py (project: latplan, author: guicho271828)
def _build(self,input_shape):
        x = Input(shape=input_shape)
        N = input_shape[0] // 2

        y = Sequential([
            flatten,
            *[Sequential([BN(),
                          Dense(self.parameters['layer'],activation=self.parameters['activation']),
                          Dropout(self.parameters['dropout']),])
              for i in range(self.parameters['num_layers']) ],
            Dense(1,activation="sigmoid")
        ])(x)

        self.loss = bce
        self.net = Model(x, y)
        # self.callbacks.append(self.linear_schedule([0.2,0.5], 0.1))
        self.callbacks.append(GradientEarlyStopping(verbose=1,epoch=50,min_grad=self.parameters['min_grad']))
        # self.custom_log_functions['lr'] = lambda: K.get_value(self.net.optimizer.lr)
ddpg.py (project: Learning-to-navigate-without-a-map, author: ToniRV)
def create_actor_network(self, state_size, action_dim):
        """Create actor network."""
        print("[MESSAGE] Build actor network.")
        S = Input(shape=state_size)
        h_0 = Conv2D(32, (3, 3), padding="same",
                     kernel_regularizer=l2(0.0001),
                     activation="relu")(S)
        h_1 = Conv2D(32, (3, 3), padding="same",
                     kernel_regularizer=l2(0.0001),
                     activation="relu")(h_0)
        h_1 = AveragePooling2D(2, 2)(h_1)
        h_1 = Flatten()(h_1)
        h_1 = Dense(600, activation="relu")(h_1)
        A = Dense(action_dim, activation="softmax")(h_1)

        model = Model(inputs=S, outputs=A)

        return model, model.trainable_weights, S
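
A quick shape check, assuming an agent instance of the class above and a hypothetical 16x16 single-channel state:

import numpy as np

model, weights, state_input = agent.create_actor_network((16, 16, 1), action_dim=4)
probs = model.predict(np.random.random((1, 16, 16, 1)))
print(probs.shape)   # (1, 4): a softmax distribution over actions
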
model.py (project: keras-molecules, author: maxhodak)
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 0.01):
        h = Convolution1D(9, 9, activation = 'relu', name='conv_1')(x)
        h = Convolution1D(9, 9, activation = 'relu', name='conv_2')(h)
        h = Convolution1D(10, 11, activation = 'relu', name='conv_3')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation = 'relu', name='dense_1')(h)

        def sampling(args):
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., std = epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z_mean = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
        z_log_var = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)

        def vae_loss(x, x_decoded_mean):
            x = K.flatten(x)
            x_decoded_mean = K.flatten(x_decoded_mean)
            xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
            kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis = -1)
            return xent_loss + kl_loss

        return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var]))
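
Inside the same class, the returned pair is typically wired as below; vae_loss closes over z_mean and z_log_var, so it must be passed to compile() on the model built from this same graph (the decoder call and charset size are assumed):

x = Input(shape=(max_length, charset_length))
vae_loss, z = self._buildEncoder(x, latent_rep_size, max_length)
decoded = self._buildDecoder(z, latent_rep_size, max_length, charset_length)  # assumed signature
vae = Model(x, decoded)
vae.compile(optimizer='Adam', loss=vae_loss)
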
model.py (project: cloudml-samples, author: GoogleCloudPlatform)
def model_fn(input_dim,
             labels_dim,
             hidden_units=[100, 70, 50, 20],
             learning_rate=0.1):
  """Create a Keras Sequential model with layers."""
  model = models.Sequential()

  for units in hidden_units:
    model.add(layers.Dense(units=units,
                           input_dim=input_dim,
                           activation=relu))
    input_dim = units

  # Add a dense final layer with sigmoid function
  model.add(layers.Dense(labels_dim, activation=sigmoid))
  compile_model(model, learning_rate)
  return model
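
Minimal usage (compile_model is a helper in the same module; the dimensions are illustrative):

model = model_fn(input_dim=10, labels_dim=1, hidden_units=[64, 32])
model.summary()
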
test_training.py (project: keras, author: GeekLiB)
def test_trainable_argument():
    x = np.random.random((5, 3))
    y = np.random.random((5, 2))

    model = Sequential()
    model.add(Dense(2, input_dim=3, trainable=False))
    model.compile('rmsprop', 'mse')
    out = model.predict(x)
    model.train_on_batch(x, y)
    out_2 = model.predict(x)
    assert_allclose(out, out_2)

    # test with nesting
    input = Input(shape=(3,))
    output = model(input)
    model = Model(input, output)
    model.compile('rmsprop', 'mse')
    out = model.predict(x)
    model.train_on_batch(x, y)
    out_2 = model.predict(x)
    assert_allclose(out, out_2)
test_model_saving.py (project: keras, author: GeekLiB)
def test_sequential_model_saving_2():
    # test with custom optimizer, loss
    custom_opt = optimizers.rmsprop
    custom_loss = objectives.mse
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname,
                       custom_objects={'custom_opt': custom_opt,
                                       'custom_loss': custom_loss})
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
test_model_saving.py (project: keras, author: GeekLiB)
def test_functional_model_saving():
    input = Input(shape=(3,))
    x = Dense(2)(input)
    output = Dense(3)(x)

    model = Model(input, output)
    model.compile(loss=objectives.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
lstm_bilstm.py (project: sota_sentiment, author: jbarnesspain)
def create_BiLSTM(wordvecs, lstm_dim=300, output_dim=2, dropout=.5,
                weights=None, train=True):
    model = Sequential()
    if weights is not None:
        model.add(Embedding(len(wordvecs)+1,
            len(wordvecs['the']),
            weights=[weights],
                    trainable=train))
    else:
        model.add(Embedding(len(wordvecs)+1,
            len(wordvecs['the']),
                    trainable=train))
    model.add(Dropout(dropout))
    model.add(Bidirectional(LSTM(lstm_dim)))
    model.add(Dropout(dropout))
    model.add(Dense(output_dim, activation='softmax'))
    if output_dim == 2:
        model.compile('adam', 'binary_crossentropy',
                  metrics=['accuracy'])
    else:
        model.compile('adam', 'categorical_crossentropy',
                  metrics=['accuracy'])
    return model
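
A tiny smoke test with toy 50-dimensional word vectors (the dict must contain 'the', which the function uses to read the embedding size):

import numpy as np

wordvecs = {w: np.random.random(50) for w in ['the', 'cat', 'sat']}
model = create_BiLSTM(wordvecs, lstm_dim=32, output_dim=2)
x = np.random.randint(0, len(wordvecs) + 1, size=(8, 10))   # padded id sequences
y = np.eye(2)[np.random.randint(0, 2, size=8)]              # one-hot labels
model.fit(x, y, epochs=1, verbose=0)
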
reactionrnn.py (project: reactionrnn, author: minimaxir)
def reactionrnn_model(weights_path, num_classes, maxlen=140):
    '''
    Builds the model architecture for reactionrnn and
    loads the pretrained weights for the model.
    '''

    input = Input(shape=(maxlen,), name='input')
    embedded = Embedding(num_classes, 100, input_length=maxlen,
                         name='embedding')(input)
    rnn = GRU(256, return_sequences=False, name='rnn')(embedded)
    output = Dense(5, name='output',
                   activation=lambda x: K.relu(x) / K.sum(K.relu(x),
                                                          axis=-1))(rnn)

    model = Model(inputs=[input], outputs=[output])
    model.load_weights(weights_path, by_name=True)
    model.compile(loss='mse', optimizer='nadam')
    return model
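
The output activation normalizes ReLU scores so the five values sum to 1, a probability-like vector without softmax's exponentiation; the same expression checked in plain numpy:

import numpy as np

def relu_norm(x):
    r = np.maximum(x, 0)
    return r / r.sum(axis=-1, keepdims=True)

print(relu_norm(np.array([1.0, -2.0, 3.0, 0.0, 1.0])))   # [0.2, 0., 0.6, 0., 0.2]
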
improved_wgan.py (project: keras-contrib, author: farizrahman4u)
def make_discriminator():
    """Creates a discriminator model that takes an image as input and outputs a single value, representing whether
    the input is real or generated. Unlike normal GANs, the output is not sigmoid and does not represent a probability!
    Instead, the output should be as large and negative as possible for generated inputs and as large and positive
    as possible for real inputs.

    Note that the improved WGAN paper suggests that BatchNormalization should not be used in the discriminator."""
    model = Sequential()
    if K.image_data_format() == 'channels_first':
        model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(1, 28, 28)))
    else:
        model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', padding='same', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Flatten())
    model.add(Dense(1024, kernel_initializer='he_normal'))
    model.add(LeakyReLU())
    model.add(Dense(1, kernel_initializer='he_normal'))
    return model
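
A shape check on a random MNIST-sized batch (channels_last assumed):

import numpy as np

disc = make_discriminator()
scores = disc.predict(np.random.random((4, 28, 28, 1)))
print(scores.shape)   # (4, 1): unbounded critic scores, not probabilities
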
train_nets.py (project: subtitle-synchronization, author: AlbertoSabater)
def model_cnn(net_layers, input_shape):

    inp = Input(shape=input_shape)
    model = inp

    for cl in net_layers['conv_layers']:
        model = Conv2D(filters=cl[0], kernel_size=cl[1], activation='relu')(model)
        if cl[4]:
            model = MaxPooling2D()(model)
        if cl[2]:
            model = BatchNormalization()(model)
        if cl[3]:
            model = Dropout(0.2)(model)

    model = Flatten()(model)

    for dl in net_layers['dense_layers']:
        model = Dense(dl[0])(model)
        model = Activation('relu')(model)
        if dl[1]:
            model = BatchNormalization()(model)
        if dl[2]:
            model = Dropout(0.2)(model)

    model = Dense(1)(model)
    model = Activation('sigmoid')(model)

    model = Model(inp, model)
    return model



# %%

# LSTM architecture
# conv_layers -> [(filters, kernel_size, BatchNormalization, Dropout, MaxPooling)]
# dense_layers -> [(num_neurons, BatchNormalization, Dropout)]
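
A sample net_layers dict for model_cnn above, matching the tuple layout in the two comments just above (values are illustrative):

net_layers = {
    'conv_layers':  [(32, (3, 3), True, True, True),    # filters, kernel, BN, dropout, pooling
                     (64, (3, 3), True, False, True)],
    'dense_layers': [(128, True, True)],                # neurons, BN, dropout
}
model = model_cnn(net_layers, input_shape=(64, 64, 1))
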
train_nets.py (project: subtitle-synchronization, author: AlbertoSabater)
def model_lstm(input_shape):

    inp = Input(shape=input_shape)
    model = inp

    if input_shape[0] > 2: model = Conv1D(filters=24, kernel_size=(3), activation='relu')(model)
#    if input_shape[0] > 0: model = TimeDistributed(Conv1D(filters=24, kernel_size=3, activation='relu'))(model)
    model = LSTM(16)(model)
    model = Activation('relu')(model)
    model = Dropout(0.2)(model)
    model = Dense(16)(model)
    model = Activation('relu')(model)
    model = BatchNormalization()(model)

    model = Dense(1)(model)
    model = Activation('sigmoid')(model)

    model = Model(inp, model)
    return model
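
A quick smoke test (window length and feature count assumed):

import numpy as np

model = model_lstm(input_shape=(10, 8))
preds = model.predict(np.random.random((4, 10, 8)))
print(preds.shape)   # (4, 1): per-window sigmoid scores
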

# %% 

# Conv-1D architecture. Just one sample as input
encdec_lstm_test.py (project: DeepAnomaly, author: adiyoss)
def test(path_test, input_size, hidden_size, batch_size, save_dir, model_name, maxlen):
    db = read_data(path_test)
    X = create_sequences(db, maxlen, maxlen)
    y = create_sequences(db, maxlen, maxlen)
    X = np.reshape(X, (X.shape[0], X.shape[1], 1))
    y = np.reshape(y, (y.shape[0], y.shape[1], 1))

    # build the model: 1 layer LSTM
    print('Build model...')
    model = Sequential()
    # "Encode" the input sequence using an RNN, producing an output of HIDDEN_SIZE
    # note: in a situation where your input sequences have a variable length,
    # use input_shape=(None, nb_feature).
    model.add(LSTM(hidden_size, input_shape=(maxlen, input_size)))
    # For the decoder's input, we repeat the encoded input for each time step
    model.add(RepeatVector(maxlen))
    # The decoder RNN could be multiple layers stacked or a single layer
    model.add(LSTM(hidden_size, return_sequences=True))

    # For each of step of the output sequence, decide which character should be chosen
    model.add(TimeDistributed(Dense(1)))

    model.load_weights(save_dir + model_name)

    model.compile(loss='mae', optimizer='adam')
    model.summary()

    prediction = model.predict(X, batch_size, verbose=1, )
    prediction = prediction.flatten()
    # prediction_container = np.array(prediction).flatten()
    plt.plot(prediction.flatten()[:4000], label='prediction')
    plt.plot(y.flatten()[maxlen:4000 + maxlen], label='true')
    plt.legend()
    plt.show()

    store_prediction_and_ground_truth(model)

