Example source code for the Python Conv1D() class
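Before the collected snippets, a minimal sketch (assuming Keras 2.x) of the shape convention Conv1D works with: input of shape (batch, steps, channels), output of shape (batch, new_steps, filters).

# Minimal sketch, assuming Keras 2.x; shapes are illustrative, not from any project below.
from keras.models import Sequential
from keras.layers import Conv1D

model = Sequential()
model.add(Conv1D(64, 3, activation='relu', input_shape=(100, 8)))  # 100 steps, 8 channels
model.summary()  # output shape (None, 98, 64): 'valid' padding trims 2 steps, 64 filters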

Source: agent_LSTM.py, project: gym-forex, author: harveybc
def _build_model(self):
        # Deep Conv Neural Net for Deep-Q learning Model
        model = Sequential()
        model.add(Conv1D(128, 3, input_shape=(19,48)))
        model.add(Activation('relu'))
        model.add(MaxPooling1D(pool_size=2))

        model.add(Conv1D(64, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling1D(pool_size=2))

        model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
        model.add(Dense(64))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.action_size))
        model.add(Activation('sigmoid'))

        model.compile(loss=self._huber_loss,
                      optimizer=Adam(lr=self.learning_rate))
        #model.compile(loss='binary_crossentropy',
        #              optimizer='rmsprop',
        #              metrics=['accuracy'])

        return model
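self._huber_loss is referenced above but not shown in this excerpt. A common DQN-style Huber loss written against the Keras backend looks roughly like the sketch below; this is an assumption, not the gym-forex implementation.

# Hedged sketch of a Huber loss as often paired with Keras DQN agents;
# the project's _huber_loss may differ.
import keras.backend as K

def huber_loss(y_true, y_pred, clip_delta=1.0):
    error = y_true - y_pred
    cond = K.abs(error) <= clip_delta
    squared_loss = 0.5 * K.square(error)
    linear_loss = clip_delta * (K.abs(error) - 0.5 * clip_delta)
    return K.mean(K.switch(cond, squared_loss, linear_loss))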
Source: doc-cnn4.py, project: char-models, author: offbit
def char_block(in_layer, nb_filter=(64, 100), filter_length=(3, 3), subsample=(2, 1), pool_length=(2, 2)):
    block = in_layer
    for i in range(len(nb_filter)):

        block = Conv1D(filters=nb_filter[i],
                       kernel_size=filter_length[i],
                       padding='valid',
                       activation='tanh',
                       strides=subsample[i])(block)

        # block = BatchNormalization()(block)
        # block = Dropout(0.1)(block)
        if pool_length[i]:
            block = MaxPooling1D(pool_size=pool_length[i])(block)

    # block = Lambda(max_1d, output_shape=(nb_filter[-1],))(block)
    block = GlobalMaxPool1D()(block)
    block = Dense(128, activation='relu')(block)
    return block
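A hedged usage sketch for char_block; the character vocabulary size, sequence length, embedding width, and class count below are placeholders, not values from the char-models project.

# Hypothetical wiring of char_block into a classifier; all sizes are illustrative.
from keras.layers import Input, Embedding, Dense
from keras.models import Model

chars_in = Input(shape=(512,), dtype='int64')              # 512 characters per document
embedded = Embedding(128, 16)(chars_in)                    # small per-character embedding
features = char_block(embedded, nb_filter=(64, 100), filter_length=(5, 5),
                      subsample=(1, 1), pool_length=(2, 2))
out = Dense(4, activation='softmax')(features)             # 4 example classes
model = Model(chars_in, out)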
Source: train_nets.py, project: subtitle-synchronization, author: AlbertoSabater
def model_lstm(input_shape):

    inp = Input(shape=input_shape)
    model = inp

    if input_shape[0] > 2: model = Conv1D(filters=24, kernel_size=(3), activation='relu')(model)
#    if input_shape[0] > 0: model = TimeDistributed(Conv1D(filters=24, kernel_size=3, activation='relu'))(model)
    model = LSTM(16)(model)
    model = Activation('relu')(model)
    model = Dropout(0.2)(model)
    model = Dense(16)(model)
    model = Activation('relu')(model)
    model = BatchNormalization()(model)

    model = Dense(1)(model)
    model = Activation('sigmoid')(model)

    model = Model(inp, model)
    return model

# %% 

# Conv-1D architecture. Just one sample as input
Source: models.py, project: AutoSleepScorerDev, author: skjerns
def cnn3adam_slim(input_shape, n_classes):
    """
    Input shape should be (batch, time, channels) = (None, 3000, 3)
    """
    model = Sequential(name='cnn3adam')
    model.add(Conv1D (kernel_size = (50), filters = 32, strides=5, input_shape=input_shape, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (5), filters = 64, strides=1, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D (kernel_size = (5), filters = 64, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense (250, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense (250, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam())
    return model
Source: models.py, project: AutoSleepScorerDev, author: skjerns
def cnn3adam_filter(input_shape, n_classes):
    """
    Input shape should be (batch, time, channels) = (None, 3000, 3)
    """
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    print('use L2 model instead!')
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    model = Sequential(name='cnn3adam_filter')
    model.add(Conv1D (kernel_size = (50), filters = 128, strides=5, input_shape=input_shape, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (5), filters = 256, strides=1, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())

    model.add(Conv1D (kernel_size = (5), filters = 300, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense (1500, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization(name='fc1'))
    model.add(Dropout(0.5))
    model.add(Dense (1500, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization(name='fc2'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax',name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    return model
Source: models.py, project: AutoSleepScorerDev, author: skjerns
def cnn3adam_filter_l2(input_shape, n_classes):
    """
    Input shape should be (batch, time, channels) = (None, 3000, 3)
    """
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    print('use more L2 model instead!')
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    model = Sequential(name='cnn3adam_filter_l2')
    model.add(Conv1D (kernel_size = (50), filters = 128, strides=5, input_shape=input_shape, 
                      kernel_initializer='he_normal', activation='relu',kernel_regularizer=keras.regularizers.l2(0.005))) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (5), filters = 256, strides=1, kernel_initializer='he_normal', activation='relu',kernel_regularizer=keras.regularizers.l2(0.005))) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())

    model.add(Conv1D (kernel_size = (5), filters = 300, strides=2, kernel_initializer='he_normal', activation='relu',kernel_regularizer=keras.regularizers.l2(0.005))) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense (1500, activation='relu', kernel_initializer='he_normal',name='fc1'))
    model.add(BatchNormalization(name='bn1'))
    model.add(Dropout(0.5, name='do1'))
    model.add(Dense (1500, activation='relu', kernel_initializer='he_normal',name='fc2'))
    model.add(BatchNormalization(name='bn2'))
    model.add(Dropout(0.5, name='do2'))
    model.add(Dense(n_classes, activation = 'softmax',name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
#    print('reset learning rate')
    return model
Source: models.py, project: AutoSleepScorerDev, author: skjerns
def cnn3adam_filter_morel2_slim(input_shape, n_classes):
    """
    Input shape should be (batch, time, channels) = (None, 3000, 3)
    """
    model = Sequential(name='cnn3adam_filter_morel2_slim')
    model.add(Conv1D (kernel_size = (50), filters = 128, strides=5, input_shape=input_shape, 
                      kernel_initializer='he_normal', activation='relu',kernel_regularizer=keras.regularizers.l2(0.05))) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (5), filters = 128, strides=1, kernel_initializer='he_normal', activation='relu',kernel_regularizer=keras.regularizers.l2(0.01))) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D (kernel_size = (5), filters = 256, strides=2, kernel_initializer='he_normal', activation='relu',kernel_regularizer=keras.regularizers.l2(0.01))) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense (512, activation='relu', kernel_initializer='he_normal',name='fc1'))
    model.add(BatchNormalization(name='bn1'))
    model.add(Dropout(0.5, name='do1'))
    model.add(Dense (512, activation='relu', kernel_initializer='he_normal',name='fc2'))
    model.add(BatchNormalization(name='bn2'))
    model.add(Dropout(0.5, name='do2'))
    model.add(Dense(n_classes, activation = 'softmax',name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
#    print('reset learning rate')
    return model
Source: models.py, project: AutoSleepScorerDev, author: skjerns
def cnn1d(input_shape, n_classes ):
    """
    Input shape should be (batch, time, channels) = (None, 3000, 1)
    """
    model = Sequential(name='1D CNN')
    model.add(Conv1D (kernel_size = (50), filters = 150, strides=5, input_shape=input_shape, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(Conv1D (kernel_size = (8), filters = 200, strides=2, input_shape=input_shape, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size = (10), strides=(2)))
    print(model.output_shape)

    model.add(Conv1D (kernel_size = (8), filters = 400, strides=2, input_shape=input_shape, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(Flatten())
    model.add(Dense (700, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense (700, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta(), metrics=[keras.metrics.categorical_accuracy])
    return model
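A usage sketch, assuming the function above and its imports are in scope; one 3000-sample, single-channel epoch and five output classes are assumptions for illustration.

# Hypothetical instantiation; (3000, 1) matches the docstring, 5 classes is an assumption.
model = cnn1d(input_shape=(3000, 1), n_classes=5)
model.summary()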
Source: models.py, project: AutoSleepScorerDev, author: skjerns
def cnn1(input_shape, n_classes):
    """
    Input shape should be (batch, time, channels) = (None, 3000, 3)
    """
    model = Sequential(name='no_MP_small_filters')
    model.add(Conv1D (kernel_size = (10), filters = 64, strides=2, input_shape=input_shape, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (10), filters = 64, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (10), filters = 128, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (10), filters = 128, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (10), filters = 150, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Flatten())
    model.add(Dense (1024, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense (1024, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
Source: models.py, project: AutoSleepScorerDev, author: skjerns
def cnn3(input_shape, n_classes):
    """
    Input shape should be (batch, time, channels) = (None, 3000, 3)
    """
    model = Sequential(name='mixture')
    model.add(Conv1D (kernel_size = (50), filters = 64, strides=5, input_shape=input_shape, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (5), filters = 128, strides=1, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())

    model.add(Conv1D (kernel_size = (5), filters = 128, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())

    model.add(Flatten())
    model.add(Dense (500, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense (500, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
Source: models.py, project: AutoSleepScorerDev, author: skjerns
def cnn4(input_shape, n_classes):
    """
    Input shape should be (batch, time, channels) = (None, 3000, 3)
    """
    model = Sequential(name='large_kernel')
    model.add(Conv1D (kernel_size = (100), filters = 128, strides=10, input_shape=input_shape, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D (kernel_size = (100), filters = 128, strides=1, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (100), filters = 128, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Flatten())
    model.add(Dense (768, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense (768, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
Source: models.py, project: AutoSleepScorerDev, author: skjerns
def cnn5(input_shape, n_classes):
    """
    Input shape should be (batch, time, channels) = (None, 3000, 3)
    """
    model = Sequential(name='very_large_kernel')
    model.add(Conv1D (kernel_size = (200), filters = 128, strides=3, input_shape=input_shape, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D (kernel_size = (200), filters = 128, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (200), filters = 128, strides=1, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (10), filters = 128, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Flatten())
    model.add(Dense (768, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense (768, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
Source: net.py, project: speechless, author: JuliusKunze
def input_to_prediction_length_ratio(self):
        """Returns which factor shorter the output is compared to the input caused by striding."""
        return reduce(lambda x, y: x * y,
                      [layer.strides[0] for layer in self.predictive_net.layers if
                       isinstance(layer, Conv1D)], 1)
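A standalone sketch of the same stride-product computation on a hypothetical two-layer network; with strides 5 and 2 the prediction is 10 times shorter than the input.

# Hypothetical network used only to illustrate the stride-product ratio above.
from functools import reduce
from keras.models import Sequential
from keras.layers import Conv1D

net = Sequential([
    Conv1D(32, 5, strides=5, input_shape=(1000, 13)),
    Conv1D(32, 3, strides=2),
])
ratio = reduce(lambda x, y: x * y,
               [layer.strides[0] for layer in net.layers if isinstance(layer, Conv1D)], 1)
print(ratio)  # 10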
Source: sequence_encoders.py, project: keras-text, author: raghakot
def __init__(self, num_filters=64, filter_sizes=[3, 4, 5], dropout_rate=0.5, **conv_kwargs):
        """Yoon Kim's shallow cnn model: https://arxiv.org/pdf/1408.5882.pdf

        Args:
            num_filters: The number of filters to use per `filter_size`. (Default value = 64)
            filter_sizes: The filter sizes for each convolutional layer. (Default value = [3, 4, 5])
            **conv_kwargs: Additional args for building the `Conv1D` layer.
        """
        super(YoonKimCNN, self).__init__(dropout_rate)
        self.num_filters = num_filters
        self.filter_sizes = filter_sizes
        self.conv_kwargs = conv_kwargs
Source: sequence_encoders.py, project: keras-text, author: raghakot
def build_model(self, x):
        pooled_tensors = []
        for filter_size in self.filter_sizes:
            x_i = Conv1D(self.num_filters, filter_size, activation='elu', **self.conv_kwargs)(x)
            x_i = GlobalMaxPooling1D()(x_i)
            pooled_tensors.append(x_i)

        x = pooled_tensors[0] if len(self.filter_sizes) == 1 else concatenate(pooled_tensors, axis=-1)
        return x
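For context, a self-contained functional-API sketch of the same parallel-branch pattern built directly from the layers used above; vocabulary size, embedding width, sequence length, and class count are placeholders rather than keras-text defaults.

# Hypothetical end-to-end wiring of the Kim-style CNN; all sizes are illustrative.
from keras.layers import Input, Embedding, Conv1D, GlobalMaxPooling1D, Dense, concatenate
from keras.models import Model

tokens = Input(shape=(100,), dtype='int64')         # 100 tokens per document
x = Embedding(20000, 300)(tokens)                   # word embeddings
pooled = [GlobalMaxPooling1D()(Conv1D(64, k, activation='elu')(x)) for k in (3, 4, 5)]
x = concatenate(pooled, axis=-1)
out = Dense(2, activation='softmax')(x)             # binary example task
model = Model(tokens, out)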
Source: model.py, project: mycroft, author: wpm
def __init__(self, training, sequence_length=None, vocabulary_size=None,
                 train_embeddings=SequentialTextEmbeddingClassifier.TRAIN_EMBEDDINGS, dropout=DROPOUT, filters=FILTERS,
                 kernel_size=KERNEL_SIZE, pool_factor=POOL_FACTOR, learning_rate=LEARNING_RATE,
                 language_model=LANGUAGE_MODEL):
        from keras.layers import Dropout, Conv1D, Flatten, MaxPooling1D, Dense
        from keras.models import Sequential
        from keras.optimizers import Adam

        label_names, sequence_length, vocabulary_size = self.parameters_from_training(sequence_length, vocabulary_size,
                                                                                      training, language_model)
        embedder = TextSequenceEmbedder(vocabulary_size, sequence_length, language_model)

        model = Sequential()
        model.add(self.embedding_layer(embedder, sequence_length, train_embeddings, name="embedding"))
        model.add(Conv1D(filters, kernel_size, padding="valid", activation="relu", strides=1, name="convolution"))
        model.add(MaxPooling1D(pool_size=pool_factor, name="pooling"))
        model.add(Flatten(name="flatten"))
        model.add(Dropout(dropout, name="dropout"))
        model.add(Dense(len(label_names), activation="softmax", name="softmax"))
        optimizer = Adam(lr=learning_rate)
        model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"])

        self.filters = filters
        self.kernel_size = kernel_size
        self.pool_factor = pool_factor
        self.dropout = dropout
        super().__init__(model, embedder, label_names)
Source: model.py, project: keras_detect_tool_wear, author: kidozh
def build_model(timestep,input_dim,output_dim,dropout=0.5,recurrent_layers_num=4,cnn_layers_num=6,lr=0.001):
    inp = Input(shape=(timestep,input_dim))
    output = TimeDistributed(Masking(mask_value=0))(inp)
    #output = inp
    output = Conv1D(128, 1)(output)
    output = BatchNormalization()(output)
    output = Activation('relu')(output)

    output = first_block(output, (64, 128), dropout=dropout)


    output = Dropout(dropout)(output)
    for _ in range(cnn_layers_num):
        output = repeated_block(output, (64, 128), dropout=dropout)

    output = Flatten()(output)
    #output = LSTM(128, return_sequences=False)(output)

    output = BatchNormalization()(output)
    output = Activation('relu')(output)
    output = Dense(output_dim)(output)


    model = Model(inp,output)

    optimizer = Adam(lr=lr)

    model.compile(optimizer,'mse',['mae'])
    return model
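first_block and repeated_block are defined elsewhere in the keras_detect_tool_wear project and are not shown here. Purely to illustrate the residual 1-D pattern such helpers typically follow, a hypothetical block might look like the sketch below; it is not the project's implementation.

# Hypothetical residual block for illustration only; assumes x already has
# filters[1] channels (as it does after the Conv1D(128, 1) above).
from keras.layers import Conv1D, BatchNormalization, Activation, Dropout, add

def residual_block_sketch(x, filters=(64, 128), dropout=0.5):
    shortcut = x
    y = BatchNormalization()(x)
    y = Activation('relu')(y)
    y = Conv1D(filters[0], 3, padding='same')(y)
    y = Dropout(dropout)(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv1D(filters[1], 3, padding='same')(y)
    return add([shortcut, y])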
Source: test_views.py, project: Fabrik, author: Cloud-CV
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Input2'], 'l2': net['Input4'], 'l3': net['Convolution']}
        # Conv 1D
        net['l1']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l1']
        net['l3']['params']['layer_type'] = '1D'
        net['l3']['shape']['input'] = net['l1']['shape']['output']
        net['l3']['shape']['output'] = [128, 12]
        inp = data(net['l1'], '', 'l1')['l1']
        temp = convolution(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'Conv1D')
        # Conv 2D
        net['l0']['connection']['output'].append('l0')
        net['l3']['connection']['input'] = ['l0']
        net['l3']['params']['layer_type'] = '2D'
        net['l3']['shape']['input'] = net['l0']['shape']['output']
        net['l3']['shape']['output'] = [128, 226, 226]
        inp = data(net['l0'], '', 'l0')['l0']
        temp = convolution(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'Conv2D')
        # Conv 3D
        net['l2']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l2']
        net['l3']['params']['layer_type'] = '3D'
        net['l3']['shape']['input'] = net['l2']['shape']['output']
        net['l3']['shape']['output'] = [128, 226, 226, 18]
        inp = data(net['l2'], '', 'l2')['l2']
        temp = convolution(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'Conv3D')
Source: color_names.py, project: Color-Names, author: airalcorn2
def build_words2color_model(max_tokens, dim):
    """Build a model that learns to generate colors from words.

    :param max_tokens:
    :param dim:
    :return:
    """
    model = Sequential()
    model.add(Conv1D(128, 1, input_shape = (max_tokens, dim), activation = "tanh"))
    model.add(GlobalMaxPooling1D())
    model.add(Dropout(0.5))
    model.add(Dense(3))

    model.compile(loss = "mse", optimizer = "sgd")
    return model
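A usage sketch; the token count and embedding dimension are placeholders, and reading the Dense(3) output as an RGB triple is an assumption about the Color-Names setup.

# Hypothetical call; 5 tokens per color name and 300-d word vectors are assumptions.
words2color = build_words2color_model(max_tokens=5, dim=300)
words2color.summary()  # input (None, 5, 300) -> output (None, 3), e.g. an RGB triple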
Source: drugai.py, project: DrugAI, author: Gananath
def Discriminator(y_dash, dropout=0.4, lr=0.00001, PATH="Dis.h5"):
    """Creates a discriminator model that takes an image as input and outputs a single value, representing whether
the input is real or generated. Unlike normal GANs, the output is not sigmoid and does not represent a probability!
Instead, the output should be as large and negative as possible for generated inputs and as large and positive
as possible for real inputs."""
    model = Sequential()
    # Keras 2 argument names (filters / kernel_size / padding) for the legacy Keras 1 kwargs
    model.add(Conv1D(input_shape=(y_dash.shape[1], y_dash.shape[2]),
                     filters=25,
                     kernel_size=4,
                     padding='same'))
    model.add(LeakyReLU())
    model.add(Dropout(dropout))
    model.add(MaxPooling1D())
    model.add(Conv1D(filters=10,
                     kernel_size=4,
                     padding='same'))
    model.add(LeakyReLU())
    model.add(Dropout(dropout))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense(64))
    model.add(LeakyReLU())
    model.add(Dropout(dropout))
    model.add(Dense(1))
    model.add(Activation('linear'))

    opt = Adam(lr, beta_1=0.5, beta_2=0.9)

    #reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.9, patience=30, min_lr=0.000001, verbose=1)
    checkpoint_D = ModelCheckpoint(
        filepath=PATH, verbose=1, save_best_only=True)

    model.compile(optimizer=opt,
                  loss=wasserstein_loss,
                  metrics=['accuracy'])
    return model, checkpoint_D
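wasserstein_loss is referenced in the compile call but not defined in this excerpt; the conventional Keras formulation used with critics like this is shown below (an assumption about DrugAI's definition, not a verified copy).

# Common Wasserstein (critic) loss; real/generated labels are typically +1 / -1.
import keras.backend as K

def wasserstein_loss(y_true, y_pred):
    return K.mean(y_true * y_pred)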

