Usage examples of the Python class Dropout()
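The snippets below come from open-source projects and show keras.layers.Dropout in real models. As a minimal reference sketch before the project code (layer sizes and the 0.5 rate are illustrative):

from keras.models import Sequential
from keras.layers import Dense, Dropout

# Dropout randomly zeroes a fraction `rate` of its inputs during training
# and is a no-op at inference time.
model = Sequential([
    Dense(64, activation='relu', input_shape=(100,)),
    Dropout(0.5),          # drop 50% of the activations while training
    Dense(10, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam')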

model.py (project: latplan, author: guicho271828)
def build_encoder(self,input_shape):
        return [Reshape((*input_shape,1)),
                GaussianNoise(self.parameters['noise']),
                BN(),
                *[Convolution2D(self.parameters['clayer'],(3,3),
                                activation=self.parameters['activation'],padding='same', use_bias=False),
                  Dropout(self.parameters['dropout']),
                  BN(),
                  MaxPooling2D((2,2)),],
                *[Convolution2D(self.parameters['clayer'],(3,3),
                                activation=self.parameters['activation'],padding='same', use_bias=False),
                  Dropout(self.parameters['dropout']),
                  BN(),
                  MaxPooling2D((2,2)),],
                flatten,
                Sequential([
                    Dense(self.parameters['layer'], activation=self.parameters['activation'], use_bias=False),
                    BN(),
                    Dropout(self.parameters['dropout']),
                    Dense(self.parameters['N']*self.parameters['M']),
                ])]
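Note that `BN` and `flatten` are latplan-local aliases (BatchNormalization and a flatten helper), and `build_encoder` returns a plain list of layers rather than a wired model. A hedged sketch of how such a layer list can be applied; the helper and the stand-in layer list are illustrative, not latplan's own code:

from keras.layers import Input, Dense, BatchNormalization, Flatten

def apply_layers(x, layers):
    # Chain a list of Keras layers/callables onto a tensor.
    for layer in layers:
        x = layer(x)
    return x

# illustrative stand-in for an encoder layer list like the one above
encoder_layers = [Flatten(), BatchNormalization(), Dense(49)]
z = apply_layers(Input(shape=(28, 28)), encoder_layers)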
model.py (project: latplan, author: guicho271828)
def build_encoder(self,input_shape):
        return [Reshape((*input_shape,1)),
                GaussianNoise(0.1),
                BN(),
                Convolution2D(self.parameters['clayer'],(3,3),
                              activation=self.parameters['activation'],padding='same', use_bias=False),
                Dropout(self.parameters['dropout']),
                BN(),
                MaxPooling2D((2,2)),
                Convolution2D(self.parameters['clayer'],(3,3),
                              activation=self.parameters['activation'],padding='same', use_bias=False),
                Dropout(self.parameters['dropout']),
                BN(),
                MaxPooling2D((2,2)),
                Convolution2D(self.parameters['clayer'],(3,3),
                              activation=self.parameters['activation'],padding='same', use_bias=False),
                Dropout(self.parameters['dropout']),
                BN(),
                MaxPooling2D((2,2)),
                flatten,]
windpuller.py (project: DeepTrade_keras, author: happynoom)
def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2, loss='risk_estimation'):
        print("initializing..., learing rate %s, n_layers %s, n_hidden %s, dropout rate %s." %(lr, n_layers, n_hidden, rate_dropout))
        self.model = Sequential()
        self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))
        for i in range(0, n_layers - 1):
            self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout))
        self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout))
        self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))
        # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),
        #               moving_variance_initializer=Constant(value=0.25)))
        self.model.add(BatchRenormalization(axis=-1, beta_init=Constant(value=0.5)))  # BatchRenormalization comes from keras-contrib
        self.model.add(Activation('relu_limited'))  # 'relu_limited' is a custom activation registered elsewhere in this project
        opt = RMSprop(lr=lr)
        self.model.compile(loss=loss,
                      optimizer=opt,
                      metrics=['accuracy'])
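This constructor applies `rate_dropout` in three distinct ways: a Dropout layer on the raw input, `dropout` on each LSTM's input connections, and `recurrent_dropout` on its state connections. Since `BatchRenormalization` and `relu_limited` are project customs, here is a hedged plain-Keras reduction of the same dropout wiring (shapes and rates are illustrative):

from keras.models import Sequential
from keras.layers import Dropout, LSTM, Dense

model = Sequential()
model.add(Dropout(0.2, input_shape=(30, 61)))        # drop raw input features
model.add(LSTM(32, return_sequences=True,
               dropout=0.2, recurrent_dropout=0.2))   # input + recurrent dropout
model.add(LSTM(8, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1))
model.compile(loss='mse', optimizer='rmsprop')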
model.py (project: keras_detect_tool_wear, author: kidozh)
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
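For `add([out, pooling])` to be valid, the pooled shortcut and the conv path must agree in both sequence length and channel count, so `k2` has to match the input's channel dimension here. A hedged shape check, assuming `first_block` as defined above is in scope (sizes are illustrative):

from keras.layers import Input
from keras.models import Model

# 128 timesteps, 16 channels; k2 = 16 keeps the residual add shape-compatible.
inp = Input(shape=(128, 16))
out = first_block(inp, filters=(32, 16))
print(Model(inp, out).output_shape)   # expected: (None, 128, 16)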
encoders.py (project: onto-lstm, author: pdasigi)
def get_encoded_phrase(self, phrase_input_layer, dropout={}, embedding=None):
        '''
        Takes a Keras input layer and a dropout spec, and returns the output of the
        encoder as a Keras object.

        Arguments:
            phrase_input_layer (Input): Keras Input layer of the appropriate shape.
            dropout (dict [str -> float]): Dict containing dropout values applied after
                "embedding" and "encoder".
            embedding (str): Optional gzipped embedding file to use as initialization
                for the embedding layer.
        '''
        embedding_layer = self._get_embedding_layer(embedding)
        embedded_phrase = embedding_layer(phrase_input_layer)
        embedding_dropout = dropout.pop("embedding", 0.0)
        if embedding_dropout > 0:
            embedded_phrase = Dropout(embedding_dropout)(embedded_phrase)
        encoder = self._get_encoder_layer()
        encoded_phrase = encoder(embedded_phrase)
        encoder_dropout = dropout.pop("encoder", 0.0)
        if encoder_dropout > 0:
            encoded_phrase = Dropout(encoder_dropout)(encoded_phrase)
        return encoded_phrase
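The `dropout` dict is consumed with `pop`, so missing keys default to 0.0 and no Dropout layer is inserted; note that `pop` also mutates the caller's dict. A hedged standalone illustration of the same pattern (layer sizes and the copy are illustrative, not onto-lstm's code):

from keras.layers import Input, Embedding, LSTM, Dropout

def encode(phrase_input, dropout=None):
    # Apply dropout after the embedding/encoder only when a positive rate is given.
    dropout = dict(dropout or {})            # copy so the caller's dict survives
    x = Embedding(10000, 100)(phrase_input)
    rate = dropout.pop("embedding", 0.0)
    if rate > 0:
        x = Dropout(rate)(x)
    x = LSTM(64)(x)
    rate = dropout.pop("encoder", 0.0)
    if rate > 0:
        x = Dropout(rate)(x)
    return x

# usage: encode(Input(shape=(20,)), {"embedding": 0.5, "encoder": 0.2})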
example_gan_convolutional.py (project: Deep-Learning-with-Keras, author: PacktPublishing)
def model_discriminator(input_shape=(1, 28, 28), dropout_rate=0.5):
    d_input = dim_ordering_input(input_shape, name="input_x")
    nch = 512
    # nch = 128
    H = Convolution2D(int(nch / 2), 5, 5, subsample=(2, 2), border_mode='same', activation='relu')(d_input)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Convolution2D(nch, 5, 5, subsample=(2, 2), border_mode='same', activation='relu')(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Flatten()(H)
    H = Dense(int(nch / 2))(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    d_V = Dense(1, activation='sigmoid')(H)
    return Model(d_input, d_V)
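This snippet uses the Keras 1 API (`subsample`, `border_mode`, `Convolution2D(n, 5, 5)`); also note the convolutions already apply `relu`, so the following `LeakyReLU(0.2)` only ever sees non-negative values. A hedged Keras 2 rewrite of the same head with the redundant activation removed (channels-last input shape is assumed):

from keras.layers import Input, Conv2D, LeakyReLU, Dropout, Flatten, Dense
from keras.models import Model

def discriminator_k2(input_shape=(28, 28, 1), dropout_rate=0.5, nch=512):
    # Keras 2 equivalent: strides/padding keywords, LeakyReLU as the only activation.
    d_input = Input(shape=input_shape, name="input_x")
    H = Conv2D(nch // 2, (5, 5), strides=(2, 2), padding='same')(d_input)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Conv2D(nch, (5, 5), strides=(2, 2), padding='same')(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Flatten()(H)
    H = Dense(nch // 2)(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    return Model(d_input, Dense(1, activation='sigmoid')(H))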
models.py (project: AutoSleepScorerDev, author: skjerns)
def largeann(input_shape, n_classes, layers=3, neurons=2000, dropout=0.35 ):
    """
    for working with extracted features
    """
#    gpu = switch_gpu()
#    with K.tf.device('/gpu:{}'.format(gpu)):
#        K.set_session(K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)))
    model = Sequential(name='ann')
#    model.gpu = gpu
    for l in range(layers):
        # note: input_shape is only honored on the first layer added to a Sequential model
        model.add(Dense(neurons, input_shape=input_shape, activation='elu', kernel_initializer='he_normal'))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=[keras.metrics.categorical_accuracy])
    return model
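A hedged usage sketch; the feature dimension and class count are illustrative, and it assumes `largeann` as defined above plus `keras` and `Adam` in scope:

# build an ANN over 2000-dimensional extracted features, 5 sleep stages
model = largeann(input_shape=(2000,), n_classes=5)
model.summary()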

#%% everything recurrent for ANN
han1.py (project: 3HAN, author: ni9elf)
def HAN1(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    #model = Sequential()
    wordInputs = Input(shape=(MAX_WORDS,), name='word1', dtype='float32')

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=True, trainable=True, name='emb1')(wordInputs) #Assuming all the sentences have same number of words. Check for input_length again.


    hij = Bidirectional(GRU(WORDGRU, name='gru1', return_sequences=True))(wordEmbedding)


    wordDrop = Dropout(DROPOUTPER, name='drop1')(hij)

    alpha_its, Si = AttentionLayer(name='att1')(wordDrop)   

    v6 = Dense(1, activation="sigmoid", name="dense")(Si)
    #model.add(Dense(1, activation="sigmoid", name="documentOut3"))
    model = Model(inputs=[wordInputs] , outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
layers.py (project: deeppavlov, author: deepmipt)
def answer_start_pred(context_encoding, question_attention_vector, context_mask, W, dropout_rate):
    """Answer start prediction layer."""

    answer_start = Lambda(lambda arg:
                          concatenate([arg[0], arg[1], arg[2]]))([
        context_encoding,
        question_attention_vector,
        multiply([context_encoding, question_attention_vector])])

    answer_start = TimeDistributed(Dense(W, activation='relu'))(answer_start)
    answer_start = Dropout(rate=dropout_rate)(answer_start)
    answer_start = TimeDistributed(Dense(1))(answer_start)

    # apply masking
    answer_start = Lambda(lambda q: masked_softmax(q[0], q[1]))([answer_start, context_mask])
    answer_start = Lambda(lambda q: flatten(q))(answer_start)
    return answer_start
model.py (project: deeppavlov, author: deepmipt)
def create_lstm_layer(self, input_dim):
        """Create a LSTM layer of a model."""

        inp = Input(shape=(input_dim, self.embedding_dim,))
        inp_dropout = Dropout(self.ldrop_val)(inp)
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        outp = LSTM(self.hidden_dim, input_shape=(input_dim, self.embedding_dim,),
                    kernel_regularizer=None,
                    recurrent_regularizer=None,
                    bias_regularizer=None,
                    activity_regularizer=None,
                    recurrent_dropout=self.recdrop_val,
                    dropout=self.inpdrop_val,
                    kernel_initializer=ker_in,
                    recurrent_initializer=rec_in,
                    return_sequences=True)(inp_dropout)
        outp_dropout = Dropout(self.dropout_val)(outp)
        model = Model(inputs=inp, outputs=outp_dropout, name="LSTM_encoder")
        return model
model.py (project: deeppavlov, author: deepmipt)
def create_lstm_layer_2(self, input_dim):
        """Create a LSTM layer of a model."""

        inp = Input(shape=(input_dim, 2*self.perspective_num,))
        inp_drop = Dropout(self.ldrop_val)(inp)
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        bioutp = Bidirectional(LSTM(self.aggregation_dim,
                                    input_shape=(input_dim, 2*self.perspective_num,),
                                    kernel_regularizer=None,
                                    recurrent_regularizer=None,
                                    bias_regularizer=None,
                                    activity_regularizer=None,
                                    recurrent_dropout=self.recdrop_val,
                                    dropout=self.inpdrop_val,
                                    kernel_initializer=ker_in,
                                    recurrent_initializer=rec_in,
                                    return_sequences=True), merge_mode=None)(inp_drop)
        dropout_forw = Dropout(self.dropout_val)(bioutp[0])
        dropout_back = Dropout(self.dropout_val)(bioutp[1])
        model = Model(inputs=inp, outputs=[dropout_forw, dropout_back], name="biLSTM_enc_persp")
        return model
model.py (project: deeppavlov, author: deepmipt)
def create_lstm_layer_last(self, input_dim):
        """Create a LSTM layer of a model."""

        inp = Input(shape=(input_dim,  self.embedding_dim,))
        inp_drop = Dropout(self.ldrop_val)(inp)
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        bioutp = Bidirectional(LSTM(self.hidden_dim,
                                    input_shape=(input_dim, self.embedding_dim,),
                                    kernel_regularizer=None,
                                    recurrent_regularizer=None,
                                    bias_regularizer=None,
                                    activity_regularizer=None,
                                    recurrent_dropout=self.recdrop_val,
                                    dropout=self.inpdrop_val,
                                    kernel_initializer=ker_in,
                                    recurrent_initializer=rec_in,
                                    return_sequences=False), merge_mode='concat')(inp_drop)
        dropout = Dropout(self.dropout_val)(bioutp)
        model = Model(inputs=inp, outputs=dropout, name="biLSTM_encoder_last")
        return model
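All three deeppavlov builders follow one template: Dropout on the input, an (optionally bidirectional) LSTM with both `dropout` (input connections) and `recurrent_dropout` (state connections), then Dropout on the output. A hedged condensed form of that template (dimensions, rates, and the seed are illustrative):

from keras.layers import Input, Dropout, LSTM, Bidirectional
from keras.models import Model
from keras.initializers import glorot_uniform, Orthogonal

def lstm_encoder(seq_len, emb_dim, hidden_dim, drop=0.2, seed=42):
    # Dropout -> BiLSTM with input/recurrent dropout -> Dropout.
    inp = Input(shape=(seq_len, emb_dim))
    x = Dropout(drop)(inp)
    x = Bidirectional(LSTM(hidden_dim,
                           dropout=drop, recurrent_dropout=drop,
                           kernel_initializer=glorot_uniform(seed=seed),
                           recurrent_initializer=Orthogonal(seed=seed),
                           return_sequences=True), merge_mode='concat')(x)
    x = Dropout(drop)(x)
    return Model(inputs=inp, outputs=x, name="biLSTM_encoder")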
GoogleNet.py (project: AerialCrackDetection_Keras, author: TTMRonald)
def classifier(base_layers, input_rois, batch_size, nb_classes = 3, trainable=False):

    # compile times tend to be very high, so we use smaller ROI pooling regions as a workaround

    if K.backend() == 'tensorflow':
        pooling_regions = 14
        input_shape = (batch_size,14,14,2048)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (batch_size,2048,7,7)

    out_roi_pool = RoiPoolingConv(pooling_regions, batch_size)([base_layers, input_rois])
    out = TimeDistributed(Flatten())(out_roi_pool)
#    out = TimeDistributed(Dropout(0.4))(out)
#    out = TimeDistributed(Dense(2048,activation='relu'))(out)

    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * nb_classes, activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
ZF.py (project: AerialCrackDetection_Keras, author: TTMRonald)
def classifier(base_layers, input_rois, batch_size, nb_classes = 3, trainable=False):

    # compile times tend to be very high, so we use smaller ROI pooling regions as a workaround

    if K.backend() == 'tensorflow':
        pooling_regions = 14
        input_shape = (batch_size,14,14,512)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (batch_size,512,7,7)

    out_roi_pool = RoiPoolingConv(pooling_regions, batch_size)([base_layers, input_rois])
    out = TimeDistributed(Flatten())(out_roi_pool)
    out = TimeDistributed(Dense(4096,activation='relu'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out = TimeDistributed(Dense(4096,activation='relu'))(out)
    out = TimeDistributed(Dropout(0.5))(out)

    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * nb_classes, activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
VGG_16.py (project: AerialCrackDetection_Keras, author: TTMRonald)
def classifier(base_layers, input_rois, batch_size, nb_classes = 3, trainable=False):

    # compile times tend to be very high, so we use smaller ROI pooling regions as a workaround
    if K.backend() == 'tensorflow':
        pooling_regions = 14
        input_shape = (batch_size,14,14,1024)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (batch_size,1024,7,7)

    out_roi_pool = RoiPoolingConv(pooling_regions, batch_size)([base_layers, input_rois])
    out = TimeDistributed(Flatten())(out_roi_pool)
    out = TimeDistributed(Dense(4096,activation='relu'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out = TimeDistributed(Dense(4096,activation='relu'))(out)
    out = TimeDistributed(Dropout(0.5))(out)

    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * nb_classes, activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
vgg.py (project: keras-frcnn, author: yhenon)
def classifier(base_layers, input_rois, num_rois, nb_classes = 21, trainable=False):

    # compile times on theano tend to be very high, so we use smaller ROI pooling regions as a workaround

    if K.backend() == 'tensorflow':
        pooling_regions = 7
        input_shape = (num_rois,7,7,512)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (num_rois,512,7,7)

    out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])

    out = TimeDistributed(Flatten(name='flatten'))(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc1'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc2'))(out)
    out = TimeDistributed(Dropout(0.5))(out)

    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * (nb_classes-1), activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)

    return [out_class, out_regr]
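The four `classifier` heads above share one pattern: `RoiPoolingConv` (a project-custom layer) emits a `(batch, num_rois, h, w, channels)` tensor, and every subsequent Flatten/Dense/Dropout is wrapped in `TimeDistributed` so it is applied to each ROI independently. A hedged standalone sketch of just the TimeDistributed head (the ROI tensor shape stands in for the custom layer's output):

from keras.layers import Input, TimeDistributed, Flatten, Dense, Dropout

num_rois, nb_classes = 32, 21
roi_features = Input(shape=(num_rois, 7, 7, 512))   # stand-in for RoiPoolingConv output
out = TimeDistributed(Flatten())(roi_features)
out = TimeDistributed(Dense(4096, activation='relu'))(out)
out = TimeDistributed(Dropout(0.5))(out)            # dropout applied per ROI
out_class = TimeDistributed(Dense(nb_classes, activation='softmax'))(out)
out_regr = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear'))(out)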
model.py (project: latplan, author: guicho271828)
def build_encoder(self,input_shape):
        return [GaussianNoise(self.parameters['noise']),
                BN(),
                Dense(self.parameters['layer'], activation='relu', use_bias=False),
                BN(),
                Dropout(self.parameters['dropout']),
                Dense(self.parameters['layer'], activation='relu', use_bias=False),
                BN(),
                Dropout(self.parameters['dropout']),
                Dense(self.parameters['layer'], activation='relu', use_bias=False),
                BN(),
                Dropout(self.parameters['dropout']),
                Dense(self.parameters['N']*self.parameters['M']),]
model.py (project: latplan, author: guicho271828)
def _build(self,input_shape):
        x = Input(shape=input_shape)
        N = input_shape[0] // 2

        y = Sequential([
            flatten,
            *[Sequential([BN(),
                          Dense(self.parameters['layer'],activation=self.parameters['activation']),
                          Dropout(self.parameters['dropout']),])
              for i in range(self.parameters['num_layers']) ],
            Dense(1,activation="sigmoid")
        ])(x)

        self.loss = bce
        self.net = Model(x, y)
        # self.callbacks.append(self.linear_schedule([0.2,0.5], 0.1))
        self.callbacks.append(GradientEarlyStopping(verbose=1,epoch=50,min_grad=self.parameters['min_grad']))
        # self.custom_log_functions['lr'] = lambda: K.get_value(self.net.optimizer.lr)
sequence_encoders.py (project: keras-text, author: raghakot)
def __call__(self, x):
        """Build the actual model here.

        Args:
            x: The encoded or embedded input sequence.

        Returns:
            The model output tensor.
        """
        # Avoid mask propagation when dynamic mini-batches are not supported.
        if not self.allows_dynamic_length():
            x = ConsumeMask()(x)

        x = self.build_model(x)
        if self.dropout_rate > 0:
            x = Dropout(self.dropout_rate)(x)
        return x
lstm_bilstm.py (project: sota_sentiment, author: jbarnesspain)
def create_BiLSTM(wordvecs, lstm_dim=300, output_dim=2, dropout=.5,
                weights=None, train=True):
    model = Sequential()
    if weights is not None:
        model.add(Embedding(len(wordvecs) + 1,
                            len(wordvecs['the']),
                            weights=[weights],
                            trainable=train))
    else:
        model.add(Embedding(len(wordvecs) + 1,
                            len(wordvecs['the']),
                            trainable=train))
    model.add(Dropout(dropout))
    model.add(Bidirectional(LSTM(lstm_dim)))
    model.add(Dropout(dropout))
    model.add(Dense(output_dim, activation='softmax'))
    if output_dim == 2:
        model.compile('adam', 'binary_crossentropy',
                  metrics=['accuracy'])
    else:
        model.compile('adam', 'categorical_crossentropy',
                  metrics=['accuracy'])
    return model
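A hedged usage sketch with a toy vocabulary; the vectors, dimensions, and padding-row convention are illustrative, and it assumes `create_BiLSTM` as defined above is in scope:

import numpy as np

wordvecs = {'the': np.zeros(50), 'cat': np.zeros(50)}   # toy 50-d vocabulary
weights = np.zeros((len(wordvecs) + 1, 50))             # row 0 reserved for padding
model = create_BiLSTM(wordvecs, lstm_dim=64, output_dim=2, dropout=0.5,
                      weights=weights, train=True)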
mnist_sklearn_wrapper.py (project: pCVR, author: xjtushilei)
def make_model(dense_layer_sizes, filters, kernel_size, pool_size):
    '''Creates a model composed of 2 convolutional layers followed by dense layers

    dense_layer_sizes: List of layer sizes.
        This list has one number for each layer
    filters: Number of convolutional filters in each convolutional layer
    kernel_size: Convolutional kernel size
    pool_size: Size of pooling area for max pooling
    '''

    model = Sequential()
    model.add(Conv2D(filters, kernel_size,
                     padding='valid',
                     input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(filters, kernel_size))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(0.25))

    model.add(Flatten())
    for layer_size in dense_layer_sizes:
        model.add(Dense(layer_size))
        model.add(Activation('relu'))  # keep an activation after each dense layer so stacked layers stay non-linear
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])

    return model
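In the original Keras example this builder is handed to scikit-learn's `KerasClassifier` so that `dense_layer_sizes` and friends can be grid-searched; `input_shape` and `num_classes` are module-level globals there. A hedged reminder of that wiring (the parameter grid and data names are illustrative):

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

clf = KerasClassifier(build_fn=make_model)      # assumes make_model from above
param_grid = {'dense_layer_sizes': [[32], [64, 64]],
              'filters': [8], 'kernel_size': [3], 'pool_size': [2],
              'epochs': [3], 'batch_size': [128]}
validator = GridSearchCV(clf, param_grid=param_grid, scoring='neg_log_loss')
# validator.fit(X_train, y_train)   # X_train/y_train are illustrative placeholders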
model.py (project: Kiddo, author: Subarno)
def load_model(input_shape, num_classes):
    model = Sequential()

    model.add(Convolution2D(6, kernel_size=(3, 3), activation='relu', input_shape=input_shape, padding="same"))
    model.add(Convolution2D(32, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Convolution2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    return model
model.py (project: keras_detect_tool_wear, author: kidozh)
def repeated_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1,kernel_size,strides=2,padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,strides=2,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,strides=4,padding='same')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
model.py (project: keras_detect_tool_wear, author: kidozh)
def build_simple_rnn_model(timestep,input_dim,output_dim,dropout=0.4,lr=0.001):
    input = Input((timestep,input_dim))
    # LSTM, Single
    output = LSTM(50,return_sequences=False)(input)
    # for _ in range(1):
    #     output = LSTM(32,return_sequences=True)(output)
    # output = LSTM(50,return_sequences=False)(output)
    output = Dropout(dropout)(output)
    output = Dense(output_dim)(output)

    model = Model(inputs=input, outputs=output)

    optimizer = Adam(lr=lr)

    model.compile(loss='mae',optimizer=optimizer,metrics=['mse'])

    return model
model.py (project: keras_detect_tool_wear, author: kidozh)
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,strides=2,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,strides=2,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
model.py (project: keras_detect_tool_wear, author: kidozh)
def repeated_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1,kernel_size,padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,strides=2,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,strides=2,padding='same')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
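A hedged sketch chaining these blocks into a small 1D residual stack, assuming the stride-2 `first_block` and `repeated_block` defined just above are in scope (sequence length, channel count, and filter sizes are illustrative):

from keras.layers import Input, GlobalAveragePooling1D, Dense
from keras.models import Model

# channel count must stay at 16 so each residual add is shape-compatible
inp = Input(shape=(256, 16))
x = first_block(inp, filters=(32, 16))        # -> (128, 16)
x = repeated_block(x, filters=(32, 16))       # -> (64, 16)
x = repeated_block(x, filters=(32, 16))       # -> (32, 16)
x = GlobalAveragePooling1D()(x)
model = Model(inp, Dense(1)(x))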
model.py (project: keras_detect_tool_wear, author: kidozh)
def repeated_2d_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1,kernel_size,2,padding='same',data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,2,padding='same',data_format='channels_last')(out)


    # stride 4 matches the two stride-2 convolutions above, keeping add() shape-compatible
    pooling = MaxPooling2D(pooling_size, strides=4, padding='same', data_format='channels_last')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
rnn.py (project: rupo, author: IlyaGusev)
def build(self) -> None:
        """
        Build the model.
        """
        inp = Input(shape=(None,))

        emb = Embedding(len(self.grapheme_alphabet), self.emb_dimension)(inp)
        encoded = Bidirectional(self.rnn(self.units1, return_sequences=True, recurrent_dropout=self.dropout))(emb)
        encoded = Dropout(self.dropout)(encoded)
        decoded = TimeDistributed(Dense(self.units2, activation="relu"))(encoded)
        predictions = TimeDistributed(Dense(len(self.phonetic_alphabet), activation="softmax"))(decoded)

        model = Model(inputs=inp, outputs=predictions)
        model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        self.model = model
grapheme_rnn.py (project: rupo, author: IlyaGusev)
def build(self) -> None:
        """
        Build the model.
        """
        inp = Input(shape=(None,))

        emb = Embedding(len(self.grapheme_set), self.emb_dimension)(inp)
        encoded = Bidirectional(self.rnn(self.units, return_sequences=True, recurrent_dropout=self.dropout))(emb)
        encoded = Dropout(self.dropout)(encoded)
        decoded = TimeDistributed(Dense(self.units, activation="relu"))(encoded)
        predictions = TimeDistributed(Dense(3, activation="softmax"))(decoded)

        model = Model(inputs=inp, outputs=predictions)
        model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        self.model = model
phoneme_rnn.py (project: rupo, author: IlyaGusev)
def build(self) -> None:
        """
        Build the model.
        """
        inp = Input(shape=(None,))

        emb = Embedding(len(self.phonetic_alphabet), self.emb_dimension)(inp)
        encoded = Bidirectional(self.rnn(self.units, return_sequences=True, recurrent_dropout=self.dropout))(emb)
        encoded = Dropout(self.dropout)(encoded)
        decoded = Bidirectional(self.rnn(self.units, return_sequences=True, recurrent_dropout=self.dropout))(encoded)
        decoded = Dropout(self.dropout)(decoded)
        predictions = TimeDistributed(Dense(3, activation="softmax"))(decoded)

        model = Model(inputs=inp, outputs=predictions)
        model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        self.model = model
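All three rupo builders share a dropout recipe: `recurrent_dropout` inside the recurrent cell plus a plain `Dropout` between the encoder and the TimeDistributed decoder. A hedged standalone version of the tagger variant (alphabet sizes, units, and the function name are illustrative, not rupo's API):

from keras.layers import Input, Embedding, Bidirectional, LSTM, Dropout, TimeDistributed, Dense
from keras.models import Model

def build_tagger(vocab_size=60, n_tags=50, emb_dim=30, units=256, dropout=0.2):
    # recurrent_dropout regularizes the hidden state; Dropout the encoder output.
    inp = Input(shape=(None,))
    x = Embedding(vocab_size, emb_dim)(inp)
    x = Bidirectional(LSTM(units, return_sequences=True,
                           recurrent_dropout=dropout))(x)
    x = Dropout(dropout)(x)
    x = TimeDistributed(Dense(units, activation="relu"))(x)
    out = TimeDistributed(Dense(n_tags, activation="softmax"))(x)
    model = Model(inputs=inp, outputs=out)
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
    return model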

