Example source code for the Python TimeDistributed() class
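TimeDistributed wraps an inner layer and applies it, with one shared set of weights, to every timestep of its input. A minimal sketch of the wrapper (not taken from any of the projects below):

from keras.models import Sequential
from keras.layers import TimeDistributed, Dense

model = Sequential()
# Dense(8) runs on each of the 10 timesteps independently,
# sharing a single weight matrix across all of them
model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
model.compile(loss='mse', optimizer='adam')
print(model.output_shape)  # (None, 10, 8)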

stateful_lstm_regression.py (project: plasma, author: jnkh)
# Imports assumed by this snippet (the original file defines them elsewhere):
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense, Activation, TimeDistributed

def build_model(predict, batch_size, length, featurelen):
    # at prediction time the model consumes one timestep of one sequence at a time
    if predict:
        batch_size = length = 1
    model = Sequential()
    # stateful=True carries the LSTM state across batches, which is why the
    # fixed batch_input_shape is required
    model.add(LSTM(10, return_sequences=True,
                   batch_input_shape=(batch_size, length, featurelen),
                   stateful=True))
    model.add(Dropout(0.2))
    model.add(LSTM(10, return_sequences=True, stateful=True))
    model.add(Dropout(0.2))
    # one Dense regression head applied at every timestep
    model.add(TimeDistributed(Dense(featurelen)))
    model.add(Activation('tanh'))
    model.compile(loss='mse', optimizer='rmsprop')
    model.reset_states()
    return model
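A hedged usage sketch (random data, illustrative sizes, not from the plasma project): because the layers are stateful, reset_states() must be called whenever the carried-over LSTM state should not leak into the next pass over the data.

import numpy as np

m = build_model(predict=False, batch_size=8, length=20, featurelen=3)
X = np.random.randn(8, 20, 3)
for epoch in range(2):
    m.train_on_batch(X, X)  # toy target: reproduce the input
    m.reset_states()        # decouple the epochs for the stateful layers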
RNN-CNN_feature_extract.py (project: Book_DeepLearning_Practice, author: wac81)
# Imports assumed by this snippet; max_features and max_seq are
# module-level globals in the original project.
from keras.models import Sequential
from keras.layers import (Embedding, Bidirectional, LSTM, TimeDistributed,
                          Dense, Activation, MaxPooling1D, Flatten)
from keras.utils.visualize_util import plot  # Keras 1 API; plot_model in Keras 2

def text_feature_extract_model1(embedding_size=128, hidden_size=256):
    '''
    A model that uses a plain Bi-LSTM plus max-pooling to extract text features.

    Example predictions (the original Chinese sample sentences were lost to an
    encoding error; each line paired a sentence with its predicted score):
        <sentence> [  1.62172219e-05]
        <sentence> [ 1.]
        ...

    :return: the compiled Keras model
    '''
    model = Sequential()
    model.add(Embedding(input_dim=max_features,
                        output_dim=embedding_size,
                        input_length=max_seq))
    model.add(Bidirectional(LSTM(hidden_size, return_sequences=True)))
    # integer division so Dense receives an int under Python 3
    model.add(TimeDistributed(Dense(embedding_size // 2)))
    model.add(Activation('softplus'))
    model.add(MaxPooling1D(5))
    model.add(Flatten())
    # model.add(Dense(2048, activation='softplus'))
    # model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    plot(model, to_file="text_feature_extract_model1.png", show_shapes=True)
    return model
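A hedged shape walkthrough of the model above, under assumed values for the module globals (max_features = 20000, max_seq = 100):

#   Embedding                     -> (None, 100, 128)
#   Bidirectional(LSTM(256, ...)) -> (None, 100, 512)   forward + backward
#   TimeDistributed(Dense(64))    -> (None, 100, 64)    per-timestep projection
#   MaxPooling1D(5)               -> (None, 20, 64)     time axis divided by 5
#   Flatten                       -> (None, 1280)
#   Dense(1, sigmoid)             -> (None, 1)          scores as in the docstring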
hierarchical_layers.py (project: text_classification, author: senochow)
# Imports assumed by this snippet; MGU (a Minimal Gated Unit recurrent layer)
# and AttentionLayer are custom layers defined elsewhere in the project.
from keras.models import Sequential, Model
from keras.layers import (Input, Embedding, TimeDistributed, Bidirectional,
                          GlobalMaxPooling1D)

def HierarchicalRNN(embed_matrix, max_words, ans_cnt, sequence_length, embedding_dim, lstm_dim=100):
    ''' Hierarchical RNN model
        Input: (batch_size, answers, answer words)
    Args:
        embed_matrix: pretrained word embedding matrix
        max_words:    word dict size of the embedding layer
        ans_cnt:      answer count
        sequence_length: words per answer
        embedding_dim: embedding dimension
        lstm_dim:     hidden size of the recurrent layers
    '''
    hnn = Sequential()
    x = Input(shape=(ans_cnt, sequence_length))
    # 1. time-distributed word embedding: -> (None, ans_cnt, sequence_length, embedding_dim)
    words_embed = TimeDistributed(Embedding(max_words, embedding_dim,
                                            input_length=sequence_length,
                                            weights=[embed_matrix]))(x)
    # 2. word-level bidirectional RNN: -> (None, ans_cnt, sequence_length, 2*lstm_dim)
    word_lstm = TimeDistributed(Bidirectional(MGU(lstm_dim, return_sequences=True)))(words_embed)

    # 3. pooling over the words of each answer: -> (None, ans_cnt, 2*lstm_dim)
    word_avg = TimeDistributed(GlobalMaxPooling1D())(word_lstm)
    #word_avg = TimeDistributed(AttentionLayer(lstm_dim*2))(word_lstm)

    # 4. sentence-level RNN over the answers: -> (None, ans_cnt, 2*lstm_dim)
    sent_lstm = Bidirectional(MGU(lstm_dim, return_sequences=True))(word_avg)

    # 5. pooling over the answers: -> (None, 2*lstm_dim)
    sent_avg = GlobalMaxPooling1D()(sent_lstm)
    #sent_avg = AttentionLayer(lstm_dim*2)(sent_lstm)
    # wrap the functional graph in a Sequential container, as the project does
    model = Model(input=x, output=sent_avg)
    hnn.add(model)
    return hnn


# vim: set expandtab ts=4 sw=4 sts=4 tw=100:
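A minimal, self-contained sketch (hypothetical sizes, not from text_classification) of the TimeDistributed(Embedding) trick used above: the same lookup table is applied to every answer in the (answers, words) grid.

from keras.models import Model
from keras.layers import Input, Embedding, TimeDistributed

x = Input(shape=(10, 100))                     # 10 answers, 100 word ids each
e = TimeDistributed(Embedding(50000, 300))(x)  # -> (None, 10, 100, 300)
print(Model(input=x, output=e).output_shape)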
models.py (project: five-video-classification-methods, author: harvitronix)
def lrcn(self):
        """Build a CNN into RNN.
        Starting version from:
            https://github.com/udacity/self-driving-car/blob/master/
                steering-models/community-models/chauffeur/models.py

        Heavily influenced by VGG-16:
            https://arxiv.org/abs/1409.1556

        Also known as an LRCN:
            https://arxiv.org/pdf/1411.4389.pdf
        """
        model = Sequential()

        model.add(TimeDistributed(Conv2D(32, (7, 7), strides=(2, 2),
            activation='relu', padding='same'), input_shape=self.input_shape))
        model.add(TimeDistributed(Conv2D(32, (3, 3),
            kernel_initializer='he_normal', activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(TimeDistributed(Conv2D(64, (3, 3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(Conv2D(64, (3, 3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(TimeDistributed(Conv2D(128, (3, 3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(Conv2D(128, (3, 3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(TimeDistributed(Conv2D(256, (3, 3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(Conv2D(256, (3, 3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(TimeDistributed(Conv2D(512, (3, 3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(Conv2D(512, (3, 3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(TimeDistributed(Flatten()))

        model.add(Dropout(0.5))
        model.add(LSTM(256, return_sequences=False, dropout=0.5))
        model.add(Dense(self.nb_classes, activation='softmax'))

        return model
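A minimal, self-contained check (hypothetical frame count and resolution) of the TimeDistributed(Conv2D) pattern above: the convolution runs on each frame independently, and only the LSTM at the end mixes information across time.

from keras.models import Sequential
from keras.layers import TimeDistributed, Conv2D

m = Sequential()
m.add(TimeDistributed(Conv2D(32, (7, 7), strides=(2, 2), padding='same',
                             activation='relu'),
                      input_shape=(40, 80, 80, 3)))  # 40 frames of 80x80 RGB
print(m.output_shape)  # (None, 40, 40, 40, 32)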
model.py (project: DeepNews, author: kabrapratik28)
def create_model(self,):
        """
        RNN model creation
        Layers include Embedding Layer, 3 LSTM stacked,
        Simple Context layer (manually defined),
        Time Distributed Layer
        """
        length_vocab, embedding_size = self.word2vec.shape
        print ("shape of word2vec matrix ", self.word2vec.shape)

        model = Sequential()

        # TODO: look at mask zero flag
        model.add(
                Embedding(
                        length_vocab, embedding_size,
                        input_length=max_length,
                        weights=[self.word2vec], mask_zero=True,
                        name='embedding_layer'
                )
        )

        for i in range(rnn_layers):
            lstm = LSTM(rnn_size, return_sequences=True,
                name='lstm_layer_%d' % (i + 1)
            )

            model.add(lstm)
            # note: no dropout between the stacked LSTM layers

        model.add(Lambda(self.simple_context,
                     mask=lambda inputs, mask: mask[:, max_len_desc:],
                     output_shape=self.output_shape_simple_context_layer,
                     name='simple_context_layer'))

        vocab_size = self.word2vec.shape[0]
        model.add(TimeDistributed(Dense(vocab_size,
                                name='time_distributed_layer')))

        model.add(Activation('softmax', name='activation_layer'))

        model.compile(loss='categorical_crossentropy', optimizer='adam')
        K.set_value(model.optimizer.lr, np.float32(learning_rate))
        model.summary()  # summary() prints itself and returns None
        return model
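The K.set_value call above is the standard Keras way to change a compiled model's learning rate in place. A hedged sketch of reusing it later, e.g. to decay the rate mid-training (K, np and learning_rate are the module's backend alias, numpy alias and global, as in the original file):

# halve the learning rate, e.g. from a training loop or callback
K.set_value(model.optimizer.lr, np.float32(0.5 * learning_rate))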
keras_models.py (project: UK_Imbalance_Price_Forecasting, author: ADGEfficiency)
def make_lstm(timestep,
              input_length,
              layer_nodes,
              dropout=0.35,
              optimizer='Adam',
              loss='mse'):
    """
    Creates a Long Short Term Memory (LSTM) neural network Keras model

    args
        timestep (int) : the length of the sequence
        input_length (int) : used to define input shape
        layer_nodes (list) : number of nodes in each of the layers input & hidden
        dropout (float) : the dropout rate for the layer-to-layer connections
        optimizer (str) : reference to the Keras optimizer we want to use
        loss (str) : reference to the Keras loss function we want to use

    returns
        model (object) : the Keras LSTM neural network model
    """

    model = Sequential()

    #  first we add the input layer
    model.add(LSTM(units=layer_nodes[0],
                   input_shape=(timestep, input_length),
                   return_sequences=True))
    #  batch norm to normalize data going into the activation functions
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    #  dropout some connections into the first hidden layer
    model.add(Dropout(dropout))

    #  now add hidden layers using the same structure
    for nodes in layer_nodes[1:]:
        model.add(LSTM(units=nodes, return_sequences=True))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

    #  add the output layer with a linear activation function
    #  we use a node size of 1 hard coded because we make one prediction
    #  per time step
    model.add(TimeDistributed(Dense(1)))
    model.add(Activation('linear'))

    #  compile model using user defined loss function and optimizer
    model.compile(loss=loss, optimizer=optimizer)
    print(model.summary())

    return model
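A hedged usage sketch (illustrative sizes, not from the project): 48 timesteps of 8 features, a 64-node input layer and one 32-node hidden layer.

model = make_lstm(timestep=48, input_length=8, layer_nodes=[64, 32])
# output shape is (None, 48, 1): one prediction per timestep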
15-keras_seq2seq_mod.py (project: albemarle, author: SeanTater)
def __init__(self, output_dim, hidden_dim, output_length, depth=1, bidirectional=True, dropout=0.1, **kwargs):
        if bidirectional and hidden_dim % 2 != 0:
            raise Exception("hidden_dim for AttentionSeq2seq must be even (because of the bidirectional RNN).")
        super(AttentionSeq2seq, self).__init__()
        if not isinstance(depth, (list, tuple)):
            depth = (depth, depth)
        if 'batch_input_shape' in kwargs:
            shape = kwargs['batch_input_shape']
            del kwargs['batch_input_shape']
        elif 'input_shape' in kwargs:
            shape = (None,) + tuple(kwargs['input_shape'])
            del kwargs['input_shape']
        elif 'input_dim' in kwargs:
            if 'input_length' in kwargs:
                input_length = kwargs['input_length']
            else:
                input_length = None
            shape = (None, input_length, kwargs['input_dim'])
            del kwargs['input_dim']
        else:
            # guard added: otherwise `shape` would be unbound below
            raise Exception("Provide one of batch_input_shape, input_shape or input_dim.")
        self.add(Layer(batch_input_shape=shape))
        if bidirectional:
            self.add(Bidirectional(LSTMEncoder(output_dim=int(hidden_dim / 2), state_input=False, return_sequences=True, **kwargs)))
        else:
            self.add(LSTMEncoder(output_dim=hidden_dim, state_input=False, return_sequences=True, **kwargs))
        for i in range(0, depth[0] - 1):
            self.add(Dropout(dropout))
            if bidirectional:
                self.add(Bidirectional(LSTMEncoder(output_dim=int(hidden_dim / 2), state_input=False, return_sequences=True, **kwargs)))
            else:
                self.add(LSTMEncoder(output_dim=hidden_dim, state_input=False, return_sequences=True, **kwargs))
        encoder = self.layers[-1]
        self.add(Dropout(dropout))
        self.add(TimeDistributed(Dense(hidden_dim if depth[1] > 1 else output_dim)))
        decoder = AttentionDecoder(hidden_dim=hidden_dim, output_length=output_length, state_input=False, **kwargs)
        self.add(Dropout(dropout))
        self.add(decoder)
        for i in range(0, depth[1] - 1):
            self.add(Dropout(dropout))
            self.add(LSTMEncoder(output_dim=hidden_dim, state_input=False, return_sequences=True, **kwargs))
        self.add(Dropout(dropout))
        self.add(TimeDistributed(Dense(output_dim, activation='softmax')))
        self.encoder = encoder
        self.decoder = decoder
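A hedged usage sketch (sizes illustrative; LSTMEncoder and AttentionDecoder come from the seq2seq library this file adapts, so it must be importable):

model = AttentionSeq2seq(output_dim=1000, hidden_dim=256, output_length=20,
                         depth=2, input_shape=(30, 1000))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')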
15-keras_seq2seq_mod.py (project: albemarle, author: SeanTater)
def __init__(self, output_dim, hidden_dim, output_length, depth=1, broadcast_state=True, inner_broadcast_state=True, peek=False, dropout=0.1, **kwargs):
        super(Seq2seq, self).__init__()
        if not isinstance(depth, (list, tuple)):
            depth = (depth, depth)
        if 'batch_input_shape' in kwargs:
            shape = kwargs['batch_input_shape']
            del kwargs['batch_input_shape']
        elif 'input_shape' in kwargs:
            shape = (None,) + tuple(kwargs['input_shape'])
            del kwargs['input_shape']
        elif 'input_dim' in kwargs:
            shape = (None, None, kwargs['input_dim'])
            del kwargs['input_dim']
        else:
            # guard added: otherwise `shape` would be unbound below
            raise Exception("Provide one of batch_input_shape, input_shape or input_dim.")
        lstms = []
        layer = LSTMEncoder(batch_input_shape=shape, output_dim=hidden_dim, state_input=False, return_sequences=depth[0] > 1, **kwargs)
        self.add(layer)
        lstms += [layer]
        for i in range(depth[0] - 1):
            self.add(Dropout(dropout))
            layer = LSTMEncoder(output_dim=hidden_dim, state_input=inner_broadcast_state, return_sequences=i < depth[0] - 2, **kwargs)
            self.add(layer)
            lstms += [layer]
        if inner_broadcast_state:
            for i in range(len(lstms) - 1):
                lstms[i].broadcast_state(lstms[i + 1])
        encoder = self.layers[-1]
        self.add(Dropout(dropout))
        decoder_type = LSTMDecoder2 if peek else LSTMDecoder
        decoder = decoder_type(hidden_dim=hidden_dim, output_length=output_length, state_input=broadcast_state, **kwargs)
        self.add(decoder)
        lstms = [decoder]
        for i in range(depth[1] - 1):
            self.add(Dropout(dropout))
            layer = LSTMEncoder(output_dim=hidden_dim, state_input=inner_broadcast_state, return_sequences=True, **kwargs)
            self.add(layer)
            lstms += [layer]
        if inner_broadcast_state:
            for i in range(len(lstms) - 1):
                lstms[i].broadcast_state(lstms[i + 1])
        if broadcast_state:
            encoder.broadcast_state(decoder)
        self.add(Dropout(dropout))
        self.add(TimeDistributed(Dense(output_dim, **kwargs)))
        self.encoder = encoder
        self.decoder = decoder
network.py (project: cocktail-party, author: avivga)
def build(video_shape, audio_spectrogram_size):
        model = Sequential()

        model.add(ZeroPadding3D(padding=(1, 2, 2), name='zero1', input_shape=video_shape))
        model.add(Convolution3D(32, (3, 5, 5), strides=(1, 2, 2), kernel_initializer='he_normal', name='conv1'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max1'))
        model.add(Dropout(0.25))

        model.add(ZeroPadding3D(padding=(1, 2, 2), name='zero2'))
        model.add(Convolution3D(64, (3, 5, 5), strides=(1, 1, 1), kernel_initializer='he_normal', name='conv2'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max2'))
        model.add(Dropout(0.25))

        model.add(ZeroPadding3D(padding=(1, 1, 1), name='zero3'))
        model.add(Convolution3D(128, (3, 3, 3), strides=(1, 1, 1), kernel_initializer='he_normal', name='conv3'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max3'))
        model.add(Dropout(0.25))

        model.add(TimeDistributed(Flatten(), name='time'))

        model.add(Dense(1024, kernel_initializer='he_normal', name='dense1'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(Dropout(0.25))

        model.add(Dense(1024, kernel_initializer='he_normal', name='dense2'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(Dropout(0.25))

        model.add(Flatten())

        model.add(Dense(2048, kernel_initializer='he_normal', name='dense3'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(Dropout(0.25))

        model.add(Dense(2048, kernel_initializer='he_normal', name='dense4'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(Dropout(0.25))

        model.add(Dense(audio_spectrogram_size, name='output'))

        model.summary()

        return VideoToSpeechNet(model)
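A hedged usage sketch (shapes illustrative, not the project's real configuration): video_shape is (frames, height, width, channels), and the final Dense layer regresses a flattened audio spectrogram.

net = build(video_shape=(9, 128, 128, 1), audio_spectrogram_size=80 * 20)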

