Python Embedding() class: example source code
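Every snippet on this page follows the same core pattern: an Embedding layer turns integer token ids into dense vectors, and the rest of the network consumes those vectors. As a point of reference before the project snippets, here is a minimal self-contained sketch of that pattern. All sizes are illustrative, and the example assumes the plain Keras Sequential API; the older snippets below use Keras 1.x argument names (nb_filter, border_mode, nb_epoch), the newer ones Keras 2.x.

# Minimal sketch of the Embedding pattern shared by the snippets below.
# All sizes are illustrative; they are not taken from any of the projects.
import numpy as np
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

vocab_size, embed_dim, max_len = 10000, 128, 50

model = Sequential()
model.add(Embedding(vocab_size, embed_dim, input_length=max_len))  # ids -> vectors
model.add(LSTM(64))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Input: integer-encoded sequences of shape (batch, max_len)
x = np.random.randint(1, vocab_size, size=(8, max_len))
print(model.predict(x).shape)  # (8, 1)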

dga_lstm.py (project: stratosphere-lstm, author: mendozawow)
def build_lstm(input_shape):
    model = Sequential()
    # model.add(Masking(input_shape=input_shape, mask_value=-1.))
    model.add(Embedding(input_shape[0], 128, input_length=input_shape[1]))

    model.add(Convolution1D(nb_filter=64,
                            filter_length=5,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=model.output_shape[1]))

    model.add(Flatten())

    model.add(Dense(128))

    # model.add(GRU(128, return_sequences=False))
    # Add dropout if overfitting
    # model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
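A hedged usage sketch for build_lstm above: judging from how input_shape is indexed, input_shape[0] is the vocabulary size passed to Embedding and input_shape[1] is the padded sequence length. The numbers below are illustrative assumptions, and the call assumes the same Keras 1.x stack as the snippet (nb_filter, border_mode and friends).

# Assumed usage of build_lstm (not from the project): input_shape is taken
# to be (vocabulary_size, padded_sequence_length) based on the indexing above.
import numpy as np

model = build_lstm((40, 75))                    # e.g. 40 symbols, sequences padded to 75
x = np.random.randint(0, 40, size=(16, 75))     # integer-encoded sequences
print(model.predict(x).shape)                   # (16, 1) sigmoid probabilities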
nn.py (project: event_chain, author: wangzq870305)
def lstm_train(X_train,y_train,vocab_size):

    X_train = sequence.pad_sequences(X_train, maxlen=MAX_LEN)

    main_input = Input(shape=(MAX_LEN,), dtype='int32')

    x = Embedding(output_dim=EMBED_SIZE, input_dim=vocab_size, input_length=MAX_LEN)(main_input)

    lstm_out = LSTM(HIDDEN_SIZE)(x)

    main_loss = Dense(1, activation='sigmoid', name='main_output')(lstm_out)

    model = Model(input=main_input, output=main_loss)

    model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    model.fit(X_train, y_train, batch_size=BATCH_SIZE, nb_epoch=EPOCHS)

    return model
model.py (project: unblackboxing_webinar, author: deepsense-ai)
def _make_embedding_layer(self, word_index):
        embeddings = self._get_embeddings()
        nb_words = min(self.max_nr_words, len(word_index))
        embedding_matrix = np.zeros((nb_words, self.embedding_dim))

        for word, i in word_index.items():
            if i >= self.max_nr_words:
                continue
            embedding_vector = embeddings.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector

        embedding_layer = Embedding(nb_words, self.embedding_dim, 
                                    weights=[embedding_matrix], 
                                    input_length=self.sequence_length, trainable=False)
        return embedding_layer
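For context, a hedged sketch of the two inputs the method above relies on: word_index maps tokens to integer ids (for example from a Keras Tokenizer), and self._get_embeddings() is expected to return a dict from token to vector, typically loaded from a pre-trained table such as GloVe. The toy corpus and random vectors below are stand-ins, not part of the original class.

# Stand-in construction of word_index and a token -> vector dict, the two
# inputs _make_embedding_layer relies on. Random vectors replace a real
# pre-trained table such as GloVe; this is an assumption, not project code.
import numpy as np
from keras.preprocessing.text import Tokenizer

texts = ["a small example corpus", "another tiny example"]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(texts)
word_index = tokenizer.word_index            # {'example': 1, 'a': 2, ...}

embedding_dim = 100
embeddings = {w: np.random.rand(embedding_dim) for w in word_index}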
learning_model.py (project: Auto-correction-for-transliterated-queries, author: GauravBh1010tt)
def train(self,data):
        print 'building model.....'
        self.prepare_data(data,re_train=True)
        inputs = Input(shape=(self.step,),dtype='int32')
        embed = Embedding(self.vocab_size,self.embedding_dims,input_length=self.step)(inputs)
        encode = LSTM(128)(embed)
        pred = Dense(self.vocab_size,activation='softmax')(encode)
        model = Model(input=inputs,output=pred)
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        history = LossHistory()
        model.fit(self.X_data, self.y_data,
                  batch_size=self.batch_size,
                  nb_epoch=self.nb_epoch,callbacks=[history])
        #self.avg_loss = loss.history['loss']
        self.history = history
        with open('history','wb') as h:
            pickle.dump(history.losses,h)

        model_json = model.to_json()
        with open("model.json", "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        model.save_weights("model.h5")
convNet.py (project: semeval2017-scienceie, author: UKPLab)
def build_cnn_char(input_dim, output_dim,nb_filter):
    clf = Sequential()
    clf.add(Embedding(input_dim,
                      32, # character embedding size
                      input_length=maxlen,
                      dropout=0.2))
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=3,border_mode="valid",activation="relu",subsample_length=1))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))

    clf.compile(optimizer='adagrad',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])
    return clf

# just one filter
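Note that build_cnn_char reads a module-level maxlen that this excerpt does not show; it must be defined in convNet.py before the function is called. A hedged usage sketch with assumed sizes (Keras 1.x, matching the nb_filter/border_mode arguments above):

# Assumed usage of build_cnn_char. maxlen is a module-level constant in
# convNet.py that this excerpt does not show; e.g. maxlen = 100 there.
import numpy as np

n_chars = 70                                    # assumed character vocabulary size
clf = build_cnn_char(input_dim=n_chars, output_dim=3, nb_filter=64)
x = np.random.randint(0, n_chars, size=(4, 100))  # (batch, maxlen) character ids, maxlen = 100 assumed
print(clf.predict(x).shape)                     # (4, 3) class probabilities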
text_model.py (project: text_classification, author: senochow)
def CNNWithKeywordLayer(embed_matrix, embed_input, sequence_length, keywords_length, filter_sizes, num_filters, dropout_prob, hidden_dims, model_variation, embedding_dim=300):
    ''' 2-way input model: the left branch is a CNN sentence encoder, the right branch encodes the keywords

    '''
    embed1 = Embedding(embed_input, embedding_dim,input_length=sequence_length, weights=[embed_matrix])
    # 1. question model part
    question_branch = Sequential()
    cnn_model = TextCNN(sequence_length, embedding_dim, filter_sizes, num_filters)
    question_branch.add(embed1)
    question_branch.add(cnn_model)
    # 2. keyword model part
    #keyword_branch = KeywordLayer(keywords_length, embed_input, embedding_dim, embed_matrix)
    keyword_branch = LSTMLayer(embed_matrix, embed_input, keywords_length, dropout_prob, hidden_dims, embedding_dim)
    # 3. merge layer
    merged = Merge([question_branch, keyword_branch], mode='concat')
    final_model = Sequential()
    final_model.add(merged)
    final_model.add(Dense(hidden_dims, W_constraint = maxnorm(3)))
    final_model.add(Dropout(0.5))
    final_model.add(Activation('relu'))
    final_model.add(Dense(1))
    final_model.add(Activation('sigmoid'))
    #sgd = SGD(lr=0.01, momentum=0.9)
    final_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return final_model
text_model.py (project: text_classification, author: senochow)
def QuestionWithAnswersModel(embed_matrix, embed_input, sequence_length, ans_cnt, keywords_length, filter_sizes, num_filters, dropout_prob, hidden_dims, embedding_dim=300):
    ''' path1: question embedding (CNN model)
        path2: answer embedding (hierarchical RNN model)
        merge
    '''
    # path 1
    embed1 = Embedding(embed_input, embedding_dim,input_length=sequence_length, weights=[embed_matrix])
    question_branch = Sequential()
    cnn_model = TextCNN(sequence_length, embedding_dim, filter_sizes, num_filters)
    question_branch.add(embed1)
    question_branch.add(cnn_model)
    # path 2
    answer_branch = HierarchicalRNN(embed_matrix, embed_input, ans_cnt, keywords_length, embedding_dim)
    merged = Merge([question_branch, answer_branch], mode='concat')
    final_model = Sequential()
    final_model.add(merged)
    final_model.add(Dense(hidden_dims, W_constraint = maxnorm(3)))
    final_model.add(Dropout(0.5))
    final_model.add(Activation('relu'))
    final_model.add(Dense(1))
    final_model.add(Activation('sigmoid'))
    final_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return final_model
# vim: set expandtab ts=4 sw=4 sts=4 tw=100:
model2predict.py (project: shell-complete, author: src-d)
def generate_model(args, nb_features, input_length, nb_repeats=1):
    """
    Generate the model.
    """
    emb_weights = np.eye(nb_features)

    model = Sequential()
    model.add(Embedding(input_dim=nb_features, output_dim=nb_features, input_length=input_length,
                        weights=[emb_weights], trainable=False))
    for layer_id in range(args.input_layers):
        model.add(args.cell_type(args.hidden_layers,
                                 return_sequences=layer_id + 1 < args.input_layers))
        model.add(Dropout(args.dropout))

    model.add(RepeatVector(nb_repeats))
    for _ in range(args.output_layers):
        model.add(args.cell_type(args.hidden_layers, return_sequences=True))
        model.add(Dropout(args.dropout))

    model.add(TimeDistributed(Dense(nb_features)))
    model.add(Activation("softmax"))
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer=args.optimizer,
                  metrics=["accuracy"])
    return model
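The np.eye weight matrix combined with trainable=False makes the Embedding above behave as a fixed one-hot encoder: index i is mapped to row i of the identity matrix. A small self-contained check of that equivalence, with illustrative sizes:

# Illustrative check: a frozen Embedding with identity weights is a one-hot lookup.
import numpy as np
from keras.models import Sequential
from keras.layers import Embedding

nb_features, input_length = 5, 3
m = Sequential()
m.add(Embedding(input_dim=nb_features, output_dim=nb_features,
                input_length=input_length,
                weights=[np.eye(nb_features)], trainable=False))
m.compile(optimizer='sgd', loss='mse')          # compile only so predict() works on older Keras
print(m.predict(np.array([[0, 2, 4]]))[0])      # rows are one-hot vectors for indices 0, 2 and 4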
model_entailment.py (project: onto-lstm, author: pdasigi)
def _get_encoded_sentence_variables(self, sent1_input_layer, sent2_input_layer, dropout,
                                        embedding_file, tune_embedding):
        if embedding_file is None:
            if not tune_embedding:
                print >>sys.stderr, "Pretrained embedding is not given. Setting tune_embedding to True."
                tune_embedding = True
            embedding = None
        else:
            # Put the embedding in a list so Keras treats it as the initial weights of the embedding layer.
            embedding = [self.data_processor.get_embedding_matrix(embedding_file, onto_aware=False)]
        vocab_size = self.data_processor.get_vocab_size(onto_aware=False)
        embedding_layer = Embedding(input_dim=vocab_size, output_dim=self.embed_dim, weights=embedding,
                                    trainable=tune_embedding, mask_zero=True, name="embedding")
        embedded_sent1 = embedding_layer(sent1_input_layer)
        embedded_sent2 = embedding_layer(sent2_input_layer)
        if "embedding" in dropout:
            embedded_sent1 = Dropout(dropout["embedding"])(embedded_sent1)
            embedded_sent2 = Dropout(dropout["embedding"])(embedded_sent2)
        if self.shared_memory:
            encoder = MultipleMemoryAccessNSE(output_dim=self.embed_dim, return_mode="output_and_memory",
                                              name="encoder")
            mmanse_sent1_input = InputMemoryMerger(name="merge_sent1_input")([embedded_sent1, embedded_sent2])
            encoded_sent1_and_memory = encoder(mmanse_sent1_input)
            encoded_sent1 = OutputSplitter("output", name="get_sent1_output")(encoded_sent1_and_memory)
            shared_memory = OutputSplitter("memory", name="get_shared_memory")(encoded_sent1_and_memory)
            mmanse_sent2_input = InputMemoryMerger(name="merge_sent2_input")([embedded_sent2, shared_memory])
            encoded_sent2_and_memory = encoder(mmanse_sent2_input)
            encoded_sent2 = OutputSplitter("output", name="get_sent2_output")(encoded_sent2_and_memory)
        else:
            encoder = NSE(output_dim=self.embed_dim, name="encoder")
            encoded_sent1 = encoder(embedded_sent1)
            encoded_sent2 = encoder(embedded_sent2)
        if "encoder" in dropout:
            encoded_sent1 = Dropout(dropout["encoder"])(encoded_sent1)
            encoded_sent2 = Dropout(dropout["encoder"])(encoded_sent2)
        return encoded_sent1, encoded_sent2
embedding.py (project: onto-lstm, author: pdasigi)
def __init__(self, word_index_size, synset_index_size, embedding_dim, set_sense_priors=True,
                 tune_embedding=True, **kwargs):
        self.embedding_dim = embedding_dim
        self.word_index_size = word_index_size
        self.synset_index_size = synset_index_size
        self.set_sense_priors = set_sense_priors
        # We have a separate "tune_embedding" field instead of using trainable because we have two sets of
        # parameters here: the embedding weights, and sense prior weights. We may want to fix only one of
        # them at a time.
        self.tune_embedding = tune_embedding
        # Convince the parent Embedding to build weights of the right shape. The effective output_dim of this layer is embedding_dim + 1 (the extra dimension holds the sense prior; see call()).
        kwargs['output_dim'] = self.embedding_dim
        kwargs['input_dim'] = self.synset_index_size
        self.onto_aware_embedding_weights = None
        super(OntoAwareEmbedding, self).__init__(**kwargs)
embedding.py (project: onto-lstm, author: pdasigi)
def call(self, x, mask=None):
        # Remove the word indices at the end before making a call to Embedding.
        x_synsets = x[:, :, :, :-1]  # (num_samples, num_words, num_senses, num_hyps)
        # Taking the last index from the first sense. The last index in all the senses will be the same.
        x_word_index = x[:, :, 0, -1]  # (num_samples, num_words)
        # (num_samples, num_words, num_senses, num_hyps, embedding_dim)
        synset_embedding = super(OntoAwareEmbedding, self).call(x_synsets, mask=None)
        # (num_samples, num_words, 1, 1)
        sense_prior_embedding = K.expand_dims(K.gather(self.sense_priors, x_word_index))
        # Now tile sense_prior_embedding and concatenate it with synset_embedding.
        # (num_samples, num_words, num_senses, num_hyps, 1)
        tiled_sense_prior_embedding = K.expand_dims(K.tile(sense_prior_embedding, (1, 1, self.num_senses, self.num_hyps)))
        synset_embedding_with_priors = K.concatenate([synset_embedding, tiled_sense_prior_embedding])
        return synset_embedding_with_priors
model.py (project: image_caption, author: MaticsL)
def image_caption_model(vocab_size=2500, embedding_matrix=None, lang_dim=100,
            max_caplen=28, img_dim=2048, clipnorm=1):
    print('generating vocab_history model v5')
    # text: current word
    lang_input = Input(shape=(1,))
    img_input = Input(shape=(img_dim,))
    seq_input = Input(shape=(max_caplen,))
    vhist_input = Input(shape=(vocab_size,))

    if embedding_matrix is not None:
        x = Embedding(output_dim=lang_dim, input_dim=vocab_size, init='glorot_uniform', input_length=1, weights=[embedding_matrix])(lang_input)
    else:
        x = Embedding(output_dim=lang_dim, input_dim=vocab_size, init='glorot_uniform', input_length=1)(lang_input)

    lang_embed = Reshape((lang_dim,))(x)
    lang_embed = merge([lang_embed, seq_input], mode='concat', concat_axis=-1)
    lang_embed = Dense(lang_dim)(lang_embed)
    lang_embed = Dropout(0.25)(lang_embed)

    merge_layer = merge([img_input, lang_embed, vhist_input], mode='concat', concat_axis=-1)
    merge_layer = Reshape((1, lang_dim+img_dim+vocab_size))(merge_layer)

    gru_1 = GRU(img_dim)(merge_layer)
    gru_1 = Dropout(0.25)(gru_1)
    gru_1 = Dense(img_dim)(gru_1)
    gru_1 = BatchNormalization()(gru_1)
    gru_1 = Activation('softmax')(gru_1)

    attention_1 = merge([img_input, gru_1], mode='mul', concat_axis=-1)
    attention_1 = merge([attention_1, lang_embed, vhist_input], mode='concat', concat_axis=-1)
    attention_1 = Reshape((1, lang_dim + img_dim + vocab_size))(attention_1)
    gru_2 = GRU(1024)(attention_1)
    gru_2 = Dropout(0.25)(gru_2)
    gru_2 = Dense(vocab_size)(gru_2)
    gru_2 = BatchNormalization()(gru_2)
    out = Activation('softmax')(gru_2)

    model = Model(input=[img_input, lang_input, seq_input, vhist_input], output=out)
    model.compile(loss='categorical_crossentropy', optimizer=RMSprop(lr=0.0001, clipnorm=1.))
    return model
gru.py (project: 3HAN, author: ni9elf)
def GRUf(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_SENTS+1, MAX_WORDS), name="wordInputs", dtype='float32')

    wordInp = Flatten()(wordInputs)

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInp) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=False), name='gru1')(wordEmbedding)

    v6 = Dense(1, activation="sigmoid", name="dense")(hij)

    model = Model(inputs=[wordInputs] , outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    return model
han_avg.py (project: 3HAN, author: ni9elf)
def fhan2_avg(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInputs) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)


    Si = GlobalAveragePooling1D()(hij)

    wordEncoder = Model(wordInputs, Si)

    # -----------------------------------------------------------------------------------------------

    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs' ,dtype='float32')

    #sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)

    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(docInputs) 

    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)

    Vb = GlobalAveragePooling1D()(hi)

    v6 = Dense(1, activation="sigmoid", kernel_initializer = 'glorot_uniform', name="dense")(Vb)
    model = Model(inputs=[docInputs] , outputs=[v6])

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model, wordEncoder
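A hedged instantiation of fhan2_avg with a random embedding matrix. The sizes are illustrative, the call assumes the imports of the original han_avg.py (Keras 2.x layers and optimizers) are in scope, and DROPOUTPER appears unused in the snippet as shown.

# Assumed instantiation of fhan2_avg (illustrative sizes, random embeddings);
# requires the same imports as the original han_avg.py.
import numpy as np

MAX_NB_WORDS, MAX_WORDS, MAX_SENTS = 5000, 30, 8
EMBEDDING_DIM, WORDGRU = 100, 50
embedding_matrix = np.random.rand(MAX_NB_WORDS, EMBEDDING_DIM)

model, word_encoder = fhan2_avg(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS,
                                EMBEDDING_DIM, WORDGRU, embedding_matrix,
                                DROPOUTPER=0.3)
docs = np.random.randint(0, MAX_NB_WORDS, size=(2, MAX_SENTS, MAX_WORDS))
print(model.predict(docs).shape)                # (2, 1) document-level probabilities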
han.py (project: 3HAN, author: ni9elf)
def han2(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):

    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')

    #print 'in han2 max-nb-words'
    #print MAX_NB_WORDS

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=True, trainable=True, name='wordEmbedding')(wordInputs) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)

    alpha_its, Si = AttentionLayer(name='att1')(hij)

    #wordDrop = Dropout(DROPOUTPER, name='wordDrop')(Si)

    wordEncoder = Model(wordInputs, Si)
    # -----------------------------------------------------------------------------------------------

    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs' ,dtype='float32')

    sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)

    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(sentenceMasking) 

    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)

    alpha_s, Vb = AttentionLayer(name='att2')(hi)

    #sentDrop = Dropout(DROPOUTPER, name='sentDrop')(Vb)

    v6 = Dense(1, activation="sigmoid", kernel_initializer = 'he_normal', name="dense")(Vb)
    model = Model(inputs=[docInputs] , outputs=[v6])

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])

    return model, wordEncoder
imdb_lm_gcnn.py (project: nn_playground, author: DingKe)
def LM(batch_size, window_size=3, vocsize=20000, embed_dim=20, hidden_dim=30, nb_layers=1):
    x = Input(batch_shape=(batch_size, None))
    # embedding: map token ids to dense vectors
    y = Embedding(vocsize+2, embed_dim, mask_zero=False)(x)
    for i in range(nb_layers-1):
        y = GCNN(hidden_dim, window_size=window_size,
                 name='gcnn{}'.format(i + 1))(y)
    y = GCNN(hidden_dim, window_size=window_size, 
             name='gcnn{}'.format(nb_layers))(y)
    y = TimeDistributed(Dense(vocsize+2, activation='softmax', name='dense{}'.format(nb_layers)))(y)

    model = Model(inputs=x, outputs=y)

    return model
char_lm_gcnn.py (project: nn_playground, author: DingKe)
def LM(batch_size, window_size=3, vocsize=20000, embed_dim=20, hidden_dim=30, nb_layers=1):
    x = Input(batch_shape=(batch_size, None))
    # embedding: map token ids to dense vectors
    y = Embedding(vocsize+2, embed_dim, mask_zero=False)(x)
    for i in range(nb_layers-1):
        y = GCNN(hidden_dim, window_size=window_size,
                 name='gcnn{}'.format(i + 1))(y)
    y = GCNN(hidden_dim, window_size=window_size, 
             name='gcnn{}'.format(nb_layers))(y)
    y = TimeDistributed(Dense(vocsize+2, activation='softmax', name='dense{}'.format(nb_layers)))(y)

    model = Model(inputs=x, outputs=y)

    return model
imdb_lm.py (project: nn_playground, author: DingKe)
def LM(batch_size, vocsize=20000, embed_dim=20, hidden_dim=30, nb_layers=1):
    x = Input(batch_shape=(batch_size, None))
    # embedding: map token ids to dense vectors
    y = Embedding(vocsize+2, embed_dim, mask_zero=False)(x)
    for i in range(nb_layers-1):
        y = LayerNormLSTM(hidden_dim, return_sequences=True, name='lnlstm{}'.format(i + 1))(y)
    y = LayerNormLSTM(hidden_dim, return_sequences=True, name='lnlstm{}'.format(nb_layers))(y)
    y = TimeDistributed(Dense(vocsize+2, activation='softmax', name='dense{}'.format(nb_layers)))(y)

    model = Model(inputs=x, outputs=y)

    return model
imdb_lm.py (project: nn_playground, author: DingKe)
def LM(batch_size, vocsize=20000, embed_dim=20, hidden_dim=30, nb_layers=1):
    x = Input(batch_shape=(batch_size, None))
    # embedding: map token ids to dense vectors
    y = Embedding(vocsize+2, embed_dim, mask_zero=False)(x)
    for i in range(nb_layers-1):
        y = WeightNormGRU(hidden_dim, return_sequences=True, name='wngru{}'.format(i + 1))(y)
    y = WeightNormGRU(hidden_dim, return_sequences=True, name='wngru{}'.format(nb_layers))(y)
    y = TimeDistributed(Dense(vocsize+2, activation='softmax', name='dense{}'.format(nb_layers)))(y)

    model = Model(inputs=x, outputs=y)

    return model

