Example source code for the Python LSTM class

autoencoder.py (project: keras_lstm_vae, author: twairball)
def create_lstm_autoencoder(input_dim, timesteps, latent_dim):
    """
    Creates an LSTM autoencoder and returns the compiled autoencoder model.
    (All code by fchollet - see reference.)

    # Arguments
        input_dim: int.
        timesteps: int, input timestep dimension.
        latent_dim: int, latent z-layer shape. 

    # References
        - [Building Autoencoders in Keras](https://blog.keras.io/building-autoencoders-in-keras.html)
    """

    inputs = Input(shape=(timesteps, input_dim,))
    encoded = LSTM(latent_dim)(inputs)

    decoded = RepeatVector(timesteps)(encoded)
    decoded = LSTM(input_dim, return_sequences=True)(decoded)

    encoder = Model(inputs, encoded)

    autoencoder = Model(inputs, decoded)
    autoencoder.compile(optimizer='adam', loss='mse')
    return autoencoder
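A minimal usage sketch, assuming Keras 2; the dimensions and random data below are placeholders for illustration, not part of the original project:

import numpy as np

# Hypothetical dimensions and random data, for illustration only.
timesteps, input_dim, latent_dim = 20, 3, 16
x = np.random.rand(100, timesteps, input_dim)

autoencoder = create_lstm_autoencoder(input_dim, timesteps, latent_dim)
autoencoder.fit(x, x, epochs=5, batch_size=32)  # train to reconstruct the input sequences
reconstructed = autoencoder.predict(x)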
windpuller.py (project: DeepTrade_keras, author: happynoom)
def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2, loss='risk_estimation'):
        print("initializing..., learing rate %s, n_layers %s, n_hidden %s, dropout rate %s." %(lr, n_layers, n_hidden, rate_dropout))
        self.model = Sequential()
        self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))
        for i in range(0, n_layers - 1):
            self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout))
        self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout))
        self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))
        # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),
        #               moving_variance_initializer=Constant(value=0.25)))
        self.model.add(BatchRenormalization(axis=-1, beta_init=Constant(value=0.5)))
        self.model.add(Activation('relu_limited'))
        opt = RMSprop(lr=lr)
        self.model.compile(loss=loss,
                      optimizer=opt,
                      metrics=['accuracy'])
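The loss 'risk_estimation' and activation 'relu_limited' are custom names defined by the DeepTrade_keras project, not built-in Keras identifiers; for the string lookups above to resolve, such functions have to be registered with Keras first. A hedged sketch of one way to register them (the definitions here are assumptions, not the project's actual code):

from keras import backend as K
from keras.utils.generic_utils import get_custom_objects

def relu_limited(x, alpha=0.0, max_value=1.0):
    # Assumed definition: a ReLU whose output is capped at max_value.
    return K.relu(x, alpha=alpha, max_value=max_value)

def risk_estimation(y_true, y_pred):
    # Assumed definition: negative mean return of taking position y_pred on returns y_true.
    return -100. * K.mean(y_true * y_pred)

get_custom_objects().update({
    'relu_limited': relu_limited,
    'risk_estimation': risk_estimation,
})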
dense_lstm_test.py (project: DeepAnomaly, author: adiyoss)
def test(path_test, input_size, hidden_size, batch_size, save_dir, model_name, maxlen):
    db = read_data(path_test)

    X = create_sequences(db[:-maxlen], win_size=maxlen, step=maxlen)
    X = np.reshape(X, (X.shape[0], X.shape[1], input_size))

    # build the model: 1 layer LSTM
    print('Build model...')
    model = Sequential()
    model.add(LSTM(hidden_size, return_sequences=False, input_shape=(maxlen, input_size)))
    model.add(Dense(maxlen))

    model.load_weights(save_dir + model_name)
    model.compile(loss='mse', optimizer='adam')

    prediction = model.predict(X, batch_size, verbose=1)
    prediction = prediction.flatten()
    # prediction_container = np.array(prediction).flatten()
    Y = db[maxlen:]
    plt.plot(prediction, label='prediction')
    plt.plot(Y, label='true')
    plt.legend()
    plt.show()
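read_data and create_sequences come from elsewhere in the project; a plausible create_sequences that matches the win_size/step call above would slice the series into fixed-length windows (an assumption, not the project's exact code):

import numpy as np

def create_sequences(data, win_size, step):
    # Slice a 1-D series into windows of length win_size, advancing by step.
    return np.array([data[i:i + win_size]
                     for i in range(0, len(data) - win_size + 1, step)])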
onto_attention.py (project: onto-lstm, author: pdasigi)
def __init__(self, output_dim, num_senses, num_hyps, use_attention=False, return_attention=False, **kwargs):
        # Set output_dim in kwargs so that we can pass it along to LSTM's init
        kwargs['output_dim'] = output_dim
        self.num_senses = num_senses
        self.num_hyps = num_hyps
        self.use_attention = use_attention
        self.return_attention = return_attention
        super(OntoAttentionLSTM, self).__init__(**kwargs)
        # Recurrent's constructor sets input_spec to expect 3D input; OntoLSTM takes 5D input, so override it.
        self.input_spec = [InputSpec(ndim=5)]
        if self.consume_less == "cpu":
            # In the LSTM implementation in Keras, consume_less = cpu causes all gates' inputs to be precomputed
            # and stored in memory. However, this doesn't work with OntoLSTM since the input to the gates is 
            # dependent on the previous timestep's output.
            warnings.warn("OntoLSTM does not support consume_less = cpu. Changing it to mem.")
            self.consume_less = "mem"
        #TODO: Remove this dependency.
        if K.backend() == "tensorflow" and not self.unroll:
            warnings.warn("OntoLSTM does not work with unroll=False when backend is TF. Changing it to True.")
            self.unroll = True
nse.py (project: onto-lstm, author: pdasigi)
def call(self, x, mask=None):
        # input_shape = (batch_size, input_length, input_dim). This needs to be defined in build.
        initial_read_states = self.get_initial_states(x, mask)
        fake_writer_input = K.expand_dims(initial_read_states[0], dim=1)  # (batch_size, 1, output_dim)
        initial_write_states = self.writer.get_initial_states(fake_writer_input)  # h_0 and c_0 of the writer LSTM
        initial_states = initial_read_states + initial_write_states
        # last_output: (batch_size, output_dim)
        # all_outputs: (batch_size, input_length, output_dim)
        # last_states:
        #       last_memory_state: (batch_size, input_length, output_dim)
        #       last_output
        #       last_writer_ct
        last_output, all_outputs, last_states = self.loop(x, initial_states, mask)
        last_memory = last_states[0]
        if self.return_mode == "last_output":
            return last_output
        elif self.return_mode == "all_outputs":
            return all_outputs
        else:
            # return mode is output_and_memory
            expanded_last_output = K.expand_dims(last_output, dim=1)  # (batch_size, 1, output_dim)
            # (batch_size, 1+input_length, output_dim)
            return K.concatenate([expanded_last_output, last_memory], axis=1)
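In the output_and_memory mode, downstream code can split the concatenated tensor back into the final output and the memory; a small numpy illustration of the layout (shapes are hypothetical):

import numpy as np

# Hypothetical concatenated result in "output_and_memory" mode.
batch_size, input_length, output_dim = 2, 5, 4
combined = np.random.rand(batch_size, 1 + input_length, output_dim)

last_output = combined[:, 0, :]   # (batch_size, output_dim)
memory = combined[:, 1:, :]       # (batch_size, input_length, output_dim)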
nse.py (project: onto-lstm, author: pdasigi)
def get_initial_states(self, nse_input, input_mask=None):
        '''
        Read input in MMA-NSE will be of shape (batch_size, read_input_length*2, input_dim), a concatenation of
        the actual input to this NSE and the output from a different NSE. The latter will be used to initialize
        the shared memory. The former will be passed to the read LSTM and also used to initialize the current
        memory.
        '''
        input_length = K.shape(nse_input)[1]
        read_input_length = input_length // 2  # integer division so the slice index stays an integer
        input_to_read = nse_input[:, :read_input_length, :]
        initial_shared_memory = K.batch_flatten(nse_input[:, read_input_length:, :])
        mem_0 = K.batch_flatten(input_to_read)
        o_mask = self.reader.compute_mask(input_to_read, input_mask)
        reader_states = self.reader.get_initial_states(nse_input)
        initial_states = reader_states + [mem_0, initial_shared_memory]
        return initial_states, o_mask
predict.py (project: golden_touch, author: at553)
def train_model(self):
        # scale
        scaler = MinMaxScaler(feature_range=(0, 1))
        dataset = scaler.fit_transform(self.data)

        # split into train and test sets
        train_size = int(len(dataset) * 0.95)
        train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]

        look_back = 5
        trainX, trainY = self.create_dataset(train, look_back)

        # reshape input to be [samples, time steps, features]
        trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
        # create and fit the LSTM network
        model = Sequential()
        model.add(LSTM(6, input_dim=look_back))
        model.add(Dense(1))
        model.compile(loss='mean_squared_error', optimizer='adam')
        model.fit(trainX, trainY, nb_epoch=100, batch_size=1, verbose=2)
        return model
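The create_dataset helper called above is not part of this snippet; a typical sliding-window implementation for this pattern looks roughly like the following (a sketch under that assumption, not the project's exact code):

import numpy

def create_dataset(dataset, look_back=1):
    # Build (X, Y) pairs: X is a window of look_back values, Y is the value that follows it.
    data_x, data_y = [], []
    for i in range(len(dataset) - look_back - 1):
        data_x.append(dataset[i:(i + look_back), 0])
        data_y.append(dataset[i + look_back, 0])
    return numpy.array(data_x), numpy.array(data_y)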
models.py (project: AutoSleepScorerDev, author: skjerns)
def rcnn(input_shape, n_classes):
    """
    Input size should be [batch, 1d, ch] = (XXX, 3000, 1)
    """
    model = Sequential(name='RCNN test')
    model.add(Conv1D (kernel_size = (200), filters = 20, batch_input_shape=input_shape, activation='elu'))
    model.add(MaxPooling1D(pool_size = (20), strides=(10)))
    model.add(Conv1D (kernel_size = (20), filters = 200, activation='elu'))
    model.add(MaxPooling1D(pool_size = (10), strides=(3)))
    model.add(Conv1D (kernel_size = (20), filters = 200, activation='elu'))
    model.add(MaxPooling1D(pool_size = (10), strides=(3)))
    model.add(Dense (512, activation='elu'))
    model.add(Dense (512, activation='elu'))
    model.add(Reshape((1,model.output_shape[1])))
    model.add(LSTM(256, stateful=True, return_sequences=False))
    model.add(Dropout(0.3))
    model.add(Dense(n_classes, activation = 'sigmoid'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
model.py (project: deeppavlov, author: deepmipt)
def create_lstm_layer(self, input_dim):
        """Create a LSTM layer of a model."""

        inp = Input(shape=(input_dim, self.embedding_dim,))
        inp_dropout = Dropout(self.ldrop_val)(inp)
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        outp = LSTM(self.hidden_dim, input_shape=(input_dim, self.embedding_dim,),
                    kernel_regularizer=None,
                    recurrent_regularizer=None,
                    bias_regularizer=None,
                    activity_regularizer=None,
                    recurrent_dropout=self.recdrop_val,
                    dropout=self.inpdrop_val,
                    kernel_initializer=ker_in,
                    recurrent_initializer=rec_in,
                    return_sequences=True)(inp_dropout)
        outp_dropout = Dropout(self.dropout_val)(outp)
        model = Model(inputs=inp, outputs=outp_dropout, name="LSTM_encoder")
        return model
model.py (project: deeppavlov, author: deepmipt)
def create_lstm_layer_1(self, input_dim):
        """Create a LSTM layer of a model."""

        inp = Input(shape=(input_dim,  self.embedding_dim,))
        inp_drop = Dropout(self.ldrop_val)(inp)
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        bioutp = Bidirectional(LSTM(self.hidden_dim,
                                    input_shape=(input_dim, self.embedding_dim,),
                                    kernel_regularizer=None,
                                    recurrent_regularizer=None,
                                    bias_regularizer=None,
                                    activity_regularizer=None,
                                    recurrent_dropout=self.recdrop_val,
                                    dropout=self.inpdrop_val,
                                    kernel_initializer=ker_in,
                                    recurrent_initializer=rec_in,
                                    return_sequences=True), merge_mode=None)(inp_drop)
        dropout_forw = Dropout(self.dropout_val)(bioutp[0])
        dropout_back = Dropout(self.dropout_val)(bioutp[1])
        model = Model(inputs=inp, outputs=[dropout_forw, dropout_back], name="biLSTM_encoder")
        return model
model.py (project: deeppavlov, author: deepmipt)
def create_lstm_layer_2(self, input_dim):
        """Create a LSTM layer of a model."""

        inp = Input(shape=(input_dim, 2*self.perspective_num,))
        inp_drop = Dropout(self.ldrop_val)(inp)
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        bioutp = Bidirectional(LSTM(self.aggregation_dim,
                                    input_shape=(input_dim, 2*self.perspective_num,),
                                    kernel_regularizer=None,
                                    recurrent_regularizer=None,
                                    bias_regularizer=None,
                                    activity_regularizer=None,
                                    recurrent_dropout=self.recdrop_val,
                                    dropout=self.inpdrop_val,
                                    kernel_initializer=ker_in,
                                    recurrent_initializer=rec_in,
                                    return_sequences=True), merge_mode=None)(inp_drop)
        dropout_forw = Dropout(self.dropout_val)(bioutp[0])
        dropout_back = Dropout(self.dropout_val)(bioutp[1])
        model = Model(inputs=inp, outputs=[dropout_forw, dropout_back], name="biLSTM_enc_persp")
        return model
model.py (project: deeppavlov, author: deepmipt)
def bilstm_woatt_model(self):
        """Define a model with bi-LSTM layers and without attention."""

        input_a = Input(shape=(self.max_sequence_length, self.embedding_dim,))
        input_b = Input(shape=(self.max_sequence_length, self.embedding_dim,))
        lstm_layer = self.create_lstm_layer_last(self.max_sequence_length)
        lstm_last_a = lstm_layer(input_a)
        lstm_last_b = lstm_layer(input_b)

        dist = Lambda(self.cosine_dist, output_shape=self.cosine_dist_output_shape,
                      name="similarity_network")([lstm_last_a, lstm_last_b])

        dense = Dense(1, activation='sigmoid', name='similarity_score',
                      kernel_regularizer=None,
                      bias_regularizer=None,
                      activity_regularizer=None)(dist)

        model = Model([input_a, input_b], dense)

        return model
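The cosine_dist and cosine_dist_output_shape callables used in the Lambda layer are defined elsewhere in the class; a plausible implementation of such a similarity function with the Keras backend is sketched below (an assumption, not the project's exact code):

from keras import backend as K

def cosine_dist(inputs):
    # Cosine similarity between two batches of vectors; output shape (batch_size, 1).
    a, b = inputs
    a = K.l2_normalize(a, axis=-1)
    b = K.l2_normalize(b, axis=-1)
    return K.sum(a * b, axis=-1, keepdims=True)

def cosine_dist_output_shape(input_shapes):
    shape_a, _ = input_shapes
    return (shape_a[0], 1)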
model.py (project: Sentiment-Analysis, author: jasonwu0731)
def __init__(self, n_classes, vocab_size, max_len, num_units=128,
                 useBiDirection=False, useAttention=False, learning_rate=0.001, dropout=0, embedding_size=300):
        self.model = Sequential()
        self.model.add(Embedding(input_dim=vocab_size,
                                 output_dim=embedding_size, input_length=max_len))
        lstm_model = LSTM(num_units, dropout=dropout)
        if useBiDirection:
            lstm_model = Bidirectional(lstm_model)
        if useAttention:
            print("Attention is not implemented yet ...")
        self.model.add(lstm_model)
        self.model.add(Dense(n_classes, activation='softmax'))

        self.model.summary()
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=Adam(lr=learning_rate),
                           metrics=['accuracy'])
main.py (project: keras-timeseries-prediction, author: gcarq)
def build_model(look_back: int, batch_size: int=1) -> Sequential:
    """
    The function builds a keras Sequential model
    :param look_back: number of previous time steps as int
    :param batch_size: batch_size as int, defaults to 1
    :return: keras Sequential model
    """
    model = Sequential()
    model.add(LSTM(64,
                   activation='relu',
                   batch_input_shape=(batch_size, look_back, 1),
                   stateful=True,
                   return_sequences=False))
    model.add(Dense(1, activation='linear'))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
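Because the LSTM is stateful, its hidden state persists across batches and has to be reset manually between epochs; a minimal training sketch with placeholder data (not part of the original project):

import numpy as np

look_back = 3
# Placeholder series; real usage would feed windows of an actual time series.
trainX = np.random.rand(100, look_back, 1)
trainY = np.random.rand(100, 1)

model = build_model(look_back, batch_size=1)
for epoch in range(10):
    model.fit(trainX, trainY, epochs=1, batch_size=1, shuffle=False, verbose=0)
    model.reset_states()  # clear the carried-over LSTM state between epochs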
sequence_encoders.py (project: keras-text, author: raghakot)
def __init__(self, rnn_class=LSTM, encoder_dims=50, bidirectional=True, dropout_rate=0.5, **rnn_kwargs):
        """Creates an RNN model with attention. The attention mechanism is implemented as described
        in https://www.cs.cmu.edu/~hovy/papers/16HLT-hierarchical-attention-networks.pdf, but without
        sentence level attention.

        Args:
            rnn_class: The type of RNN to use. (Default Value = LSTM)
            encoder_dims: The number of hidden units of the RNN. (Default Value = 50)
            bidirectional: Whether to use bidirectional encoding. (Default Value = True)
            dropout_rate: Fraction of units to drop. (Default Value = 0.5)
            **rnn_kwargs: Additional args for building the RNN.
        """
        super(AttentionRNN, self).__init__(dropout_rate)
        self.rnn_class = rnn_class
        self.encoder_dims = encoder_dims
        self.bidirectional = bidirectional
        self.rnn_kwargs = rnn_kwargs
lstm_bilstm.py (project: sota_sentiment, author: jbarnesspain)
def create_BiLSTM(wordvecs, lstm_dim=300, output_dim=2, dropout=.5,
                weights=None, train=True):
    model = Sequential()
    if weights is not None:
        model.add(Embedding(len(wordvecs)+1,
            len(wordvecs['the']),
            weights=[weights],
                    trainable=train))
    else:
        model.add(Embedding(len(wordvecs)+1,
            len(wordvecs['the']),
                    trainable=train))
    model.add(Dropout(dropout))
    model.add(Bidirectional(LSTM(lstm_dim)))
    model.add(Dropout(dropout))
    model.add(Dense(output_dim, activation='softmax'))
    if output_dim == 2:
        model.compile('adam', 'binary_crossentropy',
                  metrics=['accuracy'])
    else:
        model.compile('adam', 'categorical_crossentropy',
                  metrics=['accuracy'])
    return model
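The weights argument is expected to be a matrix of shape (len(wordvecs) + 1, embedding dimension) whose row indices line up with the word indices used elsewhere in the pipeline; one way to build such a matrix (a sketch with a hypothetical index assignment, not the project's exact code):

import numpy as np

def build_embedding_matrix(wordvecs):
    # Row 0 is reserved for padding; rows 1..n hold the word vectors.
    dim = len(wordvecs['the'])
    matrix = np.zeros((len(wordvecs) + 1, dim))
    for idx, word in enumerate(wordvecs, start=1):
        matrix[idx] = wordvecs[word]
    return matrix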
lstm_bilstm.py (project: sota_sentiment, author: jbarnesspain)
def print_results(bi, file, out_file, file_type):

    names, results, std_devs, dim = test_embeddings(bi, file, file_type)

    rr = [[u'{0:.3f} \u00B1{1:.3f}'.format(r, s) for r, s in zip(result, std_dev)] for result, std_dev in zip(results, std_devs)]
    table_data = [[name] + result for name, result in zip(names, rr)]
    table = tabulate.tabulate(table_data, headers=['dataset', 'acc', 'prec', 'rec', 'f1'], tablefmt='simple', floatfmt='.3f')

    if out_file:
        with open(out_file, 'a') as f:
            f.write('\n')
            if bi:
                f.write('+++Bidirectional LSTM+++\n')
            else:
                f.write('+++LSTM+++\n')
            f.write(table)
            f.write('\n')
    else:
        print()
        if bi:
            print('Bidirectional LSTM')
        else:
            print('LSTM')
        print(table)
textgenrnn.py (project: textgenrnn, author: minimaxir)
def textgenrnn_model(weights_path, num_classes, maxlen=40):
    '''
    Builds the model architecture for textgenrnn and
    loads the pretrained weights for the model.
    '''

    input = Input(shape=(maxlen,), name='input')
    embedded = Embedding(num_classes, 100, input_length=maxlen,
                         trainable=True, name='embedding')(input)
    rnn = LSTM(128, return_sequences=False, name='rnn')(embedded)
    output = Dense(num_classes, name='output', activation='softmax')(rnn)

    model = Model(inputs=[input], outputs=[output])
    model.load_weights(weights_path, by_name=True)
    model.compile(loss='categorical_crossentropy', optimizer='nadam')
    return model
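The model outputs a softmax distribution over num_classes characters; generating text from it typically means sampling the next index with a temperature parameter (a generic sketch, not textgenrnn's own sampling code):

import numpy as np

def sample_index(probs, temperature=1.0):
    # Rescale the softmax output by temperature and draw one class index.
    probs = np.asarray(probs, dtype='float64')
    probs = np.log(probs + 1e-12) / temperature
    probs = np.exp(probs) / np.sum(np.exp(probs))
    return int(np.argmax(np.random.multinomial(1, probs, 1)))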
rnn.py (project: rupo, author: IlyaGusev)
def __init__(self, dict_path: str=None, word_max_length: int=30, language: str = "ru", rnn=LSTM,
                 units1: int=256, units2: int=256, dropout: float=0.2, batch_size=2048, emb_dimension=30):
        self.rnn = rnn
        self.dropout = dropout  # type: float
        self.units1 = units1  # type: int
        self.units2 = units2  # type: int
        self.language = language  # type: str
        if language == "ru":
            self.grapheme_alphabet = RU_GRAPHEME_SET
        elif language == "en":
            self.grapheme_alphabet = EN_GRAPHEME_SET
        else:
            assert False
        self.dict_path = dict_path  # type: str
        self.word_max_length = word_max_length  # type: int
        self.emb_dimension = emb_dimension
        self.batch_size = batch_size
        self.model = None
createDummyData.py (project: ML_algorithm, author: luoshao23)
def __init__(self, **kwargs):
        """
        :param **kwargs: output_dim=8: output dimension of the LSTM layer;
         activation_lstm='relu': activation function for LSTM layers;
         activation_dense='relu': activation function for Dense layers;
         activation_last='softmax': activation function for the last layer (softmax for multi-class output);
         drop_out=0.2: fraction of input units to drop;
         nb_epoch=10: number of epochs to train the model (one epoch is one forward and one backward pass over all training examples);
         batch_size=100: number of samples per gradient update (larger batches need more memory);
         loss='categorical_crossentropy': loss function;
         optimizer='rmsprop': optimizer
        """
        self.output_dim = kwargs.get('output_dim', 8)
        self.activation_lstm = kwargs.get('activation_lstm', 'relu')
        self.activation_dense = kwargs.get('activation_dense', 'relu')
        self.activation_last = kwargs.get('activation_last', 'softmax')    # softmax for multiple output
        self.dense_layer = kwargs.get('dense_layer', 2)     # at least 2 layers
        self.lstm_layer = kwargs.get('lstm_layer', 2)
        self.drop_out = kwargs.get('drop_out', 0.2)
        self.nb_epoch = kwargs.get('nb_epoch', 10)
        self.batch_size = kwargs.get('batch_size', 100)
        self.loss = kwargs.get('loss', 'categorical_crossentropy')
        self.optimizer = kwargs.get('optimizer', 'rmsprop')
gran.py (project: Hotpot, author: Liang-Qiu)
def prep_model(inputs, N, s0pad, s1pad, c, granlevels=1):
    # LSTM
    lstm = LSTM(N, return_sequences=True, implementation=2, 
                   kernel_regularizer=l2(c['l2reg']), recurrent_regularizer=l2(c['l2reg']),
                   bias_regularizer=l2(c['l2reg']))
    x1 = inputs[0]
    x2 = inputs[1]
    h1 = lstm(x1)
    h2 = lstm(x2)

    W_x = Dense(N, kernel_initializer='glorot_uniform', use_bias=True, 
                   kernel_regularizer=l2(c['l2reg']))
    W_h = Dense(N, kernel_initializer='orthogonal', use_bias=True,
                   kernel_regularizer=l2(c['l2reg']))
    sigmoid = Activation('sigmoid')
    a1 = multiply([x1, sigmoid( add([W_x(x1), W_h(h1)]) )])
    a2 = multiply([x2, sigmoid( add([W_x(x2), W_h(h2)]) )])

    # Averaging
    avg = Lambda(function=lambda x: K.mean(x, axis=1),
                 output_shape=lambda shape: (shape[0], ) + shape[2:])
    gran1 = avg(a1)
    gran2 = avg(a2)

    return [gran1, gran2], N
models.py (project: aes-gated-word-char, author: unkn0wnxx)
def create_char_lstm_model(self, emb_dim, word_maxlen, vocab_char_size,
                               char_maxlen):
        from keras.layers import LSTM
        logger.info('Building character LSTM model')
        input_char = Input(shape=(char_maxlen, ), name='input_char')
        char_emb = Embedding(
            vocab_char_size, emb_dim, mask_zero=True)(input_char)
        lstm = LSTM(
            300,
            return_sequences=True,
            dropout=self.dropout,
            recurrent_dropout=self.recurrent_dropout)(char_emb)
        dropped = Dropout(0.5)(lstm)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=input_char, outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        logger.info('  Done')
        return model
test_keras2.py (project: coremltools, author: apple)
def test_conv1d_lstm(self):
        from keras.layers import Conv1D, LSTM, Dense
        model = Sequential()
        # input_shape = (time_step, dimensions)
        model.add(Conv1D(32,3,padding='same',input_shape=(10,8)))
        # conv1d output shape = (None, 10, 32)
        model.add(LSTM(24))
        model.add(Dense(1, activation='sigmoid'))

        input_names = ['input']
        output_names = ['output']
        spec = keras.convert(model, input_names, output_names).get_spec()

        self.assertIsNotNone(spec)
        self.assertTrue(spec.HasField('neuralNetwork'))

        # Test the inputs and outputs
        self.assertEqual(len(spec.description.input), len(input_names) + 2)
        self.assertEqual(len(spec.description.output), len(output_names) + 2)

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        self.assertIsNotNone(layers[0].convolution)
        self.assertIsNotNone(layers[1].simpleRecurrent)
        self.assertIsNotNone(layers[2].innerProduct)
test_recurrent_stress_tests.py (project: coremltools, author: apple)
def test_SimpleLSTMStacked(self):
        params = dict(
            input_dims=[1, 1, 1], go_backwards=False, activation='tanh',
            stateful=False, unroll=False, return_sequences=False, output_dim=1
        ),
        model = Sequential()
        model.add(LSTM(output_dim=params[0]['output_dim'],
                       input_length=params[0]['input_dims'][1],
                       input_dim=params[0]['input_dims'][2],
                       activation=params[0]['activation'],
                       inner_activation='sigmoid',
                       return_sequences=True,
                       go_backwards=params[0]['go_backwards'],
                       unroll=params[0]['unroll'],
                       ))
        model.add(LSTM(output_dim=1,
                       activation='tanh',
                       inner_activation='sigmoid',
                       ))
        relative_error, keras_preds, coreml_preds = simple_model_eval(params, model)
        for i in range(len(relative_error)):
            self.assertLessEqual(relative_error[i], 0.01)
test_recurrent_stress_tests.py (project: coremltools, author: apple)
def test_many_to_many(self):
        params = dict(
            input_dims=[1, 10, 5], go_backwards=False, activation='tanh',  # fails with hard_sigmoid
            stateful=False, unroll=False, return_sequences=True, output_dim=1
        ),
        model = Sequential()
        model.add(LSTM(output_dim=params[0]['output_dim'],
                       input_shape=(10, 5),
                       activation=params[0]['activation'],
                       inner_activation='sigmoid',
                       return_sequences=True,
                       ))
        relative_error, keras_preds, coreml_preds = simple_model_eval(params, model)
        # print relative_error, '\n', keras_preds, '\n', coreml_preds, '\n'
        for i in range(len(relative_error)):
            self.assertLessEqual(relative_error[i], 0.01)
test_neural_networks.py (project: coremltools, author: apple)
def test_classifier_no_name(self):
        np.random.seed(1988)

        input_dim = 5
        num_hidden = 12
        num_classes = 6
        input_length = 3

        model = Sequential()
        model.add(LSTM(num_hidden, input_dim=input_dim, input_length=input_length, return_sequences=False))
        model.add(Dense(num_classes, activation='softmax'))

        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        input_names = ['input']
        output_names = ['zzzz']
        class_labels = ['a', 'b', 'c', 'd', 'e', 'f']
        predicted_feature_name = 'pf'
        coremlmodel = keras_converter.convert(model, input_names, output_names, class_labels=class_labels, predicted_feature_name=predicted_feature_name)

        inputs = np.random.rand(input_dim)
        outputs = coremlmodel.predict({'input': inputs})
        # this checks that the dictionary got the right name and type
        self.assertEqual(type(outputs[output_names[0]]), type({'a': 0.5}))
test_keras2_numeric.py (project: coremltools, author: apple)
def test_tiny_no_sequence_lstm_zeros_gpu(self):
        np.random.seed(1988)
        input_dim = 1
        input_length = 1
        num_channels = 1

        # Define a model
        model = Sequential()
        model.add(LSTM(num_channels, input_shape = (input_length, input_dim),
                       implementation = 2, recurrent_activation = 'sigmoid'))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape)*0.2-0.1 for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, mode = 'zeros', input_blob = 'data', output_blob = 'output')
test_keras2_numeric.py (project: coremltools, author: apple)
def test_small_no_sequence_lstm_random(self):
        np.random.seed(1988)
        input_dim = 10
        input_length = 1
        num_channels = 1

        # Define a model
        model = Sequential()
        model.add(LSTM(num_channels, input_shape = (input_length, input_dim),
                       implementation = 2, recurrent_activation = 'sigmoid'))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape)*0.2-0.1 for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob = 'data', output_blob = 'output')
test_keras2_numeric.py (project: coremltools, author: apple)
def test_tiny_no_sequence_bidir_random(self,
                                           model_precision=_MLMODEL_FULL_PRECISION):
        np.random.seed(1988)
        input_dim = 1
        input_length = 1
        num_channels = 1
        num_samples = 1

        # Define a model
        model = Sequential()
        model.add(Bidirectional(LSTM(num_channels, 
            implementation = 1, recurrent_activation = 'sigmoid'),
            input_shape=(input_length, input_dim)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape)*0.2-0.1 for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob = 'data', output_blob = 'output',
                               model_precision=model_precision)

