Example source code for the Python class TimeDistributedDense()
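TimeDistributedDense applies one shared Dense (fully connected) layer independently to every timestep of a 3D input of shape (nb_samples, timesteps, input_dim). It belongs to the old Keras 0.x/1.x API and was later deprecated in favour of the generic TimeDistributed wrapper around Dense. A minimal sketch of the legacy spelling (the shapes are illustrative, not taken from any of the projects below):

# Minimal sketch using the legacy Keras 0.x/1.x API; shapes are arbitrary.
from keras.models import Sequential
from keras.layers.core import TimeDistributedDense

model = Sequential()
# Input (batch, 10, 16) -> output (batch, 10, 8); the same 16x8 weight
# matrix is applied at every one of the 10 timesteps.
model.add(TimeDistributedDense(8, input_shape=(10, 16)))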

test_temporal_data_tasks.py (project: keras, author: GeekLiB)
def test_sequence_to_sequence():
    '''
    Apply the same Dense layer to each element of the time dimension of the
    input and predict the elements of the output sequence.
    This does not make use of the temporal structure of the sequence
    (see TimeDistributedDense for more details).
    '''
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=500,
                                                         nb_test=200,
                                                         input_shape=(3, 5),
                                                         output_shape=(3, 5),
                                                         classification=False)

    model = Sequential()
    model.add(TimeDistributedDense(y_train.shape[-1],
                                   input_shape=(X_train.shape[1], X_train.shape[2])))
    model.compile(loss='hinge', optimizer='rmsprop')
    history = model.fit(X_train, y_train, nb_epoch=20, batch_size=16,
                        validation_data=(X_test, y_test), verbose=0)
    assert(history.history['val_loss'][-1] < 0.8)
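In Keras 1.0 and later, the same per-timestep projection is written with the TimeDistributed wrapper, which superseded TimeDistributedDense. A hedged equivalent of the test model above:

# Equivalent model with the TimeDistributed wrapper (Keras >= 1.0);
# the shapes mirror the (3, 5) sequences used by the test.
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.wrappers import TimeDistributed

model = Sequential()
model.add(TimeDistributed(Dense(5), input_shape=(3, 5)))
model.compile(loss='hinge', optimizer='rmsprop')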
test_temporal_data_tasks.py (project: keras-customized, author: ambrite)
def test_sequence_to_sequence():
    '''
    Apply the same Dense layer to each element of the time dimension of the
    input and predict the elements of the output sequence.
    This does not make use of the temporal structure of the sequence
    (see TimeDistributedDense for more details).
    '''
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=500,
                                                         nb_test=200,
                                                         input_shape=(3, 5),
                                                         output_shape=(3, 5),
                                                         classification=False)

    model = Sequential()
    model.add(TimeDistributedDense(y_train.shape[-1],
                                   input_shape=(X_train.shape[1], X_train.shape[2])))
    model.compile(loss='hinge', optimizer='rmsprop')
    history = model.fit(X_train, y_train, nb_epoch=20, batch_size=16,
                        validation_data=(X_test, y_test), verbose=0)
    assert(history.history['val_loss'][-1] < 0.8)
model.py (project: BetaStock, author: qweraqq)
def build(self):
        dim_data = self.size_of_input_data_dim
        nb_time_step = self.size_of_input_timesteps
        financial_time_series_input = Input(shape=(nb_time_step, dim_data))

        # nb_hidden_units, dropout and l2_norm_alpha are module-level
        # hyperparameters defined elsewhere in model.py
        lstm_layer_1 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout, inner_activation='sigmoid',
                            W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                            return_sequences=True)
        lstm_layer_2 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout, inner_activation='sigmoid',
                            W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                            return_sequences=True)

        h1 = lstm_layer_1(financial_time_series_input)
        h2 = lstm_layer_2(h1)
        time_series_predictions = TimeDistributedDense(1)(h2)
        self.model = Model(financial_time_series_input, time_series_predictions,
                           name="deep rnn for financial time series forecasting")
test_temporal_data_tasks.py (project: keras, author: NVIDIA)
def test_sequence_to_sequence():
    '''
    Apply the same Dense layer to each element of the time dimension of the
    input and predict the elements of the output sequence.
    This does not make use of the temporal structure of the sequence
    (see TimeDistributedDense for more details).
    '''
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=500,
                                                         nb_test=200,
                                                         input_shape=(3, 5),
                                                         output_shape=(3, 5),
                                                         classification=False)

    model = Sequential()
    model.add(TimeDistributedDense(y_train.shape[-1],
                                   input_shape=(X_train.shape[1], X_train.shape[2])))
    model.compile(loss='hinge', optimizer='rmsprop')
    history = model.fit(X_train, y_train, nb_epoch=20, batch_size=16,
                        validation_data=(X_test, y_test), verbose=0)
    assert(history.history['val_loss'][-1] < 0.8)
test_loss_weighting.py (project: keras, author: GeekLiB)
def create_temporal_sequential_model():
    model = Sequential()
    # timesteps, input_dim and nb_classes are module-level test constants
    model.add(GRU(32, input_shape=(timesteps, input_dim), return_sequences=True))
    model.add(TimeDistributedDense(nb_classes))
    model.add(Activation('softmax'))
    return model
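In test_loss_weighting.py this model is compiled with sample_weight_mode='temporal', which is what lets each timestep carry its own weight during training. A sketch of that usage, with assumed values for the module-level constants:

# Illustrative usage; timesteps/input_dim/nb_classes are assumed values.
timesteps, input_dim, nb_classes = 5, 8, 4

model = create_temporal_sequential_model()
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
              sample_weight_mode='temporal')
# model.fit(X, Y, sample_weight=w, ...) then expects w with shape
# (nb_samples, timesteps), one weight per output timestep.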
test_loss_weighting.py (project: keras-customized, author: ambrite)
def create_temporal_sequential_model():
    model = Sequential()
    model.add(GRU(32, input_shape=(timesteps, input_dim), return_sequences=True))
    model.add(TimeDistributedDense(nb_classes))
    model.add(Activation('softmax'))
    return model
test_loss_weighting.py (project: keras, author: NVIDIA)
def create_temporal_sequential_model():
    model = Sequential()
    model.add(GRU(32, input_shape=(timesteps, input_dim), return_sequences=True))
    model.add(TimeDistributedDense(nb_classes))
    model.add(Activation('softmax'))
    return model
test_temporal_data_tasks.py (project: keras, author: GeekLiB)
def test_masked_temporal():
    '''
    Confirm that even with masking on both inputs and outputs, the
    cross-entropies are of the expected scale.

    In this task, there are variable-length inputs of integers from 1-9, and
    a random subset of unmasked outputs. Each of these outputs has a 50%
    probability of being the input number unchanged, and a 50% probability of
    being 2*input % 10.

    The best achievable (ground-truth) cross-entropy loss should then be
    -log(0.5) ≈ 0.69.

    '''
    model = Sequential()
    model.add(Embedding(10, 20, mask_zero=True, input_length=20))
    model.add(TimeDistributedDense(10))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  sample_weight_mode='temporal')

    X = np.random.random_integers(1, 9, (50000, 20))
    for rowi in range(X.shape[0]):
        padding = np.random.random_integers(X.shape[1] / 2)
        X[rowi, :padding] = 0

    # 50% of the time the correct output is the input.
    # The other 50% of the time it's 2 * input % 10
    y = (X * np.random.random_integers(1, 2, X.shape)) % 10
    Y = np.zeros((y.size, 10), dtype='int32')
    for i, target in enumerate(y.flat):
        Y[i, target] = 1
    Y = Y.reshape(y.shape + (10,))

    # Mask 50% of the outputs via sample weights
    sample_weight = np.random.random_integers(0, 1, y.shape)
    print('X shape:', X.shape)
    print('Y shape:', Y.shape)
    print('sample_weight shape:', sample_weight.shape)

    # apply the temporal sample weights so that roughly half of the
    # outputs are masked, as the docstring describes
    history = model.fit(X, Y, validation_split=0.05,
                        sample_weight=sample_weight,
                        verbose=1, nb_epoch=2)
    ground_truth = -np.log(0.5)
    assert(np.abs(history.history['val_loss'][-1] - ground_truth) < 0.06)
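The 0.69 target in the assertion is simply the entropy of a fair coin: each unmasked output is one of two equally likely classes, so no model can do better than -log(0.5) per timestep. A quick check:

import numpy as np
# Best achievable per-timestep cross-entropy for a 50/50 target:
# H = -(0.5*log(0.5) + 0.5*log(0.5)) = -log(0.5)
print(-np.log(0.5))  # ~0.6931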
simple_sentence_breaker.py (project: learning_rnn, author: qiangsiwei)
def train_breaker(datafilename, sentence_num=1000, puncs=u',?.?!???', \
            RNN=recurrent.GRU, HIDDEN_SIZE=128, EPOCH_SIZE=10, validate=True):
    wordtable = WordTable()
    wordtable.parse(datafilename, sentence_num)

    X, Y = [], []
    for line in open(datafilename).readlines()[:sentence_num]:
        line = line.strip().decode('utf-8')
        # strip leading/trailing punctuation; 'words' encodes the characters
        # with the punctuation removed, and 'breaks' is a 0/1 string marking
        # the positions where punctuation followed a character
        line = re.sub(ur'(^[{0}]+)|([{0}]+$)'.format(puncs),'',line)
        words = wordtable.encode(re.sub(ur'[{0}]'.format(puncs),'',line))
        breaks = re.sub(ur'0[{0}]+'.format(puncs),'1',re.sub(ur'[^{0}]'.format(puncs),'0',line))
        if len(words) >= 30 and len(words) <= 50 and breaks.count('1') >= 4:
            x = np.zeros((len(words), wordtable.capacity), dtype=np.bool)
            y = np.zeros((len(breaks), 2), dtype=np.bool)
            for idx in xrange(len(words)):
                x[idx][words[idx]] = True
                y[idx][int(breaks[idx])] = True
            X.append(x)
            Y.append(y)
    print 'total sentences: ', len(X)

    if validate:
        # Set apart 10% for validation
        split_at = len(X) - len(X)/10
        X_train, X_val = X[:split_at], X[split_at:]
        y_train, y_val = Y[:split_at], Y[split_at:]
    else:
        X_train, y_train = X, Y

    model = Graph()
    model.add_input(name='input', input_shape=(None, wordtable.capacity))
    model.add_node(RNN(HIDDEN_SIZE, return_sequences=True), name='forward', input='input')
    model.add_node(TimeDistributedDense(2, activation='softmax'), name='softmax', input='forward')
    model.add_output(name='output', input='softmax')
    model.compile('adam', {'output': 'categorical_crossentropy'})

    for epoch in xrange(EPOCH_SIZE):
        print "epoch: ", epoch
        for idx, (seq, label) in enumerate(zip(X_train, y_train)):
            loss, accuracy = model.train_on_batch({'input':np.array([seq]), 'output':np.array([label])}, accuracy=True)
            if idx % 20 == 0:
                print "\tidx={0}, loss={1}, accuracy={2}".format(idx, loss, accuracy)

    if validate:
        _Y, _P = [], []
        for (seq, label) in zip(X_val, y_val):
            y = label.argmax(axis=-1)
            p = model.predict({'input':np.array([seq])})['output'][0].argmax(axis=-1)
            _Y.extend(list(y))
            _P.extend(list(p))
        _Y, _P = np.array(_Y), np.array(_P)
        print "should break right: ", ((_P == 1)*(_Y == 1)).sum()
        print "should break wrong: ", ((_P == 0)*(_Y == 1)).sum()
        print "should not break right: ", ((_P == 0)*(_Y == 0)).sum()
        print "should not break wrong: ", ((_P == 1)*(_Y == 0)).sum()

    with open('wordtable_json.txt','w') as wordtable_file:
        wordtable_file.write(wordtable.to_json())
    with open('model_json.txt','w') as model_file:
        model_file.write(model.to_json())
    model.save_weights('model_weights.h5', overwrite=True)
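A hedged example of driving the script (the corpus path is a placeholder; like the rest of the file, this is Python 2, as the print statements and ur'' literals indicate):

# Hypothetical invocation; 'corpus.txt' stands in for a real corpus with
# one sentence per line in UTF-8.
train_breaker('corpus.txt', sentence_num=1000, validate=True)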
test_temporal_data_tasks.py (project: keras-customized, author: ambrite)
def test_masked_temporal():
    '''
    Confirm that even with masking on both inputs and outputs, the
    cross-entropies are of the expected scale.

    In this task, there are variable-length inputs of integers from 1-9, and
    a random subset of unmasked outputs. Each of these outputs has a 50%
    probability of being the input number unchanged, and a 50% probability of
    being 2*input % 10.

    The best achievable (ground-truth) cross-entropy loss should then be
    -log(0.5) ≈ 0.69.

    '''
    model = Sequential()
    model.add(Embedding(10, 20, mask_zero=True, input_length=20))
    model.add(TimeDistributedDense(10))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  sample_weight_mode='temporal')

    X = np.random.random_integers(1, 9, (50000, 20))
    for rowi in range(X.shape[0]):
        padding = np.random.random_integers(X.shape[1] / 2)
        X[rowi, :padding] = 0

    # 50% of the time the correct output is the input.
    # The other 50% of the time it's 2 * input % 10
    y = (X * np.random.random_integers(1, 2, X.shape)) % 10
    Y = np.zeros((y.size, 10), dtype='int32')
    for i, target in enumerate(y.flat):
        Y[i, target] = 1
    Y = Y.reshape(y.shape + (10,))

    # Mask 50% of the outputs via sample weights
    sample_weight = np.random.random_integers(0, 1, y.shape)
    print('X shape:', X.shape)
    print('Y shape:', Y.shape)
    print('sample_weight shape:', sample_weight.shape)

    # apply the temporal sample weights so that roughly half of the
    # outputs are masked, as the docstring describes
    history = model.fit(X, Y, validation_split=0.05,
                        sample_weight=sample_weight,
                        verbose=1, nb_epoch=2)
    ground_truth = -np.log(0.5)
    assert(np.abs(history.history['val_loss'][-1] - ground_truth) < 0.06)
test_temporal_data_tasks.py (project: keras, author: NVIDIA)
def test_masked_temporal():
    '''
    Confirm that even with masking on both inputs and outputs, the
    cross-entropies are of the expected scale.

    In this task, there are variable-length inputs of integers from 1-9, and
    a random subset of unmasked outputs. Each of these outputs has a 50%
    probability of being the input number unchanged, and a 50% probability of
    being 2*input % 10.

    The best achievable (ground-truth) cross-entropy loss should then be
    -log(0.5) ≈ 0.69.

    '''
    model = Sequential()
    model.add(Embedding(10, 20, mask_zero=True, input_length=20))
    model.add(TimeDistributedDense(10))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  sample_weight_mode='temporal')

    X = np.random.random_integers(1, 9, (50000, 20))
    for rowi in range(X.shape[0]):
        padding = np.random.random_integers(X.shape[1] / 2)
        X[rowi, :padding] = 0

    # 50% of the time the correct output is the input.
    # The other 50% of the time it's 2 * input % 10
    y = (X * np.random.random_integers(1, 2, X.shape)) % 10
    Y = np.zeros((y.size, 10), dtype='int32')
    for i, target in enumerate(y.flat):
        Y[i, target] = 1
    Y = Y.reshape(y.shape + (10,))

    # Mask 50% of the outputs via sample weights
    sample_weight = np.random.random_integers(0, 1, y.shape)
    print('X shape:', X.shape)
    print('Y shape:', Y.shape)
    print('sample_weight shape:', sample_weight.shape)

    # apply the temporal sample weights so that roughly half of the
    # outputs are masked, as the docstring describes
    history = model.fit(X, Y, validation_split=0.05,
                        sample_weight=sample_weight,
                        verbose=1, nb_epoch=2)
    ground_truth = -np.log(0.5)
    assert(np.abs(history.history['val_loss'][-1] - ground_truth) < 0.06)

