Example source code for the Python load_data() function

Source: models.py (project: loss-correction, author: giorgiop)
def get_data(self):

        (X_train, y_train), (X_test, y_test) = self.load_data()

        idx_perm = np.random.RandomState(101).permutation(X_train.shape[0])
        X_train, y_train = X_train[idx_perm], y_train[idx_perm]

        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')

        print('X_train shape:', X_train.shape)
        print(X_train.shape[0], 'train samples')
        print(X_test.shape[0], 'test samples')

        return X_train, X_test, y_train, y_test

    # custom losses for the CNN
Source: ff_mnist.py (project: deep_learning_ex, author: zatonovo)
def load_data():
    print('Loading data...')
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')

    X_train /= 255
    X_test /= 255

    y_train = np_utils.to_categorical(y_train, 10)
    y_test = np_utils.to_categorical(y_test, 10)

    X_train = np.reshape(X_train, (60000, 784))
    X_test = np.reshape(X_test, (10000, 784))

    print('Data loaded.')
    return [X_train, X_test, y_train, y_test]
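
A quick usage sketch for the function above, only restating the shapes it already produces:

X_train, X_test, y_train, y_test = load_data()
print(X_train.shape, y_train.shape)  # expected: (60000, 784) and (60000, 10)
print(X_test.shape, y_test.shape)    # expected: (10000, 784) and (10000, 10)
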
Source: cnn_mnist.py (project: deep_learning_ex, author: zatonovo)
def load_data():
    print('Loading data...')
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')

    X_train /= 255
    X_test /= 255

    y_train = np_utils.to_categorical(y_train, 10)
    y_test = np_utils.to_categorical(y_test, 10)

    X_train = np.reshape(X_train, (60000, 1, 28,28))
    X_test = np.reshape(X_test, (10000, 1, 28,28))

    print('Data loaded')
    return [X_train, X_test, y_train, y_test]
Source: test_datasets.py (project: keras, author: GeekLiB)
def test_cifar():
    # only run data download tests 20% of the time
    # to speed up frequent testing
    random.seed(time.time())
    if random.random() > 0.8:
        (X_train, y_train), (X_test, y_test) = cifar10.load_data()
        (X_train, y_train), (X_test, y_test) = cifar100.load_data('fine')
        (X_train, y_train), (X_test, y_test) = cifar100.load_data('coarse')
Source: test_regularizers.py (project: keras, author: GeekLiB)
def get_data():
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)[:max_train_samples]
    X_test = X_test.reshape(10000, 784)[:max_test_samples]
    X_train = X_train.astype("float32") / 255
    X_test = X_test.astype("float32") / 255

    # convert class vectors to binary class matrices
    y_train = y_train[:max_train_samples]
    y_test = y_test[:max_test_samples]
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    test_ids = np.where(y_test == np.array(weighted_class))[0]

    return (X_train, Y_train), (X_test, Y_test), test_ids
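
This helper relies on module-level names defined elsewhere in the test file (max_train_samples, max_test_samples, nb_classes, weighted_class). A minimal sketch of plausible values, shown only so the excerpt reads standalone; the actual test may use different numbers:

# Assumed module-level constants (not part of the excerpt above):
nb_classes = 10            # MNIST has 10 digit classes
max_train_samples = 5000   # illustrative cap on the training rows
max_test_samples = 1000    # illustrative cap on the test rows
weighted_class = 9         # the class the weighting/regularizer test singles out
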
Source: regularizers.py (project: keras-contrib, author: farizrahman4u)
def get_data():
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)[:max_train_samples]
    X_test = X_test.reshape(10000, 784)[:max_test_samples]
    X_train = X_train.astype('float32') / 255
    X_test = X_test.astype('float32') / 255

    # convert class vectors to binary class matrices
    y_train = y_train[:max_train_samples]
    y_test = y_test[:max_test_samples]
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    test_ids = np.where(y_test == np.array(weighted_class))[0]

    return (X_train, Y_train), (X_test, Y_test), test_ids
Source: mnist.py (project: dist_hyperas, author: osh)
def train_model(model):
    batch_size = 128
    nb_epoch = 2
    nb_classes = 10

    from keras.datasets import mnist
    from keras.utils import np_utils
    import time
    a = time.time()

    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
    h = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          verbose=0, validation_data=(X_test, Y_test))

    (loss,acc) = model.evaluate(X_test, Y_test, verbose=0)
    return {'loss':loss, 'accuracy':acc, 'epoch':h.epoch, 'time':time.time()-a, 'loss_hist':h.history['loss'], 'vloss_hist':h.history['val_loss']}
Source: data.py (project: keras-visualize-activations, author: philipperemy)
def get_mnist_data():

    # the data, shuffled and split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)
    return x_train, y_train, x_test, y_test
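
The function above assumes a few module-level definitions. For MNIST they would typically be the following (the import lines are an assumption about the rest of data.py):

# Assumed module-level definitions for get_mnist_data():
import keras
from keras.datasets import mnist

img_rows, img_cols = 28, 28   # MNIST images are 28x28
num_classes = 10              # ten digit classes
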
Source: reversing_gan.py (project: gandlf, author: codekansas)
def get_mnist_data(binarize=False):
    """Puts the MNIST data in the right format."""

    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    if binarize:
        X_test = np.where(X_test >= 10, 1, -1)
        X_train = np.where(X_train >= 10, 1, -1)
    else:
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_test = (X_test.astype(np.float32) - 127.5) / 127.5

    X_train = np.expand_dims(X_train, axis=-1)
    X_test = np.expand_dims(X_test, axis=-1)

    y_train = np.eye(10)[y_train]
    y_test = np.eye(10)[y_test]

    return (X_train, y_train), (X_test, y_test)
Source: mnist_gan.py (project: gandlf, author: codekansas)
def get_mnist_data(binarize=False):
    """Puts the MNIST data in the right format."""

    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    if binarize:
        X_test = np.where(X_test >= 10, 1, -1)
        X_train = np.where(X_train >= 10, 1, -1)
    else:
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_test = (X_test.astype(np.float32) - 127.5) / 127.5

    X_train = np.expand_dims(X_train, axis=-1)
    X_test = np.expand_dims(X_test, axis=-1)

    y_train = np.expand_dims(y_train, axis=-1)
    y_test = np.expand_dims(y_test, axis=-1)

    return (X_train, y_train), (X_test, y_test)
Source: use_intermediate_functions.py (project: hyperas, author: maxpumperla)
def data():
    '''
    Data providing function:

    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    '''
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')

    visualization_mnist(X_test)

    X_train /= 255
    X_test /= 255
    nb_classes = 10
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
Source: simple.py (project: hyperas, author: maxpumperla)
def data():
    '''
    Data providing function:

    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    '''
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    nb_classes = 10
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
Source: complex.py (project: hyperas, author: maxpumperla)
def data():
    '''
    Data providing function:

    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    '''
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    nb_classes = 10
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
Source: mnist_readme.py (project: hyperas, author: maxpumperla)
def data():
    """
    Data providing function:

    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    """
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    nb_classes = 10
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    return x_train, y_train, x_test, y_test
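
The docstrings in these hyperas examples explain that data() is kept separate so hyperopt does not reload the dataset on every trial. A minimal sketch of how such a data() function is typically wired into hyperas; the create_model name and max_evals value are assumptions standing in for the rest of the example script:

from hyperas import optim
from hyperopt import Trials, tpe

# data() and a companion create_model(x_train, y_train, x_test, y_test)
# are handed to hyperas; the data is loaded once and each trial only
# rebuilds and retrains the model with new hyperparameters.
best_run, best_model = optim.minimize(model=create_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=5,
                                      trials=Trials())
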
Source: test.py (project: tensorsne, author: gokceneraslan)
def get_mnist(n_train=5000, n_test=500, pca=True, d=50, dtype=np.float32):
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    n, row, col = X_train.shape
    channel = 1

    X_train = X_train.reshape(-1, channel * row * col)
    X_test = X_test.reshape(-1, channel * row * col)
    X_train = X_train.astype(dtype)
    X_test = X_test.astype(dtype)
    X_train /= 255
    X_test /= 255

    X_train = X_train[:n_train] - X_train[:n_train].mean(axis=0)
    X_test = X_test[:n_test] - X_test[:n_test].mean(axis=0)

    if pca:
        pcfit = PCA(n_components=d)

        X_train = pcfit.fit_transform(X_train)
        X_test = pcfit.transform(X_test)

    y_train = y_train[:n_train]
    y_test = y_test[:n_test]

    return X_train, y_train, X_test, y_test
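
A usage sketch for the loader above, simply restating its defaults: with pca=True the features come back reduced to d components, ready for a t-SNE style embedding.

X_train, y_train, X_test, y_test = get_mnist(n_train=5000, n_test=500, pca=True, d=50)
print(X_train.shape)  # expected: (5000, 50) after PCA
print(X_test.shape)   # expected: (500, 50)
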
Source: Config.py (project: NetworkCompress, author: luzai)
def load_data(self, limit_data, type='cifar10'):
        if MyConfig.cache_data is None:
            if type == 'cifar10':
                (train_x, train_y), (test_x, test_y) = cifar10.load_data()
            elif type == 'mnist':
                (train_x, train_y), (test_x, test_y) = mnist.load_data()
            elif type == 'cifar100':
                (train_x, train_y), (test_x, test_y) = cifar100.load_data(label_mode='fine')
            elif type == 'svhn':
                (train_x, train_y), (test_x, test_y) = load_data_svhn()

            train_x, mean_img = self._preprocess_input(train_x, None)
            test_x, _ = self._preprocess_input(test_x, mean_img)

            train_y, test_y = map(self._preprocess_output, [train_y, test_y])

            res = {'train_x': train_x, 'train_y': train_y, 'test_x': test_x, 'test_y': test_y}

            for key, val in res.items():
                res[key] = MyConfig._limit_data(val, limit_data)
            MyConfig.cache_data = res

        self.dataset = MyConfig.cache_data
Source: datasets.py (project: DEC-keras, author: XifengGuo)
def load_retures_keras():
    from keras.preprocessing.text import Tokenizer
    from keras.datasets import reuters
    max_words = 1000

    print('Loading data...')
    (x, y), (_, _) = reuters.load_data(num_words=max_words, test_split=0.)
    print(len(x), 'train sequences')

    num_classes = np.max(y) + 1
    print(num_classes, 'classes')

    print('Vectorizing sequence data...')
    tokenizer = Tokenizer(num_words=max_words)
    x = tokenizer.sequences_to_matrix(x, mode='binary')
    print('x_train shape:', x.shape)

    return x.astype(float), y
Source: datasets.py (project: DEC-keras, author: XifengGuo)
def load_imdb():
    from keras.preprocessing.text import Tokenizer
    from keras.datasets import imdb
    max_words = 1000

    print('Loading data...')
    (x1, y1), (x2, y2) = imdb.load_data(num_words=max_words)
    x = np.concatenate((x1, x2))
    y = np.concatenate((y1, y2))
    print(len(x), 'train sequences')

    num_classes = np.max(y) + 1
    print(num_classes, 'classes')

    print('Vectorizing sequence data...')
    tokenizer = Tokenizer(num_words=max_words)
    x = tokenizer.sequences_to_matrix(x, mode='binary')
    print('x_train shape:', x.shape)

    return x.astype(float), y
Source: test_regularizers.py (project: keras-customized, author: ambrite)
def get_data():
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)[:max_train_samples]
    X_test = X_test.reshape(10000, 784)[:max_test_samples]
    X_train = X_train.astype("float32") / 255
    X_test = X_test.astype("float32") / 255

    # convert class vectors to binary class matrices
    y_train = y_train[:max_train_samples]
    y_test = y_test[:max_test_samples]
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    test_ids = np.where(y_test == np.array(weighted_class))[0]

    return (X_train, Y_train), (X_test, Y_test), test_ids
Source: feedforward_keras_mnist.py (project: deep_learning, author: Vict0rSch)
def load_data():
    print('Loading data...')
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')

    X_train /= 255
    X_test /= 255

    y_train = np_utils.to_categorical(y_train, 10)
    y_test = np_utils.to_categorical(y_test, 10)

    X_train = np.reshape(X_train, (60000, 784))
    X_test = np.reshape(X_test, (10000, 784))

    print('Data loaded.')
    return [X_train, X_test, y_train, y_test]
Source: mnist_3d.py (project: huaat_ml_dl, author: ieee820)
def save_2d(label):
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    l_z, l_x, l_y = X_train.shape
    #cubes = np.ndarray([10,28,28],dtype=np.uint8)
    #new_1 = np.random(28,28)
    new_all = np.ones(784)
    new_all.resize(28,28)
    j = 1
    for i in range(0, l_z):
        #print X_train[i,:,:],y_train[i]
        #if j >= 10:
            #break;
        new = X_train[i,:,:]
        if y_train[i] == label:
            new_all = np.concatenate((new_all, new), axis=0)
            j = j + 1

    #reshape and save
    new_all.resize(j,28,28)
    new_mini = new_all[1:,:,:]

    np.save('/home/yangjj/minist_npy/'+str(label),new_mini)
Source: autoencoder.py (project: dsde-deep-learning, author: broadinstitute)
def load_mnist(flatten=True):
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train = x_train.astype('float32') / 255.
    x_test = x_test.astype('float32') / 255.

    if flatten:
        x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
        x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
    else:
        x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))  # adapt this if using `channels_first` image data format
        x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))  # adapt this if using `channels_first` image data format

    print(x_train.shape)
    print(x_test.shape)

    return (x_train, y_train), (x_test, y_test)
Source: autoencoder.py (project: dsde-deep-learning, author: broadinstitute)
def load_cifar(flatten=True):
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255.
    x_test /= 255.

    if flatten:
        x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
        x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
    else:
        x_train = np.reshape(x_train, (len(x_train), 32, 32, 3))  # adapt this if using `channels_first` image data format
        x_test = np.reshape(x_test, (len(x_test), 32, 32, 3))  # adapt this if using `channels_first` image data format

    print('bounds:', np.min(x_train), np.max(x_train))
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')  
    return (x_train, y_train), (x_test, y_test)
Source: data.py (project: kaos, author: RuiShu)
def __init__(self, nb_data, batchsize):
        super(MnistSemiSupervised, self).__init__(batchsize)
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        # reshape
        x_train = x_train.reshape(-1, 784)
        x_test = x_test.reshape(-1, 784)

        # subsample
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255
        x_u, y_u = x_train, np.zeros((len(x_train), 10))
        x_l, y_l = self.balanced_sampler(x_train, y_train, nb_data)
        # Convert class vectors to binary class matrices.
        y_l = np_utils.to_categorical(y_l, 10)
        y_test = np_utils.to_categorical(y_test, 10)
        self.x_train, self.y_train = x_u, y_u
        self.x_label, self.y_label = x_l, y_l
        self.x_valid, self.y_valid = x_test, y_test
Source: dcgan_mnist.py (project: pythontest, author: gjq246)
def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        self.channel = 1

        #(XX_train, YY_train),(X_test, Y_test) = mnist.load_data()
        print "111111"

        trainData, trainLabels = loadData('./mnisttrain',1000)
        self.x_train = trainData
        #self.x_train = XX_train
        #self.x_train = input_data.read_data_sets("mnist",\
        #   one_hot=True).train.images
        print "222222"
        self.x_train = self.x_train.reshape(-1, self.img_rows,\
            self.img_cols, 1).astype(np.float32)
        print "333333"


        self.DCGAN = DCGAN()
        self.discriminator =  self.DCGAN.discriminator_model()
        self.adversarial = self.DCGAN.adversarial_model()
        self.generator = self.DCGAN.generator()
Source: helpers.py (project: ild-cnn, author: intact-project)
def load_data():
    # loading mnist dataset
    (X_train, y_train), (X_val, y_val) = mnist.load_data()

    # adding a singleton dimension and rescale to [0,1]
    X_train = np.asarray(np.expand_dims(X_train,1))/float(255)
    X_val = np.asarray(np.expand_dims(X_val,1))/float(255)

    # labels to categorical vectors
    uniquelbls = np.unique(y_train)
    nb_classes = uniquelbls.shape[0]
    zbn = np.min(uniquelbls) # zero based numbering
    y_train = np_utils.to_categorical(y_train-zbn, nb_classes)
    y_val = np_utils.to_categorical(y_val-zbn, nb_classes)

    return (X_train, y_train), (X_val, y_val)
Source: train.py (project: mnist-multi-gpu, author: normanheckscher)
def get_cifar10():
    """Retrieve the CIFAR dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 64
    input_shape = (3072,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.reshape(50000, 3072)
    x_test = x_test.reshape(10000, 3072)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
Source: train.py (project: mnist-multi-gpu, author: normanheckscher)
def get_mnist():
    """Retrieve the MNIST dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 128
    input_shape = (784,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
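
Both train.py helpers return the same 7-tuple. A minimal sketch of consuming it with a small dense classifier; the layer sizes and epoch count are illustrative and not taken from the original script:

from keras.models import Sequential
from keras.layers import Dense

nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test = get_mnist()

model = Sequential()
model.add(Dense(512, activation='relu', input_shape=input_shape))
model.add(Dense(nb_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=1,
          validation_data=(x_test, y_test))
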
Source: mnist_siamese_generator_pad.py (project: kaggle_art, author: small-yellow-duck)
def do_split():
    if os.path.isdir('train') and os.path.isdir('test'):
        return

    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    os.mkdir('train')
    os.mkdir('test')

    np.savetxt('labels_train.csv', y_train, header='label')
    np.savetxt('labels_test.csv', y_test, header='label')

    for i in range(X_train.shape[0]):
        im = Image.fromarray(np.uint8(X_train[i]))
        im.save('train'+str(i)+'.png')

    for i in range(X_test.shape[0]):
        im = Image.fromarray(np.uint8(X_test[i]))
        im.save('test'+str(i)+'.png')   


#if __name__ == "__main__":
Source: mnist_siamese_generator2.py (project: kaggle_art, author: small-yellow-duck)
def do_split():
    if os.path.isdir('train') and os.path.isdir('test'):
        return

    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    os.mkdir('train')
    os.mkdir('test')

    np.savetxt('labels_train.csv', y_train, header='label')
    np.savetxt('labels_test.csv', y_test, header='label')

    for i in range(X_train.shape[0]):
        im = Image.fromarray(np.uint8(X_train[i]))
        im.save('train'+str(i)+'.png')

    for i in range(X_test.shape[0]):
        im = Image.fromarray(np.uint8(X_test[i]))
        im.save('test'+str(i)+'.png')

