Example source code for Python save()

test_conv1.py (project: rl-attack-detection, author: yenchenlin)
def main(args):
    with tf.Graph().as_default() as graph:
        # Create dataset
        logging.info('Create data flow from %s' % args.data)
        caffe_dataset = CaffeDataset(dir=args.data, num_act=args.num_act, mean_path=args.mean)

        # Config session
        config = get_config(args)

        x = tf.placeholder(dtype=tf.float32, shape=[None, 84, 84, 12])
        op = load_caffe_model(x, args.load)

        init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

        # Start session
        with tf.Session(config=config) as sess:
            sess.run(init)
            i = 0
            for s, a in caffe_dataset(5):
                pred_data = sess.run([op], feed_dict={x: [s]})[0]
                print(pred_data.shape)
                np.save('tf-%03d.npy' % i, pred_data)
                i += 1
read_fmri.py (project: cortex, author: rdevon)
def save_mask(data, out_path):
    '''Save mask of data.

    Args:
        data (numpy.array): Data to mask
        out_path (str): Output path for mask.

    '''
    print('Getting mask')

    s, n, x, y, z = data.shape
    mask = np.zeros((x, y, z))
    _data = data.reshape((s * n, x, y, z))

    mask[np.where(_data.mean(axis=0) > _data.mean())] = 1

    print('Masked out %d out of %d voxels' % ((mask == 0).sum(), mask.size))

    np.save(out_path, mask)
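A minimal usage sketch (names reused from above; assumes out_path already carries a .npy suffix, since np.save appends one otherwise): because the mask has shape (x, y, z) and the data has shape (s, n, x, y, z), broadcasting applies the reloaded mask directly.

mask = np.load(out_path)
masked_data = data * mask  # (x, y, z) broadcasts over the leading (s, n) axes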
main.py (project: OpenAI_Challenges, author: AlwaysLearningDeeper)
def main():
    for i in range(4, 0, -1):  # countdown before recording starts
        print(i)
        time.sleep(1)
    c = 0
    last_time = time.time()
    while True:
        c += 1
        screen = grab_screen(title='')
        screenG = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
        screenG = cv2.resize(screenG, (80, 60))
        keys = key_check()
        output = keys_to_output(keys)
        training_data.append([screenG, output])
        if c % 10 == 0:
            print('Recording at ' + str(10 / (time.time() - last_time)) + ' fps')
            last_time = time.time()

        if len(training_data) % 500 == 0:
            print(len(training_data))
            np.save(file_name,training_data)
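One caveat with this pattern: each training_data element pairs an image array with a key-press list, so np.save stores it as a pickled object array, and since NumPy 1.16.3 np.load refuses such files unless allow_pickle is set:

training_data = list(np.load(file_name, allow_pickle=True))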
npz.py (project: cupy, author: cupy)
def savez(file, *args, **kwds):
    """Saves one or more arrays into a file in uncompressed ``.npz`` format.

    Arguments without keys are treated as arguments with automatic keys named
    ``arr_0``, ``arr_1``, etc. corresponding to the positions in the argument
    list. The keys of arguments are used as keys in the ``.npz`` file, which
    are used for accessing NpzFile object when the file is read by
    :func:`cupy.load` function.

    Args:
        file (file or str): File or filename to save.
        *args: Arrays with implicit keys.
        **kwds: Arrays with explicit keys.

    .. seealso:: :func:`numpy.savez`

    """
    args = map(cupy.asnumpy, args)
    for key in kwds:
        kwds[key] = cupy.asnumpy(kwds[key])
    numpy.savez(file, *args, **kwds)
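A short usage sketch of the automatic-key behavior described in the docstring (file name and arrays chosen for illustration):

import cupy

a = cupy.arange(3)
b = cupy.ones((2, 2))
cupy.savez('example.npz', a, weights=b)

npz = cupy.load('example.npz')
print(npz['arr_0'])    # the positional array a, keyed automatically
print(npz['weights'])  # the keyword array, keyed explicitly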
deepq.py (project: ai-bs-summer17, author: uchibe)
def backupNetwork(self, model, backup):
        weightMatrix = []
        for layer in model.layers:
            weights = layer.get_weights()
            weightMatrix.append(weights)

        # np.save('weightMatrix.npy', weightMatrix)
        # print(weightMatrix.shape)
        for layer, weights in zip(backup.layers, weightMatrix):
            layer.set_weights(weights)

    # def loadWeights(self,path):
    #     self.model.set_weights(load_model(path).get_weights())
save_embeddings.py (project: magenta, author: tensorflow)
def save_arrays(savedir, hparams, z_val):
  """Save arrays as npy files.

  Args:
    savedir: Directory where arrays are saved.
    hparams: Hyperparameters.
    z_val: Array to save.
  """
  z_save_val = np.array(z_val).reshape(-1, hparams.num_latent)

  name = FLAGS.tfrecord_path.split("/")[-1].split(".tfrecord")[0]
  save_name = os.path.join(savedir, "{}_%s.npy".format(name))
  with tf.gfile.Open(save_name % "z", "w") as f:
    np.save(f, z_save_val)

  tf.logging.info("Z_Save:{}".format(z_save_val.shape))
  tf.logging.info("Successfully saved to {}".format(save_name % ""))
test_pulsecal.py (project: Auspex, author: BBN-Q)
def test_rabi_amp(self):
        """
        Test RabiAmpCalibration. Ideal data generated by simulate_rabiAmp.
        """

        ideal_data = [np.tile(simulate_rabiAmp(), self.nbr_round_robins)]
        np.save(self.filename, ideal_data)
        # floor division keeps num_steps an integer under Python 3
        rabi_cal = cal.RabiAmpCalibration(self.q.label, num_steps=len(ideal_data[0]) // (2 * self.nbr_round_robins))
        cal.calibrate([rabi_cal])
        os.remove(self.filename)
        self.assertAlmostEqual(rabi_cal.pi_amp,1,places=2)
        self.assertAlmostEqual(rabi_cal.pi2_amp,0.5,places=2)
        #test update_settings
        new_settings = auspex.config.load_meas_file(cfg_file)
        self.assertAlmostEqual(rabi_cal.pi_amp, new_settings['qubits'][self.q.label]['control']['pulse_params']['piAmp'], places=4)
        self.assertAlmostEqual(rabi_cal.pi2_amp, new_settings['qubits'][self.q.label]['control']['pulse_params']['pi2Amp'], places=4)
        #restore original settings
        auspex.config.dump_meas_file(self.test_settings, cfg_file)
__init__.py (project: sudokuextract, author: hbldh)
def _save_data(which, X, y, data_source):
    if data_source.lower() == 'mnist':
        data_source = 'mnist'
    else:
        data_source = 'se'

    if X.shape[0] != len(y):
        raise TypeError("Length of data samples ({0}) was not identical "
                        "to length of labels ({1})".format(X.shape[0], len(y)))

    # Convert to numpy array.
    if not isinstance(X, np.ndarray):
        X = np.array(X)
    if not isinstance(y, np.ndarray):
        y = np.array(y)

    # Write feature_data
    fname = resource_filename('sudokuextract.data', "{0}-{1}-data.gz".format(data_source, which))
    with gzip.GzipFile(fname, mode='wb') as f:
        np.save(f, X)

    # Write labels
    fname = resource_filename('sudokuextract.data', "{0}-{1}-labels.gz".format(data_source, which))
    with gzip.GzipFile(fname, mode='wb') as f:
        np.save(f, y)
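The matching read path opens the gzip stream the same way; np.load accepts any file-like object, so a sketch of the loader would be:

with gzip.GzipFile(fname, mode='rb') as f:
    X = np.load(f)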
utilities.py (project: aueb.twitter.sentiment, author: nlpaueb)
def loadGlove(d=200):
    start = time.time()

    f1 = 'resources/words.pkl'
    f2 = 'resources/embeddings.npy'

    if os.path.isfile(f1) and os.path.isfile(f2):
        with open(f1, 'rb') as f:
            w = pickle.load(f)

        e = np.load(f2)
        glove = GloveDictionary.Glove(words=w, emb=e)
    else:
        glove = GloveDictionary.Glove(d)
        saveGlove(glove)

    end=time.time()
    return glove

# save trained models
utilities.py (project: aueb.twitter.sentiment, author: nlpaueb)
def getConfidenceScores(features_train, labels_train, C):
    train_confidence = []
    #confidence scores for training data are computed using K-fold cross validation
    kfold = KFold(features_train.shape[0], n_folds=10)

    for train_index,test_index in kfold:
        X_train, X_test = features_train[train_index], features_train[test_index]
        y_train, y_test = labels_train[train_index], labels_train[test_index]

        #train classifier for the subset of train data
        m = SVM.train(X_train,y_train,c=C,k="linear")

        #predict confidence for test data and append it to list
        conf = m.decision_function(X_test)
        for x in conf:
            train_confidence.append(x)

    return np.array(train_confidence)
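Note that KFold(features_train.shape[0], n_folds=10) is the pre-0.18 scikit-learn cross-validation API. On modern scikit-learn the equivalent iteration would be roughly:

from sklearn.model_selection import KFold

kfold = KFold(n_splits=10)
for train_index, test_index in kfold.split(features_train):
    ...  # same loop body as above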

#save pos scores
get_dataset.py (project: Cat-Segmentation, author: ardamavi)
def get_dataset(dataset_path='Data/Train_Data'):
    # Getting all data from data path:
    try:
        X = np.load('Data/npy_train_data/X.npy')
        Y = np.load('Data/npy_train_data/Y.npy')
    except IOError:  # cached .npy files not found; build them from the raw images
        inputs_path = dataset_path+'/input'
        images = listdir(inputs_path)  # Getting images
        X = []
        Y = []
        for img in images:
            img_path = inputs_path+'/'+img

            x_img = get_img(img_path).astype('float32').reshape(64, 64, 3)
            x_img /= 255.

            y_img = get_img(img_path.replace('input/', 'mask/mask_')).astype('float32').reshape(64, 64, 1)
            y_img /= 255.

            X.append(x_img)
            Y.append(y_img)
        X = np.array(X)
        Y = np.array(Y)
        # Create dataset:
        if not os.path.exists('Data/npy_train_data/'):
            os.makedirs('Data/npy_train_data/')
        np.save('Data/npy_train_data/X.npy', X)
        np.save('Data/npy_train_data/Y.npy', Y)
    X, X_test, Y, Y_test = train_test_split(X, Y, test_size=0.1, random_state=42)
    return X, X_test, Y, Y_test
genderclassifier.py (project: namegenderclassifier, author: joaoalvarenga)
def save(self, model_filename):
        self.__model.save("%s.model" % model_filename)
        np.save("%s.tvocab" % model_filename, np.asarray(self.__trigrams))
        np.save("%s.cvocab" % model_filename, np.asarray(self.__chars))
        np.save("%s.classes" % model_filename, np.asarray(self.__classes))
utils.py (project: variational-text-tensorflow, author: carpedm20)
def save_pkl(path, obj):
  with open(path, 'wb') as f:  # binary mode for pickle
    cPickle.dump(obj, f)
    print(" [*] save %s" % path)
utils.py (project: variational-text-tensorflow, author: carpedm20)
def save_npy(path, obj):
  np.save(path, obj)
  print(" [*] save %s" % path)
get_hof.py (project: Deep360Pilot-optical-flow, author: yenchenlin)
def get_hof(name):
    print(name)

    FLOW_DIR = 'data/of_' + args.domain + '/' + name + '/'
    BOXES_DIR = 'data/feature_' + args.domain + \
        '_' + str(args.n_boxes) + 'boxes/' + name + '/'

    n_frames = len(glob.glob(FLOW_DIR + '*.png'))

    # init boxes
    clip_boxes_index = 1
    clip_boxes = np.load(BOXES_DIR + 'roislist{:04d}.npy'.format(clip_boxes_index))

    # init hof
    hof_shape = (50, args.n_boxes, 12)
    hof = np.zeros(hof_shape)

    for i in range(1, n_frames + 1):
        print("{}, Flow {}, ".format(name, i))
        # boxes
        new_clip_boxes_index = (i - 1) // 50 + 1  # clips are 50 frames long
        if clip_boxes_index != new_clip_boxes_index:
            # 1.1 save hof and init a new one
            np.save(BOXES_DIR + 'hof{:04d}.npy'.format(clip_boxes_index), hof)
            hof = np.zeros(hof_shape)

            # 2.1 update clip_boxes
            clip_boxes_index = new_clip_boxes_index
            clip_boxes = np.load(BOXES_DIR + 'roislist{:04d}.npy'.format(clip_boxes_index))

        flow_img = np.array(cv2.imread(FLOW_DIR + '{:06d}.png'.format(i)), dtype=np.float32)

        frame_boxes = clip_boxes[(i-1) % 50].astype(int)
        for box_id, (xmin, ymin, xmax, ymax) in enumerate(frame_boxes):
            xmin, ymin, xmax, ymax = preprocess_box(xmin, ymin, xmax, ymax)
            box_flow_img = flow_img[ymin:ymax, xmin:xmax, :]
            hof[(i-1) % 50][box_id], _ = flow_to_hist(box_flow_img)

    # save latest hof
    np.save(BOXES_DIR + 'hof{:04d}.npy'.format(clip_boxes_index), hof)
indoor3d_util.py (project: pointnet, author: charlesq34)
def collect_point_label(anno_path, out_filename, file_format='txt'):
    """ Convert original dataset files to data_label file (each line is XYZRGBL).
        We aggregated all the points from each instance in the room.

    Args:
        anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
        out_filename: path to save collected points and labels (each line is XYZRGBL)
        file_format: txt or numpy, determines what file format to save.
    Returns:
        None
    Note:
        the points are shifted before save, the most negative point is now at origin.
    """
    points_list = []

    for f in glob.glob(os.path.join(anno_path, '*.txt')):
        cls = os.path.basename(f).split('_')[0]
        if cls not in g_classes:  # note: some rooms contain a misspelled 'staris' class
            cls = 'clutter'
        points = np.loadtxt(f)
        labels = np.ones((points.shape[0],1)) * g_class2label[cls]
        points_list.append(np.concatenate([points, labels], 1)) # Nx7

    data_label = np.concatenate(points_list, 0)
    xyz_min = np.amin(data_label, axis=0)[0:3]
    data_label[:, 0:3] -= xyz_min

    if file_format=='txt':
        fout = open(out_filename, 'w')
        for i in range(data_label.shape[0]):
            fout.write('%f %f %f %d %d %d %d\n' % \
                          (data_label[i,0], data_label[i,1], data_label[i,2],
                           data_label[i,3], data_label[i,4], data_label[i,5],
                           data_label[i,6]))
        fout.close()
    elif file_format=='numpy':
        np.save(out_filename, data_label)
    else:
        print('ERROR!! Unknown file format: %s, please use txt or numpy.' % \
            (file_format))
        exit()
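A hedged call example built from the docstring's sample path (output name chosen for illustration):

collect_point_label('Area_1/office_2/Annotations/', 'office_2_data_label.npy', file_format='numpy')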
indoor3d_util.py (project: pointnet, author: charlesq34)
def collect_bounding_box(anno_path, out_filename):
    """ Compute bounding boxes from each instance in original dataset files on
        one room. **We assume the bbox is aligned with XYZ coordinate.**

    Args:
        anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
        out_filename: path to save instance bounding boxes for that room.
            each line is x1 y1 z1 x2 y2 z2 label,
            where (x1,y1,z1) is the point on the diagonal closer to origin
    Returns:
        None
    Note:
        room points are shifted, the most negative point is now at origin.
    """
    bbox_label_list = []

    for f in glob.glob(os.path.join(anno_path, '*.txt')):
        cls = os.path.basename(f).split('_')[0]
        if cls not in g_classes:  # note: some rooms contain a misspelled 'staris' class
            cls = 'clutter'
        points = np.loadtxt(f)
        label = g_class2label[cls]
        # Compute tightest axis aligned bounding box
        xyz_min = np.amin(points[:, 0:3], axis=0)
        xyz_max = np.amax(points[:, 0:3], axis=0)
        ins_bbox_label = np.expand_dims(
            np.concatenate([xyz_min, xyz_max, np.array([label])], 0), 0)
        bbox_label_list.append(ins_bbox_label)

    bbox_label = np.concatenate(bbox_label_list, 0)
    room_xyz_min = np.amin(bbox_label[:, 0:3], axis=0)
    bbox_label[:, 0:3] -= room_xyz_min 
    bbox_label[:, 3:6] -= room_xyz_min 

    fout = open(out_filename, 'w')
    for i in range(bbox_label.shape[0]):
        fout.write('%f %f %f %f %f %f %d\n' % \
                      (bbox_label[i,0], bbox_label[i,1], bbox_label[i,2],
                       bbox_label[i,3], bbox_label[i,4], bbox_label[i,5],
                       bbox_label[i,6]))
    fout.close()
word2vec.py (project: deep-learning, author: ljanyst)
def save_checkpoint(self, session, step):
        self.saver.save(session, self.logdir + "/model.ckpt", step)

#-------------------------------------------------------------------------------
chunks.py (project: cloud-volume, author: seung-lab)
def encode_npz(subvol):
    """
    This file format is unrelated to np.savez
    We are just saving as .npy and the compressing
    using zlib. 
    The .npy format contains metadata indicating
    shape and dtype, instead of np.tobytes which doesn't
    contain any metadata.
    """
    fileobj = io.BytesIO()
    if len(subvol.shape) == 3:
        subvol = np.expand_dims(subvol, 0)
    np.save(fileobj, subvol)
    cdz = zlib.compress(fileobj.getvalue())
    return cdz
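The inverse follows from the docstring: decompress with zlib, then let np.load parse the .npy header back into shape and dtype (decode_npz is an assumed name, mirroring the encoder above):

def decode_npz(cdz):
    fileobj = io.BytesIO(zlib.decompress(cdz))
    return np.load(fileobj)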
bose_hubbard_mft.py (project: snake, author: rhinech)
def generate_data():
    """Generate grid of data for interpolation."""
    res = []
    for hopping in np.linspace(0.0, 0.12, GRID_SIZE):
        for mu in np.linspace(2.0, 3.0, GRID_SIZE):
            print(hopping, mu)
            res.append(np.concatenate([[hopping, mu], optimize(hopping, mu)]))
    res = np.array(res)
    np.save('data_%d' % GRID_SIZE, res)  # res is already an ndarray; np.save appends .npy

