Example source code for Python's loadtxt()

losses.py (project: youtube-8m, author: wangheda)
def calculate_loss_mix2(self, predictions, predictions_class, predictions_encoder, labels, **unused_params):
    with tf.name_scope("loss_mix2"):
      float_labels = tf.cast(labels, tf.float32)
      float_encoders = float_labels
      for i in range(FLAGS.encoder_layers):
        var_i = np.loadtxt(FLAGS.autoencoder_dir+'autoencoder_layer%d.model' % i)
        weight_i = tf.constant(var_i[:-1,:],dtype=tf.float32)
        bias_i = tf.reshape(tf.constant(var_i[-1,:],dtype=tf.float32),[-1])
        float_encoders = tf.nn.xw_plus_b(float_encoders,weight_i,bias_i)
        if i<FLAGS.encoder_layers-1:
          float_encoders = tf.nn.relu(float_encoders)
        else:
          hidden_mean = tf.reduce_mean(float_encoders,axis=1,keep_dims=True)
          hidden_std = tf.sqrt(tf.reduce_mean(tf.square(float_encoders-hidden_mean),axis=1,keep_dims=True))
          float_encoders = (float_encoders-hidden_mean)/(hidden_std+1e-6)
          #float_encoders = tf.nn.sigmoid(float_encoders)
      cross_entropy_encoder = 0.1*self.calculate_mseloss(predictions_encoder,float_encoders)
      cross_entropy_loss = self.calculate_loss(predictions,labels)
      return cross_entropy_encoder+cross_entropy_loss, float_encoders
      #return cross_entropy_encoder, float_encoders
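A note on the file format this relies on: each autoencoder_layer%d.model file is read by np.loadtxt() as one dense matrix whose last row holds the bias, as the var_i[:-1,:] / var_i[-1,:] split shows. A minimal, self-contained sketch of that round trip (file name and shapes hypothetical):

import numpy as np

# Hypothetical layer: 4 inputs, 3 outputs; the last row stores the bias.
weights, bias = np.random.randn(4, 3), np.random.randn(1, 3)
np.savetxt('autoencoder_layer0.model', np.vstack([weights, bias]))

var_0 = np.loadtxt('autoencoder_layer0.model')
w, b = var_0[:-1, :], var_0[-1, :]   # same split as in the snippet above
assert w.shape == (4, 3) and b.shape == (3,)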
gpUtils.py (project: MKLMM, author: omerwe)
def normalizeSNPs(normMethod, X, y, prev=None, frqFile=None):
    if (normMethod == 'frq'):
        print 'flipping SNPs for standardization...'
        empMean = X.mean(axis=0) / 2.0
        X[:, empMean>0.5] = 2 - X[:, empMean>0.5]       
        mafs = np.loadtxt(frqFile, usecols=[1,2]).mean(axis=1)
        snpsMean = 2*mafs
        snpsStd = np.sqrt(2*mafs*(1-mafs))
    elif (normMethod == 'controls'):
        controls = (y<y.mean())
        cases = ~controls
        snpsMeanControls, snpsStdControls = X[controls, :].mean(axis=0), X[controls, :].std(axis=0)
        snpsMeanCases, snpsStdCases = X[cases, :].mean(axis=0), X[cases, :].std(axis=0)
        snpsMean = (1-prev)*snpsMeanControls + prev*snpsMeanCases
        snpsStd = (1-prev)*snpsStdControls + prev*snpsStdCases
    elif (normMethod is None): snpsMean, snpsStd = X.mean(axis=0), X.std(axis=0)
    else: raise Exception('Unrecognized normalization method: ' + normMethod)

    return snpsMean, snpsStd
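usecols=[1,2] is doing the work here: np.loadtxt() parses only the two frequency columns of the .frq file and ignores the rest, and .mean(axis=1) averages the pair per SNP. A self-contained sketch with made-up frequencies:

import io
import numpy as np

# Two frequency columns (columns 1 and 2); column 0 is an ID that is skipped.
frq_text = u"snp1 0.10 0.12\nsnp2 0.30 0.28\n"
mafs = np.loadtxt(io.StringIO(frq_text), usecols=[1, 2]).mean(axis=1)
print(mafs)  # [0.11 0.29]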
pybrain_captcha.py (project: Verification-code-crack, author: weixianglin)
def predict():
    fnn=joblib.load(PKL)
    dir='E:/????/??????/1 ???/captcha_master1/captcha_master/worddata/'
    predictValue = []
    for fr in os.listdir(dir):
        dataset=[]
        f = dir + fr
        if f.rfind(u'.DS_Store') == -1 and f.rfind(u'Thumbs.db') == -1:
            data = np.loadtxt(f, delimiter=',')
            #data.reshape((1,2500))
            for item in data:
                dataset.append(int(item))

            #print(len(dataset))
            out = fnn.activate(dataset)
            out = out.argmax()
            iconset = ['3', 'c', 'd', 'e', 'f', 'h', 'j', 'k', 'l', 'm', 'n', 'w', 'x', 'y']
            for y, word in enumerate(iconset):
                if out == y:
                    print(word)
                    predictValue.append(word)

    print(u'predicted: %s' % (''.join(predictValue)))
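Since np.loadtxt() already returns a numeric array, the per-item int() loop above can be collapsed into a single astype(int) call. A simplification of the same step, not the project's code:

import io
import numpy as np

row = u"0,1,1,0,1"
data = np.loadtxt(io.StringIO(row), delimiter=',')
dataset = data.astype(int).tolist()  # replaces the per-item int() loop
print(dataset)  # [0, 1, 1, 0, 1]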
plyfile.py (project: pybot, author: spillai)
def _read(self, stream, text, byte_order):
        '''
        Read the actual data from a PLY file.

        '''
        if self._have_list:
            # There are list properties, so a simple load is
            # impossible.
            if text:
                self._read_txt(stream)
            else:
                self._read_bin(stream, byte_order)
        else:
            # There are no list properties, so loading the data is
            # much more straightforward.
            if text:
                self.data = _np.loadtxt(
                    _islice(iter(stream.readline, ''), self.count),
                    self.dtype())
            else:
                self.data = _np.fromfile(
                    stream, self.dtype(byte_order), self.count)
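Worth noting: _np.loadtxt() accepts any iterable of text lines, not just a path, which is what lets this reader pass an _islice over stream.readline together with a structured dtype and stop after self.count rows. A standalone sketch of the same idea:

import numpy as np
from itertools import islice

lines = iter([u"1 2.5", u"2 3.5", u"3 4.5"])
dt = np.dtype([('id', 'i4'), ('val', 'f8')])
data = np.loadtxt(islice(lines, 2), dtype=dt)  # read only the first 2 records
print(data['id'], data['val'])  # [1 2] [2.5 3.5]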
uw_rgbd.py (project: pybot, author: spillai)
def __init__(self, target, instance, files): 
            self.target = target 
            self.instance = instance
            mask_files = natural_sort(filter(lambda fn: '_maskcrop.png' in fn, files))
            depth_files = natural_sort(filter(lambda  fn: '_depthcrop.png' in fn, files))
            rgb_files = natural_sort(list(set(files) - set(mask_files) - set(depth_files)))
            loc_files = natural_sort(map(lambda fn: fn.replace('_crop.png', '_loc.txt'), rgb_files))

            # Ensure all have equal number of files (Hack! doesn't ensure filename consistency)
            nfiles = np.min([len(loc_files), len(mask_files), len(depth_files), len(rgb_files)])
            mask_files, depth_files, rgb_files, loc_files = mask_files[:nfiles], depth_files[:nfiles], \
                                                            rgb_files[:nfiles], loc_files[:nfiles]

            # print target, instance, len(loc_files), len(mask_files), len(depth_files), len(rgb_files)
            assert(len(mask_files) == len(depth_files) == len(rgb_files) == len(loc_files))

            # Read images
            self.rgb = ImageDatasetReader.from_filenames(rgb_files)
            self.depth = ImageDatasetReader.from_filenames(depth_files)
            self.mask = ImageDatasetReader.from_filenames(mask_files)

            # Read top-left locations of bounding box
            self.locations = np.vstack([np.loadtxt(loc, delimiter=',', dtype=np.int32) 
                                        for loc in loc_files])
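Each *_loc.txt file holds comma-separated integer rows, so np.loadtxt(..., delimiter=',', dtype=np.int32) parses them directly and np.vstack() stacks the per-file results into one matrix. Sketch with inline data standing in for the files:

import io
import numpy as np

loc_texts = [u"10,20", u"30,40"]  # one hypothetical *_loc.txt per frame
locations = np.vstack([np.loadtxt(io.StringIO(t), delimiter=',', dtype=np.int32)
                       for t in loc_texts])
print(locations.shape)  # (2, 2)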
uw_rgbd.py (project: pybot, author: spillai)
def load_ply(fn, version): 
            """ Retrieve aligned point cloud for each scene """ 

            if version == 'v1': 
                raise ValueError('''Version %s not supported. '''
                                 '''Check dataset and choose either v1 or v2 scene dataset''' % version)
                # P = np.loadtxt(os.path.expanduser(fn), usecols=(2,3,4,5,6,7,8), dtype=np.float64)
                # return map(lambda p: RigidTransform(Quaternion.from_wxyz(p[:4]), p[4:]), P)
            elif version == 'v2': 
                ply = PlyData.read(os.path.expanduser(fn))
                xyz = np.vstack([ply['vertex'].data['x'], 
                                 ply['vertex'].data['y'], 
                                 ply['vertex'].data['z']]).T
                rgb = np.vstack([ply['vertex'].data['diffuse_red'], 
                                 ply['vertex'].data['diffuse_green'], 
                                 ply['vertex'].data['diffuse_blue']]).T
                return xyz, rgb

            else: 
                raise ValueError('''Version %s not supported. '''
                                 '''Check dataset and choose either v1 or v2 scene dataset''' % version)
tsukuba.py (project: pybot, author: spillai)
def tsukuba_load_poses(fn): 
    """ 
    Retrieve poses
    X Y Z R P Y - > X -Y -Z R -P -Y

    np.deg2rad(p[3]),-np.deg2rad(p[4]),-np.deg2rad(p[5]),
        p[0]*.01,-p[1]*.01,-p[2]*.01, axes='sxyz') for p in P ]

    """ 
    P = np.loadtxt(os.path.expanduser(fn), dtype=np.float64, delimiter=',')
    return [ RigidTransform.from_rpyxyz(np.pi, 0, 0, 0, 0, 0) * \
             RigidTransform.from_rpyxyz(
                 np.deg2rad(p[3]),np.deg2rad(p[4]),np.deg2rad(p[5]),
                 p[0]*.01,p[1]*.01,p[2]*.01, axes='sxyz') * \
             RigidTransform.from_rpyxyz(np.pi, 0, 0, 0, 0, 0) for p in P ]

    # return [ RigidTransform.from_rpyxyz(
    #     np.deg2rad(p[3]),-np.deg2rad(p[4]),-np.deg2rad(p[5]),
    #     p[0]*.01,-p[1]*.01,-p[2]*.01, axes='sxyz') for p in P ]
test_nestio.py (project: NeoAnalysis, author: neoanalysis)
def test_values(self):
        """
        Tests if the function returns the correct values.
        """

        filename = get_test_file_full_path(
                ioclass=NestIO,
                filename='0gid-1time-2gex-3Vm-1261-0.dat',
                directory=self.local_test_dir, clean=False)

        id_to_test = 1
        r = NestIO(filenames=filename)
        seg = r.read_segment(gid_list=[id_to_test],
                             t_stop=1000. * pq.ms,
                             sampling_period=pq.ms, lazy=False,
                             id_column_dat=0, time_column_dat=1,
                             value_columns_dat=2, value_types='V_m')

        dat = np.loadtxt(filename)
        target_data = dat[:, 2][np.where(dat[:, 0] == id_to_test)]
        target_data = target_data[:, None]
        st = seg.analogsignals[0]
        np.testing.assert_array_equal(st.magnitude, target_data)
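The target_data extraction is a standard loadtxt post-processing step: load the whole .dat file as a float matrix, then boolean-mask one column by another. A compact sketch of the same selection:

import io
import numpy as np

# columns: gid, time, value (mirrors the .dat layout used above)
txt = u"1 0.1 -70.0\n2 0.1 -65.0\n1 0.2 -69.5\n"
dat = np.loadtxt(io.StringIO(txt))
target = dat[:, 2][dat[:, 0] == 1]    # values belonging to gid 1
print(target[:, None].shape)          # (2, 1), matching the analog signal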
test_nestio.py (project: NeoAnalysis, author: neoanalysis)
def test_values(self):
        """
        Tests if the routine loads the correct numbers from the file.
        """
        id_to_test = 1
        filename = get_test_file_full_path(
                ioclass=NestIO,
                filename='0gid-1time-1256-0.gdf',
                directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)
        seg = r.read_segment(gid_list=[id_to_test],
                             t_start=400. * pq.ms,
                             t_stop=500. * pq.ms, lazy=False,
                             id_column_gdf=0, time_column_gdf=1)

        dat = np.loadtxt(filename)
        target_data = dat[:, 1][np.where(dat[:, 0] == id_to_test)]

        st = seg.spiketrains[0]
        np.testing.assert_array_equal(st.magnitude, target_data)
nestio.py (project: NeoAnalysis, author: neoanalysis)
def __init__(self, filename):
        """
        filename: string, path to ASCII file to read.
        """

        self.filename = filename

        # read the first line to check the data type (int or float) of the data
        f = open(self.filename)
        line = f.readline()

        additional_parameters = {}
        if '.' not in line:
            additional_parameters['dtype'] = np.int32

        self.data = np.loadtxt(self.filename, **additional_parameters)

        if len(self.data.shape) == 1:
            self.data = self.data[:, np.newaxis]
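The constructor sniffs the first line for a decimal point and, if none is found, forwards dtype=np.int32 through **additional_parameters so integer-valued files load as integers. A sketch of that effect:

import io
import numpy as np

txt = u"1 2\n3 4\n"
kwargs = {} if '.' in txt.splitlines()[0] else {'dtype': np.int32}
data = np.loadtxt(io.StringIO(txt), **kwargs)
print(data.dtype)  # int32 (a decimal point would leave the default float64)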
test_nestio.py (project: NeoAnalysis, author: neoanalysis)
def test_values(self):
        """
        Tests if the function returns the correct values.
        """

        filename = get_test_file_full_path(
                ioclass=NestIO,
                filename='0gid-1time-2gex-3Vm-1261-0.dat',
                directory=self.local_test_dir, clean=False)

        id_to_test = 1
        r = NestIO(filenames=filename)
        seg = r.read_segment(gid_list=[id_to_test],
                             t_stop=1000. * pq.ms,
                             sampling_period=pq.ms, lazy=False,
                             id_column_dat=0, time_column_dat=1,
                             value_columns_dat=2, value_types='V_m')

        dat = np.loadtxt(filename)
        target_data = dat[:, 2][np.where(dat[:, 0] == id_to_test)]
        target_data = target_data[:, None]
        st = seg.analogsignals[0]
        np.testing.assert_array_equal(st.magnitude, target_data)
nestio.py (project: NeoAnalysis, author: neoanalysis)
def __init__(self, filename):
        """
        filename: string, path to ASCII file to read.
        """

        self.filename = filename

        # read the first line to check the data type (int or float) of the data
        f = open(self.filename)
        line = f.readline()

        additional_parameters = {}
        if '.' not in line:
            additional_parameters['dtype'] = np.int32

        self.data = np.loadtxt(self.filename, **additional_parameters)

        if len(self.data.shape) == 1:
            self.data = self.data[:, np.newaxis]
amazon_main_xgboost.py (project: ensemble_amazon, author: kaz-Anova)
def load_data(filename, use_labels=True):
    """
    Load data from CSV files and return them as numpy arrays
    The use_labels parameter indicates whether one should
    read the first column (containing class labels). If false,
    return all 0s. 
    """

    # load column 1 to 8 (ignore last one)
    data = np.loadtxt(open( filename), delimiter=',',
                      usecols=range(1, 9), skiprows=1)
    if use_labels:
        labels = np.loadtxt(open( filename), delimiter=',',
                            usecols=[0], skiprows=1)
    else:
        labels = np.zeros(data.shape[0])
    return labels, data
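skiprows=1 drops the CSV header row and usecols=range(1, 9) keeps columns 1 through 8, so the label column (0) and the trailing column are excluded; np.loadtxt() also accepts the file name directly, making the open() wrapper optional. Self-contained sketch with dummy rows:

import io
import numpy as np

csv = (u"label,f1,f2,f3,f4,f5,f6,f7,f8,extra\n"
       u"1,10,20,30,40,50,60,70,80,99\n"
       u"0,11,21,31,41,51,61,71,81,99\n")
data = np.loadtxt(io.StringIO(csv), delimiter=',', usecols=range(1, 9), skiprows=1)
labels = np.loadtxt(io.StringIO(csv), delimiter=',', usecols=[0], skiprows=1)
print(labels.shape, data.shape)  # (2,) (2, 8)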
amazon_main_logit_3way_best.py (project: ensemble_amazon, author: kaz-Anova)
def load_data(filename, use_labels=True):
    """
    Load data from CSV files and return them as numpy arrays
    The use_labels parameter indicates whether one should
    read the first column (containing class labels). If false,
    return all 0s. 
    """

    # load column 1 to 8 (ignore last one)
    data = np.loadtxt(open( filename), delimiter=',',
                      usecols=range(1, 9), skiprows=1)
    if use_labels:
        labels = np.loadtxt(open( filename), delimiter=',',
                            usecols=[0], skiprows=1)
    else:
        labels = np.zeros(data.shape[0])
    return labels, data
amazon_main_logit_3way.py (project: ensemble_amazon, author: kaz-Anova)
def load_data(filename, use_labels=True):
    """
    Load data from CSV files and return them as numpy arrays
    The use_labels parameter indicates whether one should
    read the first column (containing class labels). If false,
    return all 0s. 
    """

    # load column 1 to 8 (ignore last one)
    data = np.loadtxt(open( filename), delimiter=',',
                      usecols=range(1, 9), skiprows=1)
    if use_labels:
        labels = np.loadtxt(open( filename), delimiter=',',
                            usecols=[0], skiprows=1)
    else:
        labels = np.zeros(data.shape[0])
    return labels, data
amazon_main_logit_2D.py (project: ensemble_amazon, author: kaz-Anova)
def load_data(filename, use_labels=True):
    """
    Load data from CSV files and return them as numpy arrays
    The use_labels parameter indicates whether one should
    read the first column (containing class labels). If false,
    return all 0s. 
    """

    # load column 1 to 8 (ignore last one)
    data = np.loadtxt(open( filename), delimiter=',',
                      usecols=range(1, 9), skiprows=1)
    if use_labels:
        labels = np.loadtxt(open( filename), delimiter=',',
                            usecols=[0], skiprows=1)
    else:
        labels = np.zeros(data.shape[0])
    return labels, data
profiles.py (project: mbin, author: fanglab)
def launch():
    opts, h5_files, motifs_fn = __parseArgs()
    __initLog(opts)

    motifs           = np.loadtxt(motifs_fn, dtype="str", ndmin=1)
    motifs,not_found = find_motifs_in_control(opts, motifs)
    if len(not_found)>0:
        logging.warning("")
        logging.warning("  ******************** Important *********************")
        logging.warning("  Did not find %s motifs in %s:" % (len(not_found), opts.control_pkl_name))
        for nf in not_found:
            logging.warning("       %s" % nf)
        logging.warning("  These motif(s) will be removed from further analysis.")
        logging.warning("  These %s motifs will be kept:" % len(motifs))
        for m in motifs:
            logging.warning("       %s" % m)
        logging.warning("  ****************************************************")
        logging.warning("")
    else:
        logging.info("Found entries for all %s motifs in %s" % (len(motifs), opts.control_pkl_name))


    build_profiles(opts, h5_files, motifs, motifs_fn)

    print >> sys.stderr, "mBin methylation profiling has finished running. See log for details."
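ndmin=1 is the detail worth copying here: without it, a motifs file containing a single motif would load as a 0-d string array, breaking len(motifs) and iteration. Sketch:

import io
import numpy as np

single = np.loadtxt(io.StringIO(u"GATC\n"), dtype="str", ndmin=1)
print(len(single), single[0])  # 1 GATC (0-d and unindexable without ndmin=1)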
visualize.py (project: mbin, author: fanglab)
def parse_fields( self ):
        """
        Read in the fields contained in the output files from methylprofiles.
        """
        m = np.loadtxt(self.mfn, dtype="str", skiprows=1)
        o = np.loadtxt(self.ofn, dtype="str", skiprows=1)

        # Optional flags
        m = self.length_filter(m)
        o = self.length_filter(o)
        if self.opts.n_seqs!=None:
            m = self.subsample_seqs( m )
            o = self.subsample_seqs( o )

        # Pull out values
        self.ids     = m[:,0].astype("str")
        self.lens    = m[:,1].astype("int")
        self.mscores = m[:,2:].astype("float")
        if self.opts.seq_type=="contig":
            self.covs     = o[:,2].astype("float")
            self.covcomps = o[:,2:].astype("float")
            self.comps    = o[:,3:].astype("float")
        else:
            self.comps    = o[:,2:].astype("float")
motifs.py (project: mbin, author: fanglab)
def transpose_contig_matrix( args ):
    contig  = args[0]
    opts    = args[1]
    logging.info("  Transposing %s" % contig)
    contig_ipds_fn       = os.path.join( opts.tmp, "%s_ipds.tmp"       % contig)
    contig_ipds_kmers_fn = os.path.join( opts.tmp, "%s_ipdskmers.tmp"  % contig)
    contig_ipds_N_fn     = os.path.join( opts.tmp, "%s_ipdsN.tmp"      % contig)
    contig_ipds          = np.loadtxt(contig_ipds_fn,       dtype="float")
    contig_ipds_kmers    = np.loadtxt(contig_ipds_kmers_fn, dtype="str")
    contig_ipds_N        = np.loadtxt(contig_ipds_N_fn,     dtype="int")
    if len(contig_ipds.shape)==1:
        contig_ipds   = contig_ipds.reshape(1,contig_ipds.shape[0])
        contig_ipds_N = contig_ipds_N.reshape(1,contig_ipds_N.shape[0])

    contig_ipds    = contig_ipds.T
    contig_ipds_N  = contig_ipds_N.T
    np.savetxt(contig_ipds_fn+".trans",   contig_ipds,   fmt="%.4f", delimiter="\t")
    np.savetxt(contig_ipds_N_fn+".trans", contig_ipds_N, fmt="%s",   delimiter="\t")
    return None
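The reshape guard above handles single-row files, where np.loadtxt() squeezes the result to 1-D; passing ndmin=2 at load time avoids the manual reshape (an alternative, not the project's code):

import io
import numpy as np

one_row = u"0.91 1.12 0.88\n"
ipds = np.loadtxt(io.StringIO(one_row), dtype="float", ndmin=2)
print(ipds.shape, ipds.T.shape)  # (1, 3) (3, 1), no manual reshape needed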
tools.py (project: TI-pooling, author: dlaptev)
def __init__(self,
               name,
               number_of_classes,
               number_of_transformations,
               loaded_size,
               desired_size,
               max_size=None):
    loaded = np.loadtxt(name)
    if max_size is not None:
      subset = np.random.choice(loaded.shape[0], max_size, replace=False)
      loaded = loaded[subset, :]
    padded_x = self._pad(loaded[:, :-1], loaded_size, desired_size)
    self._x = self._transform(padded_x, number_of_transformations)
    self._y = self._int_labels_to_one_hot(loaded[:, -1], number_of_classes)
    self._completed_epochs = -1
    self._new_epoch = False
    self._start_new_epoch()
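max_size caps memory by sampling rows without replacement immediately after the load; features then come from all but the last column and labels from the last. A minimal sketch of the subsampling step, with inline data in place of the file:

import io
import numpy as np

text = u"\n".join(u"%d %d %d" % (i, i + 1, i % 2) for i in range(10))
loaded = np.loadtxt(io.StringIO(text))          # 10 rows: two features + label
subset = np.random.choice(loaded.shape[0], 4, replace=False)
loaded = loaded[subset, :]                      # keep max_size=4 random rows
x, y = loaded[:, :-1], loaded[:, -1]            # same split as above
print(x.shape, y.shape)  # (4, 2) (4,)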
utils.py (project: BISIP, author: clberube)
def get_data(filename,headers,ph_units):
    # Import the .DAT data file
    dat_file = np.loadtxt("%s"%(filename),skiprows=headers,delimiter=',')
    labels = ["freq", "amp", "pha", "amp_err", "pha_err"]
    data = {l:dat_file[:,i] for (i,l) in enumerate(labels)}
    if ph_units == "mrad":
        data["pha"] = data["pha"]/1000                    # mrad to rad
        data["pha_err"] = data["pha_err"]/1000              # mrad to rad
    if ph_units == "deg":
        data["pha"] = np.radians(data["pha"])               # deg to rad
        data["pha_err"] = np.radians(data["pha_err"])       # deg to rad
    data["phase_range"] = abs(max(data["pha"])-min(data["pha"])) # Range of phase measurements (used in NRMS error calculation)
    data["Z"]  = data["amp"]*(np.cos(data["pha"]) + 1j*np.sin(data["pha"]))
    EI = np.sqrt(((data["amp"]*np.cos(data["pha"])*data["pha_err"])**2)+(np.sin(data["pha"])*data["amp_err"])**2)
    ER = np.sqrt(((data["amp"]*np.sin(data["pha"])*data["pha_err"])**2)+(np.cos(data["pha"])*data["amp_err"])**2)
    data["Z_err"] = ER + 1j*EI
    # Normalization of amplitude
    data["Z_max"] = max(abs(data["Z"]))  # Maximum amplitude
    zn, zn_e = data["Z"]/data["Z_max"], data["Z_err"]/data["Z_max"] # Normalization of impedance by max amplitude
    data["zn"] = np.array([zn.real, zn.imag]) # 2D array with first column = real values, second column = imag values
    data["zn_err"] = np.array([zn_e.real, zn_e.imag]) # 2D array with first column = real values, second column = imag values
    return data
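The labels/enumerate comprehension is one way to name the five columns; unpack=True is a loadtxt-native alternative that returns one array per column (a variation on the code above, not what the project uses):

import io
import numpy as np

txt = u"1.0,100.0,-3.1,0.5,0.2\n10.0,90.0,-2.9,0.4,0.1\n"
freq, amp, pha, amp_err, pha_err = np.loadtxt(io.StringIO(txt), delimiter=',',
                                              unpack=True)
print(freq, amp.shape)  # [ 1. 10.] (2,)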
utils.py (project: BISIP, author: clberube)
def get_data(filename,headers,ph_units):
    # Import the .DAT data file
    dat_file = np.loadtxt("%s"%(filename),skiprows=headers,delimiter=',')
    labels = ["freq", "amp", "pha", "amp_err", "pha_err"]
    data = {l:dat_file[:,i] for (i,l) in enumerate(labels)}
    if ph_units == "mrad":
        data["pha"] = data["pha"]/1000                    # mrad to rad
        data["pha_err"] = data["pha_err"]/1000              # mrad to rad
    if ph_units == "deg":
        data["pha"] = np.radians(data["pha"])               # deg to rad
        data["pha_err"] = np.radians(data["pha_err"])       # deg to rad
    data["phase_range"] = abs(max(data["pha"])-min(data["pha"])) # Range of phase measurements (used in NRMS error calculation)
    data["Z"]  = data["amp"]*(np.cos(data["pha"]) + 1j*np.sin(data["pha"]))
    EI = np.sqrt(((data["amp"]*np.cos(data["pha"])*data["pha_err"])**2)+(np.sin(data["pha"])*data["amp_err"])**2)
    ER = np.sqrt(((data["amp"]*np.sin(data["pha"])*data["pha_err"])**2)+(np.cos(data["pha"])*data["amp_err"])**2)
    data["Z_err"] = ER + 1j*EI
    # Normalization of amplitude
    data["Z_max"] = max(abs(data["Z"]))  # Maximum amplitude
    zn, zn_e = data["Z"]/data["Z_max"], data["Z_err"]/data["Z_max"] # Normalization of impedance by max amplitude
    data["zn"] = np.array([zn.real, zn.imag]) # 2D array with first column = real values, second column = imag values
    data["zn_err"] = np.array([zn_e.real, zn_e.imag]) # 2D array with first column = real values, second column = imag values
    return data
test_Model_Parallax.py (project: MulensModel, author: rpoleski)
def do_annual_parallax_test(filename):
    """testing functions called by a few unit tests"""
    with open(filename) as data_file:
        lines = data_file.readlines()
    ulens_params = lines[3].split()
    event_params = lines[4].split()
    data = np.loadtxt(filename, dtype=None)
    model = Model({
        't_0':float(ulens_params[1])+2450000., 
        'u_0':float(ulens_params[3]), 
        't_E':float(ulens_params[4]), 
        'pi_E_N':float(ulens_params[5]), 
        'pi_E_E':float(ulens_params[6]) }, 
        coords=SkyCoord(
            event_params[1]+' '+event_params[2], unit=(u.deg, u.deg)))
    model.parameters.t_0_par = float(ulens_params[2])+2450000.

    time = data[:,0]
    dataset = MulensData([time, 20.+time*0., 0.1+time*0.,], add_2450000=True)
    model.set_datasets([dataset])
    model.parallax(satellite=False, earth_orbital=True, topocentric=False)
    return np.testing.assert_almost_equal(
        model.data_magnification[0] / data[:,1], 1.0, decimal=4)
test_Model_Parallax.py (project: MulensModel, author: rpoleski)
def test_satellite_and_annual_parallax_calculation():
    """test parallax calculation with Spitzer data"""
    model_with_par = Model({'t_0':2457181.93930, 'u_0':0.08858, 't_E':20.23090, 
                            'pi_E_N':-0.05413, 'pi_E_E':-0.16434}, 
                            coords="18:17:54.74 -22:59:33.4")
    model_with_par.parallax(satellite=True, earth_orbital=True, 
                            topocentric=False)
    model_with_par.parameters.t_0_par = 2457181.9

    data_OGLE = MulensData(file_name=SAMPLE_FILE_02, add_2450000=True)
    data_Spitzer = MulensData(
        file_name=SAMPLE_FILE_03, ephemerides_file=SAMPLE_FILE_03_EPH,
        add_2450000=True)
    model_with_par.set_datasets([data_OGLE, data_Spitzer])

    ref_OGLE = np.loadtxt(SAMPLE_FILE_02_REF, unpack=True, usecols=[5])
    ref_Spitzer = np.loadtxt(SAMPLE_FILE_03_REF, unpack=True, usecols=[5])

    np.testing.assert_almost_equal(model_with_par.data_magnification[0], 
                                    ref_OGLE, decimal=2)
    ratio = model_with_par.data_magnification[1] / ref_Spitzer
    np.testing.assert_almost_equal(ratio, [1.]*len(ratio), decimal=3)
osm-randomize-coord.py (project: atoolbox, author: liweitianux)
def replace_coord(self, coordfile):
        """
        Replace the coordinates with the data from the given
        coordinate file.
        """
        try:
            coord_new = np.loadtxt(coordfile)
        except ValueError:
            coord_new = np.loadtxt(coordfile, delimiter=",")
        ra_new = coord_new[:, 0]
        dec_new = coord_new[:, 1]
        if self.number != len(ra_new):
            raise RuntimeError("invalid coordinate file: %s" % coordfile)
        self.ra = ra_new
        self.dec = dec_new
        print("Replaced coordinates")
pre_sumstats.py (project: PleioPred, author: yiminghu)
def get_1000G_snps(sumstats, out_file):
    sf = np.loadtxt(sumstats,dtype=str,skiprows=1)
    h5f = h5py.File('ref/Misc/1000G_SNP_info.h5','r')
    rf = h5f['snp_chr'][:]
    h5f.close()
    ind1 = np.in1d(sf[:,1],rf[:,2])
    ind2 = np.in1d(rf[:,2],sf[:,1])
    sf1 = sf[ind1]
    rf1 = rf[ind2]
    ### check order ###
    if sum(sf1[:,1]==rf1[:,2])==len(rf1[:,2]):
        print 'Good!'
    else:
        print 'Shit happens, sorting sf1 to have the same order as rf1'
        O1 = np.argsort(sf1[:,1])
        O2 = np.argsort(rf1[:,2])
        O3 = np.argsort(O2)
        sf1 = sf1[O1][O3]
    out = ['hg19chrc snpid a1 a2 bp or p'+'\n']
    for i in range(len(sf1[:,1])):
        out.append(sf1[:,0][i]+' '+sf1[:,1][i]+' '+sf1[:,2][i]+' '+sf1[:,3][i]+' '+rf1[:,1][i]+' '+sf1[:,5][i]+' '+sf1[:,6][i]+'\n')
    ff = open(out_file,"w")
    ff.writelines(out)
    ff.close()
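dtype=str keeps every column as text so rsIDs can be matched with np.in1d(); the argsort/argsort/argsort trio then reorders one table into the other's order. A compact sketch of that reordering:

import numpy as np

a = np.array(["rs3", "rs1", "rs2"])
b = np.array(["rs1", "rs2", "rs3"])
order = np.argsort(a)[np.argsort(np.argsort(b))]  # a[order] follows b's order
print(a[order])  # ['rs1' 'rs2' 'rs3']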
data_utils.py (project: Caption-Generation, author: m516825)
def build_w2v_matrix(vocab_processor, w2v_path, vector_path, dim_size):
    w2v_dict = {}
    f = open(vector_path, 'r')
    for line in f.readlines():
        word, vec = line.strip().split(' ', 1)
        w2v_dict[word] = np.loadtxt([vec], dtype='float32')

    vocab_list = vocab_processor._reverse_mapping
    w2v_W = np.zeros(shape=(len(vocab_list), dim_size), dtype='float32')

    for i, vocab in enumerate(vocab_list):
        # unknown vocab
        if i == 0:
            continue
        else:
            if vocab in w2v_dict:
                w2v_W[i] = w2v_dict[vocab]
            else:
                w2v_W[i] = get_unknown_word_vec(dim_size)

    cPickle.dump(w2v_W, open(w2v_path, 'wb'))

    return w2v_W
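np.loadtxt([vec], dtype='float32') is the trick here: a one-element list of strings is treated as a one-line file, parsing a single vector without touching the filesystem. Equivalent sketch:

import numpy as np

line = u"word 0.1 0.2 0.3"
token, vec = line.split(' ', 1)
values = np.loadtxt([vec], dtype='float32')
print(token, values)  # word [0.1 0.2 0.3]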
MeshIO.py (project: discretize, author: simpeg)
def readModelUBC(mesh, fileName):
        """Read UBC OcTree model and get vector

        :param string fileName: path to the UBC GIF model file to read
        :rtype: numpy.ndarray
        :return: OcTree model
        """

        if type(fileName) is list:
            out = {}
            for f in fileName:
                out[f] = mesh.readModelUBC(f)
            return out

        assert hasattr(mesh, '_simpegReorderUBC'), 'The file must have been loaded from a UBC format.'
        assert mesh.dim == 3

        modList = []
        modArr = np.loadtxt(fileName)
        if len(modArr.shape) == 1:
            modList.append(modArr[mesh._simpegReorderUBC])
        else:
            modList.append(modArr[mesh._simpegReorderUBC, :])
        return modList
fem_mesh.py (project: fem, author: mlp6)
def load_nodeIDs_coords(nodefile="nodes.dyn"):
    """load in node IDs and coordinates

    Exclude '*' keyword lines

    :param nodefile: node filename (nodes.dyn)
    :returns: nodeIDcoords (numpy array)
    """
    from numpy import loadtxt
    header_comment_skips = count_header_comment_skips(nodefile)
    nodeIDcoords = loadtxt(nodefile,
                           delimiter=',',
                           comments='*',
                           skiprows=header_comment_skips,
                           dtype=[('id', 'i4'), ('x', 'f4'), ('y', 'f4'),
                                  ('z', 'f4')])
    return nodeIDcoords
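The structured dtype gives named field access on the result (nodes['x'] instead of positional indexing), and comments='*' drops the LS-DYNA keyword lines mid-file. Sketch:

import io
import numpy as np

txt = u"*NODE\n1,0.0,0.5,1.0\n2,1.0,0.5,0.0\n"
nodes = np.loadtxt(io.StringIO(txt), delimiter=',', comments='*',
                   dtype=[('id', 'i4'), ('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
print(nodes['id'], nodes['x'])  # [1 2] [0. 1.]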
fem_mesh.py (project: fem, author: mlp6)
def load_elems(elefile="elems.dyn"):
    """

    :param elefile: elems.dyn
    :return: elems
    """
    from numpy import loadtxt
    header_comment_skips = count_header_comment_skips(elefile)
    elems = loadtxt(elefile,
                    delimiter=',',
                    comments='*',
                    skiprows=header_comment_skips,
                    dtype=[('id', 'i4'), ('pid', 'i4'), ('n1', 'i4'),
                           ('n2', 'i4'), ('n3', 'i4'), ('n4', 'i4'),
                           ('n5', 'i4'), ('n6', 'i4'), ('n7', 'i4'),
                           ('n8', 'i4')])

    return elems

