Python double() usage examples, collected from open-source projects

invResults.py (project: BISIP, author: clberube)
def logp_trace(model):
    """
    Return the model's logp evaluated at every stored sample.
    """
    #init
    db = model.db
    n_samples = db.trace('deviance').length()
    logp = np.empty(n_samples, np.double)
    #loop over all samples
    for i_sample in range(n_samples):
        #set the values of all stochastics to their 'i_sample' values
        for stochastic in model.stochastics:
            try:
                value = db.trace(stochastic.__name__)[i_sample]
                stochastic.value = value

            except KeyError:
                print("No trace available for %s. " % stochastic.__name__)

        #get logp
        logp[i_sample] = model.logp
    return logp
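A minimal usage sketch for the helper above, assuming PyMC2 and an already-sampled model; `mymodel`, the iteration counts, and the printed summary are illustrative, not part of the original source:

import pymc
import mymodel                     # hypothetical module defining the stochastics

M = pymc.MCMC(mymodel)             # build the sampler around the model
M.sample(iter=10000, burn=5000)    # run MCMC so that M.db holds the traces
lp = logp_trace(M)                 # joint log-probability at every stored sample
print(lp.mean(), lp.max())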
rank_correlation.py (project: MetaphoricChange, author: Garrafao)
def score_mod(gold, prediction, method):
    """
    Computes correlation coefficient for two lists of values.
    :param gold: list of gold values
    :param prediction: list of predicted values
    :param method: string, one of "pearson", "spearman" or "auc" (area under curve)
    :return: correlation coefficient and p-value
    """

    if len(gold) != len(prediction):
        raise ValueError("The two arrays must have the same length!")

    gold = np.array(gold, dtype=np.double)
    prediction = np.array(prediction, dtype=np.double)

    if method == "pearson":
        return pearson(gold, prediction)
    elif method == "spearman":
        return spearman(gold, prediction)
    elif method == "auc":
        return auc(gold, prediction)
    else:
        raise NotImplementedError("Unknown scoring measure: %s" % method)
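An illustrative call follows; note that `pearson`, `spearman` and `auc` are the module's own wrappers (defined elsewhere in rank_correlation.py, presumably around scipy.stats / sklearn), not standard library functions:

gold = [0.1, 0.4, 0.35, 0.8]
prediction = [0.12, 0.30, 0.45, 0.70]
rho, pval = score_mod(gold, prediction, "spearman")   # correlation, p-value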
test_multiarray.py (project: radar, author: amoose136)
def test_basic(self):
        dts = [np.bool_, np.int16, np.int32, np.int64, np.double, np.complex128,
               np.longdouble, np.clongdouble]
        for dt in dts:
            c = np.ones(53, dtype=np.bool_)
            assert_equal(np.where(c, dt(0), dt(1)), dt(0))
            assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
            assert_equal(np.where(True, dt(0), dt(1)), dt(0))
            assert_equal(np.where(False, dt(0), dt(1)), dt(1))
            d = np.ones_like(c).astype(dt)
            e = np.zeros_like(d)
            r = d.astype(dt)
            c[7] = False
            r[7] = e[7]
            assert_equal(np.where(c, e, e), e)
            assert_equal(np.where(c, d, e), r)
            assert_equal(np.where(c, d, e[0]), r)
            assert_equal(np.where(c, d[0], e), r)
            assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
            assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
            assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
            assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
            assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
            assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
            assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
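For reference, the broadcasting behaviour this test leans on: np.where accepts scalars for the condition and for either branch, and all three arguments broadcast against each other. A small standalone illustration:

import numpy as np

c = np.array([True, False, True])
print(np.where(c, 1, -1))      # -> [ 1 -1  1]   scalar branches broadcast
print(np.where(False, c, 0))   # -> [0 0 0]      scalar condition broadcasts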
test_linalg.py (project: radar, author: amoose136)
def do(self, a, b):
        d = linalg.det(a)
        (s, ld) = linalg.slogdet(a)
        if asarray(a).dtype.type in (single, double):
            ad = asarray(a).astype(double)
        else:
            ad = asarray(a).astype(cdouble)
        ev = linalg.eigvals(ad)
        assert_almost_equal(d, multiply.reduce(ev, axis=-1))
        assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))

        s = np.atleast_1d(s)
        ld = np.atleast_1d(ld)
        m = (s != 0)
        assert_almost_equal(np.abs(s[m]), 1)
        assert_equal(ld[~m], -inf)
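The identity being asserted is easy to check in isolation: slogdet returns the sign and the log of the absolute determinant, the numerically safer route when det itself would over- or underflow:

import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0]])        # det(a) == -2
sign, logabsdet = np.linalg.slogdet(a)
assert np.allclose(sign * np.exp(logabsdet), np.linalg.det(a))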
test_linalg.py (project: radar, author: amoose136)
def test_UPLO(self):
        Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
        Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
        tgt = np.array([-1, 1], dtype=np.double)
        rtol = get_rtol(np.double)

        # Check default is 'L'
        w = np.linalg.eigvalsh(Klo)
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'L'
        w = np.linalg.eigvalsh(Klo, UPLO='L')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'l'
        w = np.linalg.eigvalsh(Klo, UPLO='l')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'U'
        w = np.linalg.eigvalsh(Kup, UPLO='U')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'u'
        w = np.linalg.eigvalsh(Kup, UPLO='u')
        assert_allclose(w, tgt, rtol=rtol)
test_linalg.py (project: radar, author: amoose136)
def test_UPLO(self):
        Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
        Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
        tgt = np.array([-1, 1], dtype=np.double)
        rtol = get_rtol(np.double)

        # Check default is 'L'
        w, v = np.linalg.eigh(Klo)
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'L'
        w, v = np.linalg.eigh(Klo, UPLO='L')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'l'
        w, v = np.linalg.eigh(Klo, UPLO='l')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'U'
        w, v = np.linalg.eigh(Kup, UPLO='U')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'u'
        w, v = np.linalg.eigh(Kup, UPLO='u')
        assert_allclose(w, tgt, rtol=rtol)
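What both UPLO tests verify is that eigh/eigvalsh read only the named triangle and ignore the other, so Klo and Kup above are two encodings of the same symmetric matrix [[0, 1], [1, 0]]. A quick standalone check of that behaviour:

import numpy as np

K = np.array([[0.0, 99.0], [1.0, 0.0]])    # junk in the upper triangle
w = np.linalg.eigvalsh(K, UPLO='L')        # only the lower triangle is read
print(w)                                   # -> [-1.  1.]; the 99.0 is ignored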
test_linalg.py (project: radar, author: amoose136)
def test_mode_raw(self):
        # The factorization is not unique and varies between libraries,
        # so it is not possible to check against known values. Functional
        # testing is a possibility, but awaits the exposure of more
        # of the functions in lapack_lite. Consequently, this test is
        # very limited in scope. Note that the results are in FORTRAN
        # order, hence the h arrays are transposed.
        a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double)

        # Test double
        h, tau = linalg.qr(a, mode='raw')
        assert_(h.dtype == np.double)
        assert_(tau.dtype == np.double)
        assert_(h.shape == (2, 3))
        assert_(tau.shape == (2,))

        h, tau = linalg.qr(a.T, mode='raw')
        assert_(h.dtype == np.double)
        assert_(tau.dtype == np.double)
        assert_(h.shape == (3, 2))
        assert_(tau.shape == (2,))
env_converter.py (project: drl.pth, author: seba-1511)
def __init__(self, env, shape, clip=10.0, update_freq=100):
        self.env = env
        self.clip = clip
        self.update_freq = update_freq
        self.count = 0
        self.sum = 0.0
        self.sum_sqr = 0.0
        # running mean/std estimates, maintained incrementally via count/sum/sum_sqr
        self.mean = np.zeros(shape, dtype=np.double)
        self.std = np.ones(shape, dtype=np.double)
bench_stats.py (project: composability_bench, author: IntelPython)
def prepare_default(N=100, dtype=np.double):
    return ( np.asarray(np.random.rand(N, N), dtype=dtype), )
    #return toc/trials, (4/3)*N*N*N*1e-9, times
bench_stats.py (project: composability_bench, author: IntelPython)
def prepare_eig(N=100, dtype=np.double):
    N //= 4  # use a smaller matrix for the costlier eig kernel
    return ( np.asarray(np.random.rand(N, N), dtype=dtype), )
bench_stats.py (project: composability_bench, author: IntelPython)
def prepare_svd(N=100, dtype=np.double):
    N //= 2
    return ( np.asarray(np.random.rand(N, N), dtype=dtype), False )

#det:    return toc/trials, N*N*N*1e-9, times
bench_stats.py (project: composability_bench, author: IntelPython)
def prepare_dot(N=100, dtype=np.double):
    N=N*N*10
    A = np.asarray(np.random.rand(int(N)), dtype=dtype)
    return (A, A)
    #return 1.0*toc/(trials), 2*N*N*N*1e-9, times
bench_stats.py (project: composability_bench, author: IntelPython)
def prepare_ivi(N=100, dtype=np.double):
    A = np.random.rand(3260, 3260)   # fixed-size problem; N and dtype are unused here
    B = np.random.rand(3260, 3000)
    return (A, B)
    #return 1.0*toc/(trials), 2*N*N*N*1e-9, times
bench_stats.py (project: composability_bench, author: IntelPython)
def prepare_cholesky(N=100, dtype=np.double):
    N = int(N*2)
    A = np.asarray(np.random.rand(N, N), dtype=dtype)
    return ( A*A.transpose() + N*np.eye(N), )  # symmetric and diagonally dominant, hence positive definite
    #return toc/trials, N*N*N/3.0*1e-9, times

#inv:    return toc/trials, 2*N*N*N*1e-9, times


##################################################################################
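Each prepare_* helper above returns the argument tuple for one NumPy kernel, so a uniform driver can time them all the same way. A minimal sketch of such a driver (the timing loop is illustrative, not the project's own harness):

import time
import numpy as np

def bench(kernel, args, trials=3):
    # run kernel(*args) a few times and keep the best wall-clock time
    best = float('inf')
    for _ in range(trials):
        tic = time.time()
        kernel(*args)
        best = min(best, time.time() - tic)
    return best

print(bench(np.linalg.cholesky, prepare_cholesky(N=500)))
print(bench(np.linalg.svd, prepare_svd(N=500)))   # (A, False): full_matrices=False
print(bench(np.dot, prepare_dot(N=100)))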
gdal_array.py (project: gee-bridge, author: francbartoli)
def TermProgress_nocb(*args, **kwargs):
  """TermProgress_nocb(double dfProgress, char const * pszMessage=None, void * pData=None) -> int"""
  return _gdal_array.TermProgress_nocb(*args, **kwargs)
gdal_array.py (project: gee-bridge, author: francbartoli)
def BandRasterIONumPy(*args, **kwargs):
  """
    BandRasterIONumPy(Band band, int bWrite, double xoff, double yoff, double xsize, double ysize, PyArrayObject * psArray, 
        int buf_type, GDALRIOResampleAlg resample_alg, GDALProgressFunc callback=0, 
        void * callback_data=None) -> CPLErr
    """
  return _gdal_array.BandRasterIONumPy(*args, **kwargs)
gdal_array.py (project: gee-bridge, author: francbartoli)
def RATWriteArray(rat, array, field, start=0):
    """
    Pure Python implementation of writing a chunk of the RAT
    from a numpy array. The array's type is coerced to one of the
    supported types (int, double, string). Called from
    RasterAttributeTable.WriteArray.
    """
    if array is None:
        raise ValueError("Expected a 1-d array, got None")

    # if it is not already an ndarray, convert it (handles lists etc.)
    if not isinstance(array, numpy.ndarray):
        array = numpy.array(array)

    if array.ndim != 1:
        raise ValueError("Expected array of dim 1")

    if (start + array.size) > rat.GetRowCount():
        raise ValueError("Array too big to fit into RAT from start position")

    if numpy.issubdtype(array.dtype, numpy.integer):
        # is some type of integer - coerce to standard int
        # TODO: must check this is fine on all platforms
        # confusingly, numpy's default int is 64-bit even where the native C int is 32-bit
        array = array.astype(numpy.int32)
    elif numpy.issubdtype(array.dtype, numpy.floating):
        # is some type of floating point - coerce to double
        array = array.astype(numpy.double)
    elif numpy.issubdtype(array.dtype, numpy.character):
        # cast away any kind of Unicode etc
        array = array.astype(numpy.character)
    else:
        raise ValueError("Array not of a supported type (integer, double or string)")

    return RATValuesIONumPyWrite(rat, field, start, array)
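A hypothetical end-to-end call with the osgeo bindings; the target column (index 0 here) must already exist and the row count must cover start + array.size, or the guard above raises:

from osgeo import gdal
import numpy

rat = gdal.RasterAttributeTable()
rat.CreateColumn("count", gdal.GFT_Integer, gdal.GFU_Generic)   # becomes column 0
rat.SetRowCount(4)
RATWriteArray(rat, numpy.array([10, 20, 30, 40]), field=0, start=0)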
test_nixio.py (project: NeoAnalysis, author: neoanalysis)
def test_signals_write(self):
        block = Block()
        seg = Segment()
        block.segments.append(seg)

        asig = AnalogSignal(signal=self.rquant((10, 3), pq.mV),
                            sampling_rate=pq.Quantity(10, "Hz"))
        seg.analogsignals.append(asig)
        self.write_and_compare([block])

        anotherblock = Block("ir signal block")
        seg = Segment("ir signal seg")
        anotherblock.segments.append(seg)
        irsig = IrregularlySampledSignal(
            signal=np.random.random((20, 3)),
            times=self.rquant(20, pq.ms, True),
            units=pq.A
        )
        seg.irregularlysampledsignals.append(irsig)
        self.write_and_compare([anotherblock])

        block.segments[0].analogsignals.append(
            AnalogSignal(signal=[10.0, 1.0, 3.0], units=pq.S,
                         sampling_period=pq.Quantity(3, "s"),
                         dtype=np.double, name="signal42",
                         description="this is an analogsignal",
                         t_start=45 * pq.ms),
        )
        self.write_and_compare([block, anotherblock])

        block.segments[0].irregularlysampledsignals.append(
            IrregularlySampledSignal(times=np.random.random(10),
                                     signal=np.random.random((10, 3)),
                                     units="mV", time_units="s",
                                     dtype=np.float64,
                                     name="some sort of signal",
                                     description="the signal is described")
        )
        self.write_and_compare([block, anotherblock])
brainwaresrcio.py (project: NeoAnalysis, author: neoanalysis)
def __read_comment(self):
        """
        Read a single comment.

        The comment is stored as an Event in Segment 0, which is
        specifically for comments.

        ----------------------
        Returns an empty list.

        The returned object is already added to the Block.

        No ID number: always called from another method
        """

        # float64 -- timestamp (number of days since dec 30th 1899)
        time = np.fromfile(self._fsrc, dtype=np.double, count=1)[0]

        # int16 -- length of next string
        numchars1 = int(np.fromfile(self._fsrc,
                                    dtype=np.int16, count=1)[0])

        # char * numchars -- the one who sent the comment
        sender = self.__read_str(numchars1)

        # int16 -- length of next string
        numchars2 = int(np.fromfile(self._fsrc,
                                    dtype=np.int16, count=1)[0])

        # char * numchars -- comment text
        text = self.__read_str(numchars2, utf=False)

        comment = Event(times=pq.Quantity(time, units=pq.d), labels=text,
                        sender=sender, file_origin=self._file_origin)

        self._seg0.events.append(comment)

        return []
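The float64 read here is a Delphi/OLE-style date: a count of days since December 30th, 1899. The actual _convert_timestamp method lives elsewhere in brainwaresrcio.py; a plausible sketch of the conversion it has to perform:

from datetime import datetime, timedelta

def convert_delphi_timestamp(days):
    # assumption: days are counted from the Delphi/OLE epoch, Dec 30th, 1899
    return datetime(1899, 12, 30) + timedelta(days=days)

print(convert_delphi_timestamp(36526.5))   # -> 2000-01-01 12:00:00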
brainwaresrcio.py (project: NeoAnalysis, author: neoanalysis)
def __read_spiketrain_timestamped(self):
        """
        Read a SpikeTrain

        This SpikeTrain contains a time stamp for when it was recorded

        The timestamp is stored as an annotation in the SpikeTrain.

        -------------------------------------------------
        Returns a SpikeTrain object with multiple spikes.

        The returned object must be added to the Block.

        ID: 29110
        """

        # float64 -- timeStamp (number of days since dec 30th 1899)
        timestamp = np.fromfile(self._fsrc, dtype=np.double, count=1)[0]

        # convert to datetime object
        timestamp = self._convert_timestamp(timestamp)

        # seq_list -- spike list
        # combine the spikes into a single SpikeTrain
        spiketrain = self._combine_spiketrains(self.__read_list())

        # add the timestamp
        spiketrain.annotations['timestamp'] = timestamp

        return spiketrain
brainwaresrcio.py (project: NeoAnalysis, author: neoanalysis)
def __read_unit_list_timestamped(self):
        """
        A list of lists of Units.

        This is the same as __read_unit_list, except that it also has a
        timestamp.  This is added as an annotation to all Units.

        -----------------------------------------------
        Returns a list of Units modified in the method.

        The returned objects are already added to the Block.

        ID: 29119
        """

        # double -- time zero (number of days since dec 30th 1899)
        timestamp = np.fromfile(self._fsrc, dtype=np.double, count=1)[0]

        # convert to a datetime object
        timestamp = self._convert_timestamp(timestamp)

        # sorter -- this list is based on a sorter
        units = self.__read_unit_list()

        for unit in units:
            unit.annotations['timestamp'].append(timestamp)

        return units
CpuUsage.py (project: supremm, author: ubccr)
def computeallcpus(self):
        """ overall stats for all cores on the nodes """

        ratios = numpy.empty((self._ncpumetrics, self._totalcores), numpy.double)

        coreindex = 0
        for host, last in self._last.items():
            try:
                elapsed = last - self._first[host]

                if numpy.amin(numpy.sum(elapsed, 0)) < 1.0:
                    # typically happens if the job was very short and the datapoints are too close together
                    return {"error": ProcessingError.JOB_TOO_SHORT}

                coresperhost = len(last[0, :])
                ratios[:, coreindex:(coreindex+coresperhost)] = 1.0 * elapsed / numpy.sum(elapsed, 0)
                coreindex += coresperhost
            except ValueError:
                # typically happens if the linux pmda crashes during the job
                return {"error": ProcessingError.INSUFFICIENT_DATA}

        results = {}
        for i, name in enumerate(self._outnames):
            results[name] = calculate_stats(ratios[i, :])

        results['all'] = {"cnt": self._totalcores}

        return results
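The key step above divides each column (one core) by that core's total, turning raw counter deltas into per-metric fractions that sum to 1 for every core. A toy illustration with two metrics (say user and system time) on two cores:

import numpy

elapsed = numpy.array([[900.0, 450.0],    # user ticks, one column per core
                       [100.0,  50.0]])   # system ticks
ratios = 1.0 * elapsed / numpy.sum(elapsed, 0)
print(ratios)   # [[0.9 0.9], [0.1 0.1]]: each core spent 90% in user mode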
CpuUsage.py (project: supremm, author: ubccr)
def computejobcpus(self):
        """ stats for the cores on the nodes that were assigend to the job (if available) """

        proc = self._job.getdata('proc')

        if proc is None:
            return {"error": ProcessingError.CPUSET_UNKNOWN}, {"error": ProcessingError.CPUSET_UNKNOWN}

        cpusallowed = self._job.getdata('proc')['cpusallowed']

        ratios = numpy.empty((self._ncpumetrics, self._totalcores), numpy.double)

        coreindex = 0
        for host, last in self._last.items():
            elapsed = last - self._first[host]
            if host in cpusallowed and 'error' not in cpusallowed[host]:
                elapsed = elapsed[:, cpusallowed[host]]
            else:
                return {"error": ProcessingError.CPUSET_UNKNOWN}, {"error": ProcessingError.CPUSET_UNKNOWN}

            coresperhost = len(elapsed[0, :])
            ratios[:, coreindex:(coreindex+coresperhost)] = 1.0 * elapsed / numpy.sum(elapsed, 0)
            coreindex += coresperhost

        allowedcores = numpy.array(ratios[:, :coreindex])

        results = {}
        for i, name in enumerate(self._outnames):
            results[name] = calculate_stats(allowedcores[i, :])

        results['all'] = {"cnt": coreindex}

        effective = numpy.compress(allowedcores[1, :] < 0.95, allowedcores, axis=1)
        effectiveresults = {
            'all': effective.shape[1]
        }
        if effectiveresults['all'] > 0:
            for i, name in enumerate(self._outnames):
                effectiveresults[name] = calculate_stats(effective[i, :])

        return results, effectiveresults
Util.py (project: MLPractices, author: carefree0910)
def quantize_data(x, y, wc=None, continuous_rate=0.1, separate=False):
        if isinstance(x, list):
            xt = map(list, zip(*x))
        else:
            xt = x.T
        features = [set(feat) for feat in xt]
        if wc is None:
            wc = np.array([len(feat) >= int(continuous_rate * len(y)) for feat in features])
        else:
            wc = np.asarray(wc)
        feat_dics = [{_l: i for i, _l in enumerate(feats)} if not wc[i] else None
                     for i, feats in enumerate(features)]
        if not separate:
            if np.all(~wc):
                dtype = np.int64
            else:
                dtype = np.double
            x = np.array([[feat_dics[i][_l] if not wc[i] else _l for i, _l in enumerate(sample)]
                          for sample in x], dtype=dtype)
        else:
            x = np.array([[feat_dics[i][_l] if not wc[i] else _l for i, _l in enumerate(sample)]
                          for sample in x], dtype=np.double)
            x = (x[:, ~wc].astype(np.int64), x[:, wc])
        label_dic = {_l: i for i, _l in enumerate(set(y))}
        y = np.array([label_dic[yy] for yy in y], dtype=np.int8)
        label_dic = {i: _l for _l, i in label_dic.items()}
        return x, y, wc, features, feat_dics, label_dic
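An illustrative call, assuming the function is reachable as a plain helper; wc is passed explicitly so that column 0 is treated as categorical and column 1 as continuous (when wc is omitted, a column counts as continuous once it has at least continuous_rate * len(y) distinct values):

x = [["red", 1.5], ["blue", 2.5], ["red", 3.5], ["green", 4.5]]
y = ["a", "b", "a", "b"]
x2, y2, wc, features, feat_dics, label_dic = quantize_data(x, y, wc=[False, True])
# column 0 is label-encoded via feat_dics[0]; column 1 passes through as float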
test_torch.py (project: pytorch-dist, author: apaszke)
def test_element_size(self):
        byte   =   torch.ByteStorage().element_size()
        char   =   torch.CharStorage().element_size()
        short  =  torch.ShortStorage().element_size()
        int    =    torch.IntStorage().element_size()
        long   =   torch.LongStorage().element_size()
        float  =  torch.FloatStorage().element_size()
        double = torch.DoubleStorage().element_size()

        self.assertEqual(byte,   torch.ByteTensor().element_size())
        self.assertEqual(char,   torch.CharTensor().element_size())
        self.assertEqual(short,  torch.ShortTensor().element_size())
        self.assertEqual(int,    torch.IntTensor().element_size())
        self.assertEqual(long,   torch.LongTensor().element_size())
        self.assertEqual(float,  torch.FloatTensor().element_size())
        self.assertEqual(double, torch.DoubleTensor().element_size())

        self.assertGreater(byte, 0)
        self.assertGreater(char, 0)
        self.assertGreater(short, 0)
        self.assertGreater(int, 0)
        self.assertGreater(long, 0)
        self.assertGreater(float, 0)
        self.assertGreater(double, 0)

        # These tests are portable, not necessarily strict for your system.
        self.assertEqual(byte, 1)
        self.assertEqual(char, 1)
        self.assertGreaterEqual(short, 2)
        self.assertGreaterEqual(int, 2)
        self.assertGreaterEqual(int, short)
        self.assertGreaterEqual(long, 4)
        self.assertGreaterEqual(long, int)
        self.assertGreaterEqual(double, float)
test_torch.py (project: pytorch-dist, author: apaszke)
def test_from_numpy(self):
        dtypes = [
            np.double,
            np.float64,
            np.int64,
            np.int32,
            np.uint8
        ]
        for dtype in dtypes:
            array = np.array([1, 2, 3, 4], dtype=dtype)
            self.assertEqual(torch.from_numpy(array), torch.Tensor([1, 2, 3, 4]))
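One property worth keeping in mind alongside this test: torch.from_numpy shares the array's buffer rather than copying it, so later writes to the array show up in the tensor:

import numpy as np
import torch

array = np.array([1, 2, 3, 4], dtype=np.int64)
t = torch.from_numpy(array)
array[0] = 99
print(t[0])   # 99; tensor and ndarray share one buffer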

