Example source code for Python's searchsorted()
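The snippets below all revolve around numpy.searchsorted, which, given a sorted array, returns for each query value the index at which it would be inserted to keep the array sorted; the side argument controls how ties are broken. A minimal warm-up sketch (the array and queries are illustrative only):

import numpy as np

a = np.array([1, 3, 5, 7])                    # must already be sorted
print(np.searchsorted(a, 5))                  # 2: insert before the existing 5 (side='left' is the default)
print(np.searchsorted(a, 5, side='right'))    # 3: insert after the existing 5
print(np.searchsorted(a, [0, 4, 9]))          # [0 2 4]: vectorized over query values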

magnificationcurve.py (project: MulensModel, author: rpoleski)
def _methods_for_epochs(self):
        """
        for given epochs, decide which methods should be used to
        calculate magnification, but don't run the calculations
        """
        out = [self._default_method] * len(self.times)
        if self._methods_epochs is None:
            return out

        brackets = np.searchsorted(self._methods_epochs, self.times)
        n_max = len(self._methods_epochs)

        out = [self._methods_names[value - 1]
               if (value > 0 and value < n_max) else self._default_method
               for value in brackets]
        return out
colorimetry.py (project: prysm, author: brandondube)
def normalize_spectrum(spectrum):
    ''' Normalizes a spectrum to have unit peak within the visible band.
    Args:
        spectrum (`Spectrum`): object with iterable wavelength, value fields.

    Returns:
        `Spectrum`: new spectrum object.

    '''
    wvl, vals = spectrum['wvl'], spectrum['values']
    low, high = np.searchsorted(wvl, 400), np.searchsorted(wvl, 700)
    vis_values_max = vals[low:high].max()
    return {
        'wvl': wvl,
        'values': vals / vis_values_max,
    }
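A hedged usage sketch of normalize_spectrum above, assuming the function is in scope; the wavelength and value arrays are made up, and the input is a plain dict because that is all the function actually touches. searchsorted supplies the slice bounds for the 400-700 nm visible band.

import numpy as np

spectrum = {
    'wvl': np.arange(350, 801, 50),       # wavelengths in nm, already sorted
    'values': np.array([0.1, 0.3, 0.9, 1.8, 1.2, 0.7, 0.4, 0.2, 0.1, 0.05]),
}
normalized = normalize_spectrum(spectrum)
print(normalized['values'].max())          # 1.0, since the global peak (1.8) lies inside the visible band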
setdiff.py (project: lps-anchor-pos-estimator, author: bitcraze)
def setdiff(eq1, eq2):

    eq1, eq2 = eqsize(eq1, eq2)

    c1 = [None] * eq1.size
    c2 = [None] * eq2.size

    # Hash every element so the comparison below works on plain integers.
    for i in range(0, eq1.size):
        c1[i] = hash(eq1[i])

    for i in range(0, eq2.size):
        c2[i] = hash(eq2[i])

    # Keep the indices of eq1 whose hashes are not matched by any hash in
    # eq2 (c1 must be sorted for searchsorted to find the matching slots).
    ia = np.delete(np.arange(len(c1)), np.searchsorted(c1, c2))

    ia = (ia[:]).conj().T

    p = eq1[ia]

    return p, ia
densetimes.py (project: MOSFiT, author: guillochon)
def process(self, **kwargs):
        """Process module."""
        self._rest_times = kwargs['rest_times']
        self._rest_t_explosion = kwargs[self.key('resttexplosion')]

        outputs = OrderedDict()
        max_times = max(self._rest_times)
        if max_times > self._rest_t_explosion:
            outputs['dense_times'] = np.unique(
                np.concatenate(([0.0], [
                    x + self._rest_t_explosion
                    for x in np.logspace(
                        self.L_T_MIN,
                        np.log10(max_times - self._rest_t_explosion),
                        num=self._n_times)
                ], self._rest_times)))
        else:
            outputs['dense_times'] = np.array(self._rest_times)
        outputs['dense_indices'] = np.searchsorted(
            outputs['dense_times'], self._rest_times)
        return outputs
losextinction.py (project: MOSFiT, author: guillochon)
def mm83(self, nh, waves):
        """X-ray extinction in the ISM from Morisson & McCammon 1983."""
        y = np.array([self.H_C_CGS / (x * self.ANG_CGS * self.KEV_CGS)
                      for x in waves])
        i = np.array([np.searchsorted(self._mm83[:, 0], x) - 1 for x in y])
        al = [1.0e-24 * (self._mm83[x, 1] + self._mm83[x, 2] * y[j] +
                         self._mm83[x, 3] * y[j] ** 2) / y[j] ** 3
              for j, x in enumerate(i)]
        # For less than 0.03 keV assume cross-section scales as E^-3.
        # http://ned.ipac.caltech.edu/level5/Madau6/Madau1_2.html
        # See also Rumph, Boyer, & Vennes 1994.
        al = [al[j] if x < self._min_xray
              else self._almin * (self._min_xray / x) ** 3
              for j, x in enumerate(y)]
        al = [al[j] if x > self._max_xray
              else self._almax * (self._max_xray / x) ** 3
              for j, x in enumerate(y)]
        return nh * np.array(al)
dynamics.py (project: hienoi, author: christophercrouzet)
def get_particle(self, id):
        """Retrieve a particle.

        Parameters
        ----------
        id : int
            ID of the particle to retrieve.

        Returns
        -------
        nani.Particle
            The particle found.
        """
        # PRECONDITION: `self._array.data` sorted by id.
        id = self._ATTR_ID_NUMPY_TYPE(id)
        idx = numpy.searchsorted(self._array.data['id'], id)
        if idx < len(self._array) and self._array.data[idx]['id'] == id:
            return self._nani.element_view(self._array.data[idx])

        raise ValueError("No particle found with ID '%d'." % (id,))
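The lookup above is the standard membership test on a sorted array: searchsorted finds the candidate slot in O(log n) and one equality check confirms whether the key is really there. A standalone sketch with illustrative ids:

import numpy as np

ids = np.array([2, 5, 9, 14])   # sorted particle ids

def find(id_):
    idx = np.searchsorted(ids, id_)
    return idx if idx < len(ids) and ids[idx] == id_ else None

print(find(9))   # 2
print(find(7))   # None: 7 is absent, so the slot check fails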
__init__.py (project: MIT-Thesis, author: alec-heif)
def __getitem__(self, index):
        inds = self.indices
        vals = self.values
        if not isinstance(index, int):
            raise TypeError(
                "Indices must be of type integer, got type %s" % type(index))

        if index >= self.size or index < -self.size:
            raise IndexError("Index %d out of bounds." % index)
        if index < 0:
            index += self.size

        if (inds.size == 0) or (index > inds.item(-1)):
            return 0.

        insert_index = np.searchsorted(inds, index)
        row_ind = inds[insert_index]
        if row_ind == index:
            return vals[insert_index]
        return 0.
__init__.py (project: MIT-Thesis, author: alec-heif)
def __getitem__(self, indices):
        i, j = indices
        if i < 0 or i >= self.numRows:
            raise IndexError("Row index %d is out of range [0, %d)"
                             % (i, self.numRows))
        if j < 0 or j >= self.numCols:
            raise IndexError("Column index %d is out of range [0, %d)"
                             % (j, self.numCols))

        # If a CSR matrix is given, then the row index should be searched
        # for in ColPtrs, and the column index should be searched for in the
        # corresponding slice obtained from rowIndices.
        if self.isTransposed:
            j, i = i, j

        colStart = self.colPtrs[j]
        colEnd = self.colPtrs[j + 1]
        nz = self.rowIndices[colStart: colEnd]
        ind = np.searchsorted(nz, i) + colStart
        if ind < colEnd and self.rowIndices[ind] == i:
            return self.values[ind]
        else:
            return 0.0
summary.py (project: django-corenlp, author: arunchaganty)
def add(self, arr):
        if not isinstance(arr, np.ndarray):
            arr = np.array(arr)
        arr = arr.flatten()

        self.min = min(self.min, arr.min())
        self.max = max(self.max, arr.max())
        self.sum += arr.sum()
        self.num += len(arr)
        self.sum_squares += (arr ** 2).sum()

        indices = np.searchsorted(self.bucket_limits, arr, side='right')
        new_counts = np.bincount(indices, minlength=self.buckets.shape[0])
        if new_counts.shape[0] > self.buckets.shape[0]:
            # This should only happen with nans and extremely large values
            assert new_counts.shape[0] == self.buckets.shape[0] + 1, new_counts.shape
            new_counts = new_counts[:self.buckets.shape[0]]
        self.buckets += new_counts
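The bucketing step above can be read on its own: with side='right' each value lands in the bucket whose upper limit is the first one strictly greater than it, and bincount turns those bucket indices into per-bucket counts. A small sketch with made-up limits:

import numpy as np

bucket_limits = np.array([1.0, 2.0, 5.0])                    # upper edges of three buckets
arr = np.array([0.5, 1.0, 1.5, 4.9])
indices = np.searchsorted(bucket_limits, arr, side='right')
print(indices)                                               # [0 1 1 2]
print(np.bincount(indices, minlength=len(bucket_limits)))    # [1 2 1]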
HARKinterpolation.py (project: HARK, author: econ-ark)
def _evaluate(self,x,y):
        '''
        Returns the level of the interpolated function at each value in x,y.
        Only called internally by HARKinterpolator2D.__call__ (etc).
        '''
        if _isscalar(x):
            y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
            alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
            f = (1-alpha)*self.xInterpolators[y_pos-1](x) + alpha*self.xInterpolators[y_pos](x)
        else:
            m = len(x)
            y_pos = np.searchsorted(self.y_list,y)
            y_pos[y_pos > self.y_n-1] = self.y_n-1
            y_pos[y_pos < 1] = 1
            f = np.zeros(m) + np.nan
            if y.size > 0:
                for i in xrange(1,self.y_n):
                    c = y_pos == i
                    if np.any(c):
                        alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])
                        f[c] = (1-alpha)*self.xInterpolators[i-1](x[c]) + alpha*self.xInterpolators[i](x[c]) 
        return f
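The vector branch above is a common bracketing idiom: searchsorted finds which grid segment each query falls in, and the result is clamped to [1, n-1] so queries below or above the grid reuse the first or last segment. A compact sketch of just that step, with an illustrative grid:

import numpy as np

grid = np.array([0.0, 1.0, 2.0, 3.0])
queries = np.array([-0.5, 0.4, 2.5, 9.0])
pos = np.searchsorted(grid, queries)
pos = np.clip(pos, 1, len(grid) - 1)   # same effect as the two masked assignments above
print(pos)                              # [1 1 3 3]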
HARKinterpolation.py (project: HARK, author: econ-ark)
def _derX(self,x,y):
        '''
        Returns the derivative with respect to x of the interpolated function
        at each value in x,y. Only called internally by HARKinterpolator2D.derivativeX.
        '''
        if _isscalar(x):
            y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
            alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
            dfdx = (1-alpha)*self.xInterpolators[y_pos-1]._der(x) + alpha*self.xInterpolators[y_pos]._der(x)
        else:
            m = len(x)
            y_pos = np.searchsorted(self.y_list,y)
            y_pos[y_pos > self.y_n-1] = self.y_n-1
            y_pos[y_pos < 1] = 1
            dfdx = np.zeros(m) + np.nan
            if y.size > 0:
                for i in xrange(1,self.y_n):
                    c = y_pos == i
                    if np.any(c):
                        alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])
                        dfdx[c] = (1-alpha)*self.xInterpolators[i-1]._der(x[c]) + alpha*self.xInterpolators[i]._der(x[c])
        return dfdx
HARKinterpolation.py (project: HARK, author: econ-ark)
def _derY(self,x,y):
        '''
        Returns the derivative with respect to y of the interpolated function
        at each value in x,y. Only called internally by HARKinterpolator2D.derivativeY.
        '''
        if _isscalar(x):
            y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
            dfdy = (self.xInterpolators[y_pos](x) - self.xInterpolators[y_pos-1](x))/(self.y_list[y_pos] - self.y_list[y_pos-1])
        else:
            m = len(x)
            y_pos = np.searchsorted(self.y_list,y)
            y_pos[y_pos > self.y_n-1] = self.y_n-1
            y_pos[y_pos < 1] = 1
            dfdy = np.zeros(m) + np.nan
            if y.size > 0:
                for i in xrange(1,self.y_n):
                    c = y_pos == i
                    if np.any(c):
                        dfdy[c] = (self.xInterpolators[i](x[c]) - self.xInterpolators[i-1](x[c]))/(self.y_list[i] - self.y_list[i-1])
        return dfdy
HARKinterpolation.py (project: HARK, author: econ-ark)
def _evaluate(self,x,y,z):
        '''
        Returns the level of the interpolated function at each value in x,y,z.
        Only called internally by HARKinterpolator3D.__call__ (etc).
        '''
        if _isscalar(x):
            z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
            alpha = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
            f = (1-alpha)*self.xyInterpolators[z_pos-1](x,y) + alpha*self.xyInterpolators[z_pos](x,y)
        else:
            m = len(x)
            z_pos = np.searchsorted(self.z_list,z)
            z_pos[z_pos > self.z_n-1] = self.z_n-1
            z_pos[z_pos < 1] = 1
            f = np.zeros(m) + np.nan
            if x.size > 0:
                for i in xrange(1,self.z_n):
                    c = z_pos == i
                    if np.any(c):
                        alpha = (z[c] - self.z_list[i-1])/(self.z_list[i] - self.z_list[i-1])
                        f[c] = (1-alpha)*self.xyInterpolators[i-1](x[c],y[c]) + alpha*self.xyInterpolators[i](x[c],y[c]) 
        return f
HARKinterpolation.py (project: HARK, author: econ-ark)
def _derX(self,x,y,z):
        '''
        Returns the derivative with respect to x of the interpolated function
        at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeX.
        '''
        if _isscalar(x):
            z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
            alpha = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
            dfdx = (1-alpha)*self.xyInterpolators[z_pos-1].derivativeX(x,y) + alpha*self.xyInterpolators[z_pos].derivativeX(x,y)
        else:
            m = len(x)
            z_pos = np.searchsorted(self.z_list,z)
            z_pos[z_pos > self.z_n-1] = self.z_n-1
            z_pos[z_pos < 1] = 1
            dfdx = np.zeros(m) + np.nan
            if x.size > 0:
                for i in xrange(1,self.z_n):
                    c = z_pos == i
                    if np.any(c):
                        alpha = (z[c] - self.z_list[i-1])/(self.z_list[i] - self.z_list[i-1])
                        dfdx[c] = (1-alpha)*self.xyInterpolators[i-1].derivativeX(x[c],y[c]) + alpha*self.xyInterpolators[i].derivativeX(x[c],y[c]) 
        return dfdx
HARKinterpolation.py (project: HARK, author: econ-ark)
def _derY(self,x,y,z):
        '''
        Returns the derivative with respect to y of the interpolated function
        at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeY.
        '''
        if _isscalar(x):
            z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
            alpha = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
            dfdy = (1-alpha)*self.xyInterpolators[z_pos-1].derivativeY(x,y) + alpha*self.xyInterpolators[z_pos].derivativeY(x,y)
        else:
            m = len(x)
            z_pos = np.searchsorted(self.z_list,z)
            z_pos[z_pos > self.z_n-1] = self.z_n-1
            z_pos[z_pos < 1] = 1
            dfdy = np.zeros(m) + np.nan
            if x.size > 0:
                for i in xrange(1,self.z_n):
                    c = z_pos == i
                    if np.any(c):
                        alpha = (z[c] - self.z_list[i-1])/(self.z_list[i] - self.z_list[i-1])
                        dfdy[c] = (1-alpha)*self.xyInterpolators[i-1].derivativeY(x[c],y[c]) + alpha*self.xyInterpolators[i].derivativeY(x[c],y[c]) 
        return dfdy
HARKinterpolation.py (project: HARK, author: econ-ark)
def _derZ(self,x,y,z):
        '''
        Returns the derivative with respect to z of the interpolated function
        at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeZ.
        '''
        if _isscalar(x):
            z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
            dfdz = (self.xyInterpolators[z_pos](x,y) - self.xyInterpolators[z_pos-1](x,y))/(self.z_list[z_pos] - self.z_list[z_pos-1])
        else:
            m = len(x)
            z_pos = np.searchsorted(self.z_list,z)
            z_pos[z_pos > self.z_n-1] = self.z_n-1
            z_pos[z_pos < 1] = 1
            dfdz = np.zeros(m) + np.nan
            if x.size > 0:
                for i in xrange(1,self.z_n):
                    c = z_pos == i
                    if np.any(c):
                        dfdz[c] = (self.xyInterpolators[i](x[c],y[c]) - self.xyInterpolators[i-1](x[c],y[c]))/(self.z_list[i] - self.z_list[i-1])
        return dfdz
ConsMarkovModel.py (project: HARK, author: econ-ark)
def simBirth(self,which_agents):
        '''
        Makes new Markov consumer by drawing initial normalized assets, permanent income levels, and
        discrete states. Calls IndShockConsumerType.simBirth, then draws from initial Markov distribution.

        Parameters
        ----------
        which_agents : np.array(Bool)
            Boolean array of size self.AgentCount indicating which agents should be "born".

        Returns
        -------
        None
        '''
        IndShockConsumerType.simBirth(self,which_agents) # Get initial assets and permanent income
        N = np.sum(which_agents)
        base_draws = drawUniform(N,seed=self.RNG.randint(0,2**31-1))
        Cutoffs = np.cumsum(np.array(self.MrkvPrbsInit))
        self.MrkvNow[which_agents] = np.searchsorted(Cutoffs,base_draws).astype(int)
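The last two lines are the usual inverse-CDF trick for sampling a discrete distribution: the cumulative probabilities form a sorted array, and searchsorted maps each uniform draw to the state whose cumulative band contains it. A standalone sketch with illustrative probabilities:

import numpy as np

probs = np.array([0.2, 0.5, 0.3])          # P(state 0), P(state 1), P(state 2)
cutoffs = np.cumsum(probs)                 # [0.2, 0.7, 1.0]
draws = np.array([0.05, 0.25, 0.69, 0.95])
print(np.searchsorted(cutoffs, draws))     # [0 1 1 2]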
utils.py (project: brainiak, author: brainiak)
def ecdf(x):
    """Empirical cumulative distribution function

    Given a 1D array of values, returns a function f(q) that outputs the
    fraction of values less than or equal to q.

    Parameters
    ----------
    x : 1D array
        values for which to compute CDF

    Returns
    ----------
    ecdf_fun: Callable[[float], float]
        function that returns the value of the CDF at a given point
    """
    xp = np.sort(x)
    yp = np.arange(len(xp) + 1) / len(xp)

    def ecdf_fun(q):
        return yp[np.searchsorted(xp, q, side="right")]

    return ecdf_fun
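A quick usage check of ecdf above (the sample values are illustrative): because of side='right', ties count as "less than or equal", so the function jumps at each sample point.

import numpy as np

f = ecdf(np.array([3.0, 1.0, 2.0]))
print(f(0.5))    # 0.0: no samples <= 0.5
print(f(2.0))    # 0.666...: two of the three samples are <= 2.0
print(f(10.0))   # 1.0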
text_classifier.py (project: textar, author: datosgobar)
def make_classifier(self, name, ids, labels):
        """Entrenar un clasificador SVM sobre los textos cargados.

        Crea un clasificador que se guarda en el objeto bajo el nombre `name`.

        Args:
            name (str): Nombre para el clasidicador.
            ids (list): Se espera una lista de N ids de textos ya almacenados
                en el TextClassifier.
            labels (list): Se espera una lista de N etiquetas. Una por cada id
                de texto presente en ids.
        Nota:
            Usa el clasificador de `Scikit-learn <http://scikit-learn.org/>`_
        """
        if not all(np.in1d(ids, self.ids)):
            raise ValueError("Hay ids de textos que no se encuentran \
                              almacenados.")
        setattr(self, name, SGDClassifier())
        classifier = getattr(self, name)
        indices = np.searchsorted(self.ids, ids)
        classifier.fit(self.tfidf_mat[indices, :], labels)
image.py (project: uncover-ml, author: GeoscienceAustralia)
def _global_lonlat2pix(self, lonlat):
        x = np.searchsorted(self._coords_x, lonlat[:, 0], side='right') - 1
        x = x.astype(int)
        ycoords = self._coords_y
        y = np.searchsorted(ycoords, lonlat[:, 1], side='right') - 1
        y = y.astype(int)

        # We want the *closed* interval, which means moving
        # points on the end back by 1
        on_end_x = lonlat[:, 0] == self._coords_x[-1]
        on_end_y = lonlat[:, 1] == self._coords_y[-1]
        x[on_end_x] -= 1
        y[on_end_y] -= 1
        if (not all(np.logical_and(x >= 0, x < self._full_res[0]))) or \
                (not all(np.logical_and(y >= 0, y < self._full_res[1]))):
            raise ValueError("Queried location is not "
                             "in the image {}!".format(self.source._filename))

        result = np.concatenate((x[:, np.newaxis], y[:, np.newaxis]), axis=1)
        return result
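The core of the mapping above is searchsorted(coords, value, side='right') - 1, which returns the index of the cell whose left edge is the last one at or below the value; the follow-up bookkeeping only handles points sitting exactly on the final coordinate. A sketch along one axis, with made-up pixel edges:

import numpy as np

edges = np.array([0.0, 10.0, 20.0, 30.0])               # pixel left edges along one axis (illustrative)
lons = np.array([0.0, 9.9, 10.0, 29.9])
print(np.searchsorted(edges, lons, side='right') - 1)    # [0 0 1 2]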

manual.py (project: Dragonfly, author: duaneloh)
def next_frame(self, event=None):
        num = int(self.numstr.text())
        cnum = self.class_num.checkedId() - 1
        if cnum == -1:
            num += 1
        else:
            points = np.where(self.classes.key_pos == cnum)[0]
            index = np.searchsorted(points, num, side='left')
            if num in points:
                index += 1
            if index > len(points) - 1:
                index = len(points) - 1
            num = points[index]

        if num < self.parent.num_frames:
            self.numstr.setText(str(num))
            self.plot_frame()
plot_figures.py (project: IDNNs, author: ravidziv)
def update_line_each_neuron(num, print_loss, Ix, axes, Iy, train_data, accuracy_test, epochs_bins, loss_train_data, loss_test_data, colors, epochsInds,
                            font_size = 18, axis_font = 16, x_lim = [0,12.2], y_lim=[0, 1.08],x_ticks = [], y_ticks = []):
    """Update the figure of the infomration plane for the movie"""
    #Print the line between the points
    axes[0].clear()
    if len(axes)>1:
        axes[1].clear()
    #Print the points
    for layer_num in range(Ix.shape[2]):
        for net_ind in range(Ix.shape[0]):
            axes[0].scatter(Ix[net_ind,num, layer_num], Iy[net_ind,num, layer_num], color = colors[layer_num], s = 35,edgecolors = 'black',alpha = 0.85)
    title_str = 'Information Plane - Epoch number - ' + str(epochsInds[num])
    utils.adjustAxes(axes[0], axis_font, title_str, x_ticks, y_ticks, x_lim, y_lim, set_xlabel=True, set_ylabel=True,
                     x_label='$I(X;T)$', y_label='$I(T;Y)$')
    #Print the loss function and the error
    if len(axes)>1:
        axes[1].plot(epochsInds[:num], 1 - np.mean(accuracy_test[:, :num], axis=0), color='g')
        if print_loss:
            axes[1].plot(epochsInds[:num], np.mean(loss_test_data[:, :num], axis=0), color='y')
        nearest_val = np.searchsorted(epochs_bins, epochsInds[num], side='right')
        axes[1].set_xlim([0, epochs_bins[nearest_val]])
        axes[1].legend(('Accuracy', 'Loss Function'), loc='best')
trading_calendar.py (project: catalyst, author: enigmampc)
def minutes_window(self, start_dt, count):
        start_dt_nanos = start_dt.value
        all_minutes_nanos = self._trading_minutes_nanos
        start_idx = all_minutes_nanos.searchsorted(start_dt_nanos)

        # searchsorted finds the index of the minute **on or after** start_dt.
        # If the latter, push back to the prior minute.
        if all_minutes_nanos[start_idx] != start_dt_nanos:
            start_idx -= 1

        if start_idx < 0 or start_idx >= len(all_minutes_nanos):
            raise KeyError("Can't start minute window at {}".format(start_dt))

        end_idx = start_idx + count

        if start_idx > end_idx:
            return self.all_minutes[(end_idx + 1):(start_idx + 1)]
        else:
            return self.all_minutes[start_idx:end_idx]
datasets.py (project: importance-sampling, author: idiap)
def _encode(self, s, V, context):
        """
        Arguments
        ----------
            s: Sentence as a list of strings
            V: Vocabulary as a np array of strings
            context: The maximum length of previous words to include
        """
        idxs = np.searchsorted(V, s)
        x = np.zeros((len(s)-1, context), dtype=np.int32)
        y = np.zeros((len(s)-1, 1), np.int32)
        for i in range(1, len(s)):
            x[i-1, :i] = idxs[:i][-context:] + 1  # 0 means missing value
            y[i-1] = idxs[i]

        return x, y
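The first line of _encode is the pattern worth noting: when the vocabulary V is a sorted array of strings, searchsorted maps every word of the sentence to its vocabulary index in a single vectorized call (assuming each word actually occurs in V). A minimal sketch with a toy vocabulary:

import numpy as np

V = np.array(sorted(["cat", "dog", "sat", "the"]))
s = ["the", "cat", "sat"]
print(np.searchsorted(V, s))   # [3 0 2]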

