Python isscalar() example source code
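
The snippets on this page are collected from open-source projects and show the typical use of numpy.isscalar: branching between a scalar argument and an array-like one. As a quick reminder of its semantics, here is a minimal, self-contained demonstration (not taken from any of the projects below):

import numpy as np

# Python scalars and NumPy scalar types count as scalars ...
print(np.isscalar(3.1))              # True
print(np.isscalar(np.float64(3.1)))  # True
print(np.isscalar('abc'))            # True (strings count as scalars here)

# ... but containers and arrays do not, including 0-d arrays.
print(np.isscalar([3.1]))            # False
print(np.isscalar(np.array(3.1)))    # False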

constraints_handler.py (project: pycma, author: CMA-ES)
def repair(self, x, copy_if_changed=True):
        """sets out-of-bounds components of ``x`` on the bounds.

        """
        # TODO (old data): CPU(N,lam,iter=20,200,100): 3.3s of 8s for two bounds, 1.8s of 6.5s for one bound
        # remark: np.max([bounds[0], x]) is about 40 times slower than max((bounds[0], x))
        copy = copy_if_changed
        bounds = self.bounds
        if bounds not in (None, [None, None], (None, None)):  # solely for efficiency
            if copy:
                x = np.array(x, copy=True)
            if bounds[0] is not None:
                if np.isscalar(bounds[0]):
                    for i in rglen(x):
                        x[i] = max((bounds[0], x[i]))
                else:
                    for i in rglen(x):
                        j = min([i, len(bounds[0]) - 1])
                        if bounds[0][j] is not None:
                            x[i] = max((bounds[0][j], x[i]))
            if bounds[1] is not None:
                if np.isscalar(bounds[1]):
                    for i in rglen(x):
                        x[i] = min((bounds[1], x[i]))
                else:
                    for i in rglen(x):
                        j = min((i, len(bounds[1]) - 1))
                        if bounds[1][j] is not None:
                            x[i] = min((bounds[1][j], x[i]))
        return x

    # ____________________________________________________________
    #
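
For reference, the clamping performed by repair can be written with NumPy broadcasting when the bounds are plain scalars or full-length vectors without None entries. A minimal sketch of that common case, not pycma's implementation (which also supports shorter bound vectors and None components):

import numpy as np

def clip_to_bounds(x, lower=None, upper=None):
    """Clamp each component of x into [lower, upper]; each bound may be a scalar or a vector."""
    x = np.array(x, dtype=float, copy=True)  # copy, like repair() with copy_if_changed=True
    if lower is not None:
        x = np.maximum(x, lower)  # broadcasts a scalar or a per-component vector
    if upper is not None:
        x = np.minimum(x, upper)
    return x

print(clip_to_bounds([-1.0, 0.5, 3.0], lower=0.0, upper=[2.0, 2.0, 2.0]))  # [0.  0.5 2. ]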
constraints_handler.py (project: pycma, author: CMA-ES)
def __call__(self, x, archive, gp):
        """returns the boundary violation penalty for `x`,
        where `x` is a single solution or a list or np.array of solutions.

        """
        if x in (None, (), []):
            return x
        if self.bounds in (None, [None, None], (None, None)):
            return 0.0 if np.isscalar(x[0]) else [0.0] * len(x)  # no penalty

        x_is_single_vector = np.isscalar(x[0])
        if x_is_single_vector:
            x = [x]

        # add fixed variables to self.gamma
        try:
            gamma = list(self.gamma)  # fails if self.gamma is a scalar
            for i in sorted(gp.fixed_values):  # fails if fixed_values is None
                gamma.insert(i, 0.0)
            gamma = np.array(gamma, copy=False)
        except TypeError:
            gamma = self.gamma
        pen = []
        for xi in x:
            # CAVE: this does not work with already repaired values!!
            # CPU(N,lam,iter=20,200,100)?: 3s of 10s, np.array(xi): 1s
            # remark: one deep copy can be prevented by xold = xi first
            xpheno = gp.pheno(archive[xi]['geno'])
            # necessary, because xi was repaired to be in bounds
            xinbounds = self.repair(xpheno)
            # could be omitted (with unpredictable effect in case of external repair)
            fac = 1  # exp(0.1 * (log(self.scal) - np.mean(self.scal)))
            pen.append(sum(gamma * ((xinbounds - xpheno) / fac)**2) / len(xi))
        return pen[0] if x_is_single_vector else pen

    # ____________________________________________________________
    #
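
Stripped of the genotype archive and the fixed-variables bookkeeping, the penalty per solution is a gamma-weighted squared distance between the repaired and the unrepaired phenotype, averaged over the dimension. A standalone sketch of that core term:

import numpy as np

def boundary_penalty(x, x_repaired, gamma=1.0):
    """gamma: scalar or per-coordinate weights; returns the mean weighted squared violation."""
    x = np.asarray(x, dtype=float)
    x_repaired = np.asarray(x_repaired, dtype=float)
    return np.sum(gamma * (x_repaired - x)**2) / len(x)

# a point that violates an upper bound of 1.0 in its last coordinate:
print(boundary_penalty([0.2, 0.8, 1.5], [0.2, 0.8, 1.0]))  # 0.25 / 3, about 0.0833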
sampler.py (project: pycma, author: CMA-ES)
def multiply_C(self, factor):
        """multiply ``self.C`` with ``factor`` updating internal states.

        ``factor`` can be a scalar, a vector or a matrix. The vector
        is used as outer product and multiplied element-wise, i.e.,
        ``multiply_C(diag(C)**-0.5)`` generates a correlation matrix.

        Details:
        """
        self._updateC()
        if np.isscalar(factor):
            self.C *= factor
            self.D *= factor**0.5
            try:
                self.inverse_root_C /= factor**0.5
            except AttributeError:
                pass
        elif len(np.asarray(factor).shape) == 1:
            self.C *= np.outer(factor, factor)
            self._decompose_C()
        elif len(factor.shape) == 2:
            self.C *= factor
            self._decompose_C()
        else:
            raise ValueError(str(factor))
        # raise NotImplementedError('never tested')
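
The vector branch relies on the identity that multiplying C element-wise by the outer product of d = diag(C)**-0.5 yields a matrix with unit diagonal, i.e. a correlation matrix. A quick standalone check of that identity, without the sampler class:

import numpy as np

C = np.array([[4.0, 1.2],
              [1.2, 9.0]])    # a covariance matrix
d = np.diag(C)**-0.5          # 1 / standard deviation per coordinate
corr = C * np.outer(d, d)     # element-wise multiplication by the outer product

print(np.diag(corr))          # [1. 1.]  (unit diagonal, up to rounding)
print(corr[0, 1])             # 0.2, i.e. 1.2 / (2.0 * 3.0)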
fitness_transformations.py (project: pycma, author: CMA-ES)
def __call__(self, x, *args):
        f = Function.__call__(self, x, *args)
        if self.rel_noise:
            f += f * self.rel_noise(len(x))
            assert np.isscalar(f)
        if self.abs_noise:
            f += self.abs_noise(len(x))
        return f
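
Here rel_noise and abs_noise are callables that receive the problem dimension and return a scalar noise term. A minimal sketch with hypothetical noise callables (the names, scales and distributions below are illustrative assumptions, not the library defaults):

import numpy as np

def sphere(x):
    return float(np.sum(np.asarray(x)**2))

# hypothetical noise models (assumed for this sketch)
rel_noise = lambda dim: 0.1 * np.random.randn()    # multiplicative noise
abs_noise = lambda dim: 0.01 * np.random.randn()   # additive noise

x = [1.0, 2.0, 3.0]
f = sphere(x)
f += f * rel_noise(len(x))
f += abs_noise(len(x))
assert np.isscalar(f)
print(f)   # about 14, perturbed by the noise terms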
math.py (project: pycma, author: CMA-ES)
def max(vec, vec_or_scalar):
        b = vec_or_scalar
        if np.isscalar(b):
            m = [max(x, b) for x in vec]
        else:
            m = [max(vec[i], b[i]) for i in rglen(vec)]
        return m
math.py (project: pycma, author: CMA-ES)
def min(a, b):
        iss = np.isscalar
        if iss(a) and iss(b):
            return min(a, b)
        if iss(a):
            a, b = b, a
        # now only b can be still a scalar
        if iss(b):
            return [min(x, b) for x in a]
        else:  # two non-scalars must have the same length
            return [min(a[i], b[i]) for i in rglen(a)]
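
In pycma these two helpers are defined as static methods on a utility class, so the inner max/min calls resolve to the Python builtins. They mirror what np.maximum and np.minimum already do via broadcasting, except that they return plain lists; a quick comparison:

import numpy as np

vec = [1.0, 5.0, -2.0]

# scalar second argument
print(np.maximum(vec, 0.0))                 # [1. 5. 0.]
print(np.minimum(vec, 0.0))                 # [ 0.  0. -2.]

# vector second argument of the same length
print(np.maximum(vec, [0.0, 6.0, -3.0]))    # [ 1.  6. -2.]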
utils.py (project: pycma, author: CMA-ES)
def is_vector_list(x):
    """make an educated guess whether ``x`` is a list of vectors.

    >>> from cma.utilities.utils import is_vector_list as ivl
    >>> assert ivl([[0], [0]]) and not ivl([1,2,3])

    """
    try:
        return np.isscalar(x[0][0])
    except:
        return False
x2num.py (project: tensorboard, author: dmlc)
def makenp(x, modality=None):
    # if already numpy, return
    if isinstance(x, np.ndarray):
        if modality == 'IMG' and x.dtype == np.uint8:
            return x.astype(np.float32) / 255.0
        return x
    if np.isscalar(x):
        return np.array([x])
    if 'torch' in str(type(x)):
        return pytorch_np(x, modality)
    if 'chainer' in str(type(x)):
        return chainer_np(x, modality)
    if 'mxnet' in str(type(x)):
        return mxnet_np(x, modality)
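
A short usage sketch of the NumPy and scalar branches above, assuming the makenp defined here is in scope (the torch, chainer and mxnet branches delegate to project-specific converters and are not exercised):

import numpy as np

print(makenp(3.5))                        # [3.5]   (a scalar is wrapped in a one-element array)

img = np.full((4, 4), 255, dtype=np.uint8)
print(makenp(img, modality='IMG').max())  # 1.0     (uint8 images are rescaled to [0, 1])

arr = np.arange(3, dtype=np.float32)
print(makenp(arr) is arr)                 # True    (other ndarrays are returned unchanged)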
ScatterPlotItem.py (project: NeoAnalysis, author: neoanalysis)
def drawSymbol(painter, symbol, size, pen, brush):
    if symbol is None:
        return
    painter.scale(size, size)
    painter.setPen(pen)
    painter.setBrush(brush)
    if isinstance(symbol, basestring):
        symbol = Symbols[symbol]
    if np.isscalar(symbol):
        symbol = list(Symbols.values())[symbol % len(Symbols)]
    painter.drawPath(symbol)
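
The np.isscalar branch lets callers pass an integer index instead of a symbol name or a ready-made path; the index is wrapped modulo the number of registered symbols. The same lookup logic with a stand-in Symbols dict (pyqtgraph's real registry maps names to QPainterPath objects):

import numpy as np

# stand-in registry; the values would normally be QPainterPath objects
Symbols = {'o': 'circle-path', 's': 'square-path', 't': 'triangle-path'}

def resolve_symbol(symbol):
    if isinstance(symbol, str):      # name lookup first, since strings are also "scalars"
        return Symbols[symbol]
    if np.isscalar(symbol):          # integer index, wrapped around
        return list(Symbols.values())[symbol % len(Symbols)]
    return symbol                    # already a drawable path

print(resolve_symbol('s'))   # 'square-path'
print(resolve_symbol(4))     # 4 % 3 == 1, so again 'square-path'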
functions.py (project: NeoAnalysis, author: neoanalysis)
def gaussianFilter(data, sigma):
    """
    Drop-in replacement for scipy.ndimage.gaussian_filter.

    (note: results are only approximately equal to the output of
     gaussian_filter)
    """
    if np.isscalar(sigma):
        sigma = (sigma,) * data.ndim

    baseline = data.mean()
    filtered = data - baseline
    for ax in range(data.ndim):
        s = sigma[ax]
        if s == 0:
            continue

        # generate 1D gaussian kernel
        ksize = int(s * 6)
        x = np.arange(-ksize, ksize)
        kernel = np.exp(-x**2 / (2*s**2))
        kshape = [1,] * data.ndim
        kshape[ax] = len(kernel)
        kernel = kernel.reshape(kshape)

        # convolve as product of FFTs
        shape = data.shape[ax] + ksize
        scale = 1.0 / (abs(s) * (2*np.pi)**0.5)
        filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) * 
                                        np.fft.rfft(kernel, shape, axis=ax), 
                                        axis=ax)

        # clip off extra data
        sl = [slice(None)] * data.ndim
        sl[ax] = slice(filtered.shape[ax]-data.shape[ax],None,None)
        filtered = filtered[tuple(sl)]  # index with a tuple; list indexing raises an error in recent NumPy
    return filtered + baseline
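
A quick usage sketch, assuming the gaussianFilter defined above is in scope and SciPy is available for comparison. As the docstring warns, the FFT-based result only approximates scipy.ndimage.gaussian_filter; the differences concentrate near the array edges because the boundary handling differs:

import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)
data = rng.standard_normal((64, 64))

approx = gaussianFilter(data, 2.0)          # scalar sigma, applied along every axis
exact = ndimage.gaussian_filter(data, 2.0)
print(np.abs(approx - exact).max())         # small but nonzero discrepancy

aniso = gaussianFilter(data, (0, 3.0))      # per-axis sigmas; sigma == 0 skips that axis
print(aniso.shape)                          # (64, 64)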
layers.py (project: comprehend, author: Fenugreek)
def stimuli(self, layer=-1, location=[.5], corrsort=True, activation=1.0,
                static_hidden=True, overlay=None):

        if np.isscalar(location): location = [location]
        coders = self.coders
        if layer < 0: layer += len(coders)
        out_shape = coders[layer].output_shape(reduced=False)
        n_hidden = out_shape[-1]

        values = np.zeros([n_hidden] + list(out_shape[1:]),
                          dtype=self.dtype.as_numpy_dtype)

        mid_indices = [0 for j in range(len(out_shape) - 2)]
        for i in range(n_hidden):
            for loc in location:
                if len(mid_indices):
                    mid_indices[0] = int(out_shape[1] * loc)
                indices = [i] + mid_indices + [i]
                values[tuple(indices)] = activation                   

        self.set_batch_size(n_hidden)
        values = coders[layer].get_reconstructed_input(values, reduced=False, overlay=overlay,
                                                       static_hidden=static_hidden)
        for i in range(layer - 1, -1, -1):
            if coders[i].output_shape() != coders[i+1].input_shape():
                values = tf.reshape(values, coders[i].output_shape())
            values = coders[i].get_reconstructed_input(values, reduced=True, overlay=overlay,
                                                       static_hidden=static_hidden)

        values = values.eval().squeeze()
        if corrsort: return values[features.corrsort(values, use_tsp=True)]
        else: return values
networks.py (project: comprehend, author: Fenugreek)
def _make_overlay(self, location):
        if np.isscalar(location): location = [location]
        overlay = np.zeros(self.shapes[2], np.bool)
        for loc in location:
            overlay[:, :, loc, ...] = True
        return overlay
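
A standalone sketch of the same idea: normalize a scalar location to a one-element list, then mark the chosen positions along one axis of a boolean mask (the 4-D shape and the axis are just example assumptions):

import numpy as np

def make_overlay(shape, location, axis=2):
    if np.isscalar(location):
        location = [location]
    overlay = np.zeros(shape, dtype=bool)
    for loc in location:
        # select position `loc` along `axis`, everything along the other axes
        index = [slice(None)] * len(shape)
        index[axis] = loc
        overlay[tuple(index)] = True
    return overlay

mask = make_overlay((2, 3, 4, 5), 1)
print(mask.sum(), mask.shape)   # 30 (2, 3, 4, 5)   since 2 * 3 * 5 positions are set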
networks.py (project: comprehend, author: Fenugreek)
def get_reconstructed_input(self, hidden, reduced=False, overlay=None,
                                static_hidden=False, scale=True, **kwargs):
        """
        overlay mask holds positions of max indices (when max pooling was done).
        If None, use previous state where possible.
        If None, and no previous state, assign random positions.
        If scalar, set max indices to this.
        If list, put in multiple positions (optionally divide by pool_width if <scale>).

        Same random position is assigned to every hidden
        """        

        if not reduced:
            return Conv.get_reconstructed_input(self, hidden)

        hidden = tf.tile(tf.expand_dims(hidden, 3),
                         [1, 1, self.pool_width, 1, 1])

        if overlay is None:
            overlay = self.state.get('overlay')
            if overlay is None:
                overlay = self._random_overlay(static_hidden=static_hidden)
        elif np.isscalar(overlay) or type(overlay) == list:
            if scale and type(overlay) == list and len(overlay) > 1:
                scale = 1. / len(overlay)
            else: scale = None
            overlay = self._make_overlay(overlay)

        return Conv.get_reconstructed_input(self,
                                  self._pool_overlay(hidden, overlay), scale=scale)
tsbitmapper.py (project: tsbitmaps, author: binhmop)
def _get_num_bins(self, bins):
        if np.isscalar(bins):
            num_bins = bins
        else:
            num_bins = len(bins)  # bins is an array of bins
        return num_bins
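
A one-line standalone equivalent of the helper above: an integer is taken as the bin count directly, anything array-like contributes its length.

import numpy as np

def get_num_bins(bins):
    return bins if np.isscalar(bins) else len(bins)

print(get_num_bins(10))                        # 10
print(get_num_bins(np.linspace(0.0, 1.0, 5)))  # 5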
dc_stat_think.py (project: dc_stat_think, author: justinbois)
def ecdf_formal(x, data):
    """
    Compute the values of the formal ECDF generated from `data` at x.
    I.e., if F is the ECDF, return F(x).

    Parameters
    ----------
    x : int, float, or array_like
        Positions at which the formal ECDF is to be evaluated.
    data : array_like
        One-dimensional array of data to use to generate the ECDF.

    Returns
    -------
    output : float or ndarray
        Value of the ECDF at `x`.
    """
    # Remember if the input was scalar
    if np.isscalar(x):
        return_scalar = True
    else:
        return_scalar = False

    # If x has any nans, raise a RuntimeError
    if np.isnan(x).any():
        raise RuntimeError('Input cannot have NaNs.')

    # Convert x to array
    x = _convert_data(x, inf_ok=True)

    # Convert data to sorted NumPy array with no nan's
    data = _convert_data(data, inf_ok=True)

    # Compute formal ECDF value
    out = _ecdf_formal(x, np.sort(data))

    if return_scalar:
        return out[0]
    return out
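
The heavy lifting happens in the _ecdf_formal helper (not shown here). Conceptually, the formal ECDF at x is the fraction of data points less than or equal to x, which can be expressed compactly with np.searchsorted. A minimal standalone sketch of that idea, not necessarily the library's exact implementation:

import numpy as np

def ecdf_formal_sketch(x, data):
    data = np.sort(np.asarray(data, dtype=float))
    scalar_input = np.isscalar(x)
    x = np.atleast_1d(np.asarray(x, dtype=float))
    # fraction of data points <= each query value
    out = np.searchsorted(data, x, side='right') / len(data)
    return out[0] if scalar_input else out

data = [3.0, 1.0, 2.0, 4.0]
print(ecdf_formal_sketch(2.5, data))               # 0.5
print(ecdf_formal_sketch([0.0, 2.0, 10.0], data))  # [0.  0.5 1. ]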
mparray.py (project: mpnum, author: dseuss)
def __mul__(self, fact):
        """Multiply ``MPArray`` by a scalar.
          .. todo::  These could be made more stable by rescaling all
            non-normalized tens
        """
        if np.isscalar(fact):
            lcanon, rcanon = self.canonical_form
            ltens = self._lt
            ltens_new = it.chain(ltens[:lcanon], [fact * ltens[lcanon]],
                                 ltens[lcanon + 1:])
            return type(self)(LocalTensors(ltens_new, cform=(lcanon, rcanon)))

        raise NotImplementedError("Multiplication by non-scalar not supported")
mparray.py (project: mpnum, author: dseuss)
def __imul__(self, fact):
        if np.isscalar(fact):
            lcanon, _ = self.canonical_form
            # FIXME TEMPORARY FIX
            #  self._lt[lcanon] *= fact
            self._lt.update(lcanon, self._lt[lcanon] * fact)
            return self

        raise NotImplementedError("Multiplication by non-scalar not supported")
mparray.py (project: mpnum, author: dseuss)
def __truediv__(self, divisor):
        if np.isscalar(divisor):
            return self.__mul__(1 / divisor)
        raise NotImplementedError("Division by non-scalar not supported")

