Python column_stack() usage examples
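The snippets below are collected from open-source projects and assume import numpy as np (or import numpy) plus each project's own module-level imports. As a quick reference, here is a minimal sketch of what np.column_stack does: it takes a sequence of 1-D arrays and stacks them as the columns of a single 2-D array.

import numpy as np

# np.column_stack takes a sequence of 1-D arrays (or columns of 2-D arrays)
# and returns a single 2-D array with one input per column
x = np.array([1.0, 2.0, 3.0])
y = np.array([10.0, 20.0, 30.0])

xy = np.column_stack((x, y))
print(xy.shape)   # (3, 2)
print(xy)
# [[ 1. 10.]
#  [ 2. 20.]
#  [ 3. 30.]]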

importers.py (project: semi-auto-anno, author: moberweger)
def depthToPCL(dpt, T, background_val=0.):
    # get valid points and transform
    pts = np.asarray(np.where(~np.isclose(dpt, background_val))).transpose()
    pts = np.concatenate([pts[:, [1, 0]], np.ones((pts.shape[0], 1), dtype='float32')], axis=1)
    pts = np.dot(np.linalg.inv(np.asarray(T)), pts.T).T
    pts = (pts[:, 0:2] / pts[:, 2][:, None]).reshape((pts.shape[0], 2))

    # keep the depth values of the valid (non-background) pixels only
    depth = dpt[np.where(~np.isclose(dpt, background_val))]

    # get x and y data in a vectorized way
    row = (pts[:, 0] - 320.) / 460. * depth
    col = (pts[:, 1] - 240.) / 460. * depth

    # combine x, y, depth into an (N, 3) point cloud
    return np.column_stack((row, col, depth))
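A minimal usage sketch for the function above. The 240x320 depth map, the identity transform T and the foreground depth value are made up for illustration; the real project passes its camera calibration here, and the hard-coded 320/240/460 look like the camera's principal point and focal length.

import numpy as np

# hypothetical toy input: a 240x320 depth map with one small foreground patch
dpt = np.zeros((240, 320), dtype='float32')
dpt[100:110, 150:160] = 500.0       # made-up depth value for the patch
T = np.eye(3, dtype='float32')      # placeholder projective transform

pcl = depthToPCL(dpt, T)            # function from the snippet above
print(pcl.shape)                    # (100, 3): one (x, y, depth) row per valid pixel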
LogisticRegression.py (project: tinyml, author: parasdahal)
def predict(self, data, prob=False):
        """Computes the logistic probability of being a positive example

        Parameters
        ----------
        data : ndarray (n-rows,n-features)
            Test data to score using the current weights
        prob : Boolean
            If True, return probabilities; otherwise return binary class labels
        Returns
        -------
        ndarray (n-rows,)
            Probabilities if prob is True, otherwise 0/1 labels
            (0 if the probability is less than 0.5, else 1)
        """
        data = np.column_stack((np.ones(data.shape[0]), data))

        hypothesis = LogisticRegression.sigmoid(np.dot(data, self.theta))
        if not prob:
            return np.where(hypothesis >= .5, 1, 0)
        return hypothesis
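The np.column_stack((np.ones(...), data)) line above prepends a bias column of ones so that the first weight in theta acts as the intercept. A minimal sketch of that step alone, with made-up numbers:

import numpy as np

data = np.array([[2.0, 3.0],
                 [4.0, 5.0]])

# prepend a column of ones; the first model weight then acts as the intercept
X = np.column_stack((np.ones(data.shape[0]), data))
print(X)
# [[1. 2. 3.]
#  [1. 4. 5.]]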
ex3-self_learning_quant.py (project: sl-quant, author: danielzak)
def init_state(indata, test=False):
    close = indata['close'].values
    diff = np.diff(close)
    diff = np.insert(diff, 0, 0)
    sma15 = SMA(indata, timeperiod=15)
    sma60 = SMA(indata, timeperiod=60)
    rsi = RSI(indata, timeperiod=14)
    atr = ATR(indata, timeperiod=14)

    #--- Preprocess data
    xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))

    xdata = np.nan_to_num(xdata)
    if not test:
        scaler = preprocessing.StandardScaler()
        xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
        joblib.dump(scaler, 'data/scaler.pkl')
    else:
        # apply the scaler that was fitted on the training data instead of
        # re-fitting it on the test data
        scaler = joblib.load('data/scaler.pkl')
        xdata = np.expand_dims(scaler.transform(xdata), axis=1)
    state = xdata[0:1, 0:1, :]

    return state, xdata, close

ex1-self_learning_quant.py / ex2-self_learning_quant.py (project: sl-quant, author: danielzak); the init_state function below is identical in both files
def init_state(data):

    close = data
    diff = np.diff(data)
    diff = np.insert(diff, 0, 0)

    #--- Preprocess data
    xdata = np.column_stack((close, diff))
    xdata = np.nan_to_num(xdata)
    scaler = preprocessing.StandardScaler()
    xdata = scaler.fit_transform(xdata)

    state = xdata[0:1, :]
    return state, xdata
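A minimal, hypothetical call of the function above, assuming import numpy as np and from sklearn import preprocessing (the imports the original script performs at module level); the closing prices are made up:

import numpy as np
from sklearn import preprocessing

prices = np.array([10.0, 10.5, 10.2, 10.8, 11.0])   # made-up closing prices
state, xdata = init_state(prices)                    # function from the snippet above
print(state.shape, xdata.shape)                      # (1, 2) (5, 2)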

matutils.py (project: topical_word_embeddings, author: thunlp)
def corpus2dense(corpus, num_terms, num_docs=None, dtype=numpy.float32):
    """
    Convert corpus into a dense numpy array (documents will be columns). You
    must supply the number of features `num_terms`, because dimensionality
    cannot be deduced from the sparse vectors alone.

    You can optionally supply `num_docs` (=the corpus length) as well, so that
    a more memory-efficient code path is taken.

    This is the mirror function to `Dense2Corpus`.

    """
    if num_docs is not None:
        # we know the number of documents => don't bother column_stacking
        docno, result = -1, numpy.empty((num_terms, num_docs), dtype=dtype)
        for docno, doc in enumerate(corpus):
            result[:, docno] = sparse2full(doc, num_terms)
        assert docno + 1 == num_docs
    else:
        # newer NumPy versions require a sequence here rather than a generator
        result = numpy.column_stack([sparse2full(doc, num_terms) for doc in corpus])
    return result.astype(dtype)
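A hedged usage sketch for the function above with a toy gensim-style corpus of (term_id, weight) pairs. In the original module sparse2full is defined in the same file; the gensim.matutils version is imported here so the sketch is self-contained, which assumes gensim is installed.

import numpy
from gensim.matutils import sparse2full   # names the snippet above relies on

# toy corpus: two documents over a 4-term vocabulary
corpus = [[(0, 1.0), (2, 3.0)],
          [(1, 2.0), (3, 4.0)]]

dense = corpus2dense(corpus, num_terms=4, num_docs=2)   # function from the snippet above
print(dense.shape)   # (4, 2): one row per term, one column per document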
plot.py (project: tensorflow_end2end_speech_recognition, author: hirofumi0810)
def plot_loss(train_losses, dev_losses, steps, save_path):
    """Save history of training & dev loss as figure.
    Args:
        train_losses (list): train losses
        dev_losses (list): dev losses
        steps (list): steps
        save_path (string): directory where the csv file and the figure are saved
    """
    # Save as csv file
    loss_graph = np.column_stack((steps, train_losses, dev_losses))
    if os.path.isfile(os.path.join(save_path, "ler.csv")):
        os.remove(os.path.join(save_path, "ler.csv"))
    np.savetxt(os.path.join(save_path, "loss.csv"), loss_graph, delimiter=",")

    # TODO: error check for inf loss

    # Plot & save as png file
    plt.clf()
    plt.plot(steps, train_losses, color='blue', label="Train")
    plt.plot(steps, dev_losses, color='orange', label="Dev")
    plt.xlabel('step', fontsize=12)
    plt.ylabel('loss', fontsize=12)
    plt.legend(loc="upper right", fontsize=12)
    if os.path.isfile(os.path.join(save_path, "loss.png")):
        os.remove(os.path.join(save_path, "loss.png"))
    plt.savefig(os.path.join(save_path, "loss.png"), dvi=500)
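A hedged usage sketch for the function above; the loss values and the 'plots' output directory are made up, and a non-interactive matplotlib backend is selected so the sketch runs headless:

import os
import numpy as np
import matplotlib
matplotlib.use('Agg')                 # headless backend for this sketch
import matplotlib.pyplot as plt

save_path = 'plots'                   # hypothetical output directory
os.makedirs(save_path, exist_ok=True)

steps = [100, 200, 300]
train_losses = [2.5, 1.8, 1.2]        # made-up values
dev_losses = [2.7, 2.0, 1.5]

plot_loss(train_losses, dev_losses, steps, save_path)   # function from the snippet above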
models.py (project: orange3-timeseries, author: biolab)
def _predict_as_table(self, prediction, confidence):
        from Orange.data import Domain, ContinuousVariable
        means, lows, highs = [], [], []
        n_vars = prediction.shape[2] if len(prediction.shape) > 2 else 1
        for i, name in zip(range(n_vars),
                           self._table_var_names or range(n_vars)):
            mean = ContinuousVariable('{} (forecast)'.format(name))
            low = ContinuousVariable('{} ({:d}%CI low)'.format(name, confidence))
            high = ContinuousVariable('{} ({:d}%CI high)'.format(name, confidence))
            low.ci_percent = high.ci_percent = confidence
            mean.ci_attrs = (low, high)
            means.append(mean)
            lows.append(low)
            highs.append(high)
        domain = Domain(means + lows + highs)
        X = np.column_stack(prediction)
        table = Timeseries.from_numpy(domain, X)
        table.name = (self._table_name or '') + '({} forecast)'.format(self)
        return table
utils.py (project: elfi, author: elfi-dev)
def distance_as_discrepancy(dist, *summaries, observed):
    """Evaluate a distance function with signature `dist(summaries, observed)` in ELFI."""
    summaries = np.column_stack(summaries)
    # Ensure observed are 2d
    observed = np.concatenate([np.atleast_2d(o) for o in observed], axis=1)
    try:
        d = dist(summaries, observed)
    except ValueError as e:
        raise ValueError('Incompatible data shape for the distance node. Please check '
                         'summary (XA) and observed (XB) output data dimensions. They '
                         'have to be at most 2d. Especially ensure that summary nodes '
                         'output 2d data even with batch_size=1. Original error message '
                         'was: {}'.format(e))
    if d.ndim == 2 and d.shape[1] == 1:
        d = d.reshape(-1)
    return d
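A hedged sketch of how a function with this signature can be exercised outside ELFI, using scipy.spatial.distance.cdist as the dist callable; the summary arrays and observed values are made up, with one row per batch item:

import numpy as np
from scipy.spatial.distance import cdist

# two summary nodes, each producing a (3, 1) column of 3 batch items
s1 = np.array([[1.0], [2.0], [3.0]])
s2 = np.array([[0.5], [0.5], [0.5]])
observed = (np.array([[1.5]]), np.array([[0.5]]))

d = distance_as_discrepancy(cdist, s1, s2, observed=observed)   # function from the snippet above
print(d)   # one euclidean distance per batch item, shape (3,)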
parameter_inference.py (project: elfi, author: elfi-dev)
def _compute_weights_and_cov(self, pop):
        params = np.column_stack(tuple([pop.outputs[p] for p in self.parameter_names]))

        if self._populations:
            q_logpdf = GMDistribution.logpdf(params, *self._gm_params)
            p_logpdf = self._prior.logpdf(params)
            w = np.exp(p_logpdf - q_logpdf)
        else:
            w = np.ones(pop.n_samples)

        if np.count_nonzero(w) == 0:
            raise RuntimeError("All sample weights are zero. If you are using a prior "
                               "with a bounded support, this may be caused by specifying "
                               "a too small sample size.")

        # New covariance
        cov = 2 * np.diag(weighted_var(params, w))

        if not np.all(np.isfinite(cov)):
            logger.warning("Could not estimate the sample covariance. This is often "
                           "caused by majority of the sample weights becoming zero."
                           "Falling back to using unit covariance.")
            cov = np.diag(np.ones(params.shape[1]))

        return w, cov
io.py (project: yt, author: yt-project)
def _initialize_index(self, data_file, regions):
        # self.fields[g.id][fname] is the pattern here
        morton = []
        for ptype in self.ds.particle_types_raw:
            try:
                # newer NumPy versions require a sequence here rather than a generator
                pos = np.column_stack([self.fields[data_file.filename][
                    (ptype, "particle_position_%s" % ax)] for ax in 'xyz'])
            except KeyError:
                pos = self.fields[data_file.filename][ptype, "particle_position"]
            if np.any(pos.min(axis=0) < data_file.ds.domain_left_edge) or \
               np.any(pos.max(axis=0) > data_file.ds.domain_right_edge):
                raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                       data_file.ds.domain_left_edge,
                                       data_file.ds.domain_right_edge)
            regions.add_data_file(pos, data_file.file_id)
            morton.append(compute_morton(
                    pos[:,0], pos[:,1], pos[:,2],
                    data_file.ds.domain_left_edge,
                    data_file.ds.domain_right_edge))
        return np.concatenate(morton)
particle_fields.py (project: yt, author: yt-project)
def particle_vector_functions(ptype, coord_names, vel_names, registry):

    unit_system = registry.ds.unit_system

    # This will column_stack a set of scalars to create vector fields.

    def _get_vec_func(_ptype, names):
        def particle_vectors(field, data):
            v = [data[_ptype, name].in_units(field.units)
                  for name in names]
            c = np.column_stack(v)
            return data.apply_units(c, field.units)
        return particle_vectors
    registry.add_field((ptype, "particle_position"),
                       sampling_type="particle",
                       function=_get_vec_func(ptype, coord_names),
                       units = "code_length")
    registry.add_field((ptype, "particle_velocity"),
                       sampling_type="particle",
                       function=_get_vec_func(ptype, vel_names),
                       units = unit_system["velocity"])
sandSpline.py (project: sand-spline, author: inconvergent)
def __next__(self):
    try:
      g = next(self.guide)
    except Exception:
      raise StopIteration

    pnum = self.pnum

    r = 1.0-2.0*random(pnum)
    self.noise[:] += r*self.scale

    a = random(pnum)*TWOPI
    rnd = column_stack((cos(a), sin(a)))

    self.path += rnd * reshape(self.noise, (self.pnum,1))
    self.interpolated_path = _rnd_interpolate(self.path, self.inum, ordered=ORDERED)

    self.i+=1
    return g + self.interpolated_path
models.py (project: AutoML5, author: djajetic)
def predict_proba(self, X):
        if len(X.shape)==1: # IG modif Feb3 2015
            X = np.reshape(X,(-1,1))   
        prediction = self.predictors[0].predict_proba(X)
        if self.n_label==2:                 # Keep only 1 prediction, 1st column = (1 - 2nd column)
            prediction = prediction[:,1]
        for i in range(1,self.n_target): # More than 1 target, we assume that labels are binary
            new_prediction = self.predictors[i].predict_proba(X)[:,1]
            prediction = np.column_stack((prediction, new_prediction))
        return prediction
kernels.py (project: MKLMM, author: omerwe)
def sq_dist(a, b=None):
    #mean-center for numerical stability
    D, n = a.shape[0], a.shape[1]
    if (b is None):
        mu = a.mean(axis=1)
        a -= mu[:, np.newaxis]
        b = a
        m = n
        aSq = np.sum(a**2, axis=0)
        bSq = aSq
    else:
        d, m = b.shape[0], b.shape[1]
        if (d != D): raise Exception('column lengths must agree')
        mu = (float(m)/float(m+n))*b.mean(axis=1) + (float(n)/float(m+n))*a.mean(axis=1)
        a -= mu[:, np.newaxis]
        b -= mu[:, np.newaxis]      
        aSq = np.sum(a**2, axis=0)
        bSq = np.sum(b**2, axis=0)

    C = np.tile(np.column_stack(aSq).T, (1, m)) + np.tile(bSq, (n, 1)) - 2*a.T.dot(b)
    C = np.maximum(C, 0)    #remove numerical noise
    return C
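As a sanity check, the function above should agree with scipy's squared euclidean cdist on transposed inputs (columns are samples here). A small sketch with random data; note that sq_dist mean-centres its arguments in place, so copies are passed:

import numpy as np
from scipy.spatial.distance import cdist

rng = np.random.RandomState(0)
a = rng.randn(5, 10)    # 5 features, 10 samples (samples are columns)
b = rng.randn(5, 7)

C = sq_dist(a.copy(), b.copy())          # function from the snippet above
ref = cdist(a.T, b.T, 'sqeuclidean')     # rows are samples for cdist
print(np.allclose(C, ref))               # True, up to numerical noise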

kernels.py (project: MKLMM, author: omerwe)
def __init__(self, X, pos):
        Kernel.__init__(self)
        self.X_scaled = X/np.sqrt(X.shape[1])
        d = pos.shape[0]
        self.D = np.abs(np.tile(np.column_stack(pos).T, (1, d)) - np.tile(pos, (d, 1))) / 100000.0
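The D attribute built above is a pairwise absolute-difference matrix over pos, divided by 1e5. A small sketch of the tiling trick with made-up positions, assuming import numpy as np:

import numpy as np

pos = np.array([100000.0, 250000.0, 400000.0])   # made-up positions
d = pos.shape[0]

# np.column_stack(pos).T turns the 1-D pos into a (d, 1) column,
# so the two tiles expand into a full d x d pairwise |pos_i - pos_j| matrix
D = np.abs(np.tile(np.column_stack(pos).T, (1, d)) - np.tile(pos, (d, 1))) / 100000.0
print(D)
# [[0.  1.5 3. ]
#  [1.5 0.  1.5]
#  [3.  1.5 0. ]]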

