Python ascontiguousarray() example source code

colorconv.py (project: FCN_train, author: 315386775)
def _convert(matrix, arr):
    """Do the color space conversion.

    Parameters
    ----------
    matrix : array_like
        The 3x3 matrix to use.
    arr : array_like
        The input array.

    Returns
    -------
    out : ndarray, dtype=float
        The converted array.
    """
    arr = _prepare_colorarray(arr)
    arr = np.swapaxes(arr, 0, -1)
    oldshape = arr.shape
    arr = np.reshape(arr, (3, -1))
    out = np.dot(matrix, arr)
    out.shape = oldshape
    out = np.swapaxes(out, -1, 0)

    return np.ascontiguousarray(out)
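
A minimal standalone check (not part of colorconv.py) of why the final ascontiguousarray call is there: swapaxes only returns a strided view, so the converted result is generally not laid out in C order until it is explicitly copied.

import numpy as np

out = np.swapaxes(np.zeros((3, 5, 7)), 0, -1)
print(out.flags['C_CONTIGUOUS'])                        # False: swapaxes is just a view
out = np.ascontiguousarray(out)
print(out.flags['C_CONTIGUOUS'])                        # True: packed C-order copy
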
solution_classes.py (project: risk-slim, author: ustunb)
def filter_sort_unique(self, max_objval=float('Inf')):
        # filter
        if max_objval < float('inf'):
            good_idx = self.objvals <= max_objval
            self.objvals = self.objvals[good_idx]
            self.solutions = self.solutions[good_idx]

        if len(self.objvals) > 0:
            sort_idx = np.argsort(self.objvals)
            self.objvals = self.objvals[sort_idx]
            self.solutions = self.solutions[sort_idx]

            # unique
            b = np.ascontiguousarray(self.solutions).view(
                np.dtype((np.void, self.solutions.dtype.itemsize * self.P)))
            _, unique_idx = np.unique(b, return_index=True)
            self.objvals = self.objvals[unique_idx]
            self.solutions = self.solutions[unique_idx]
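
The "# unique" block above uses a common NumPy idiom: view each row of a C-contiguous 2-D array as a single opaque np.void record so that np.unique can deduplicate whole rows at once. A self-contained sketch of the same trick, with toy data rather than anything from risk-slim:

import numpy as np

solutions = np.array([[1, 0, 1],
                      [0, 1, 1],
                      [1, 0, 1]])
contig = np.ascontiguousarray(solutions)   # the void view is only safe on contiguous rows
row_view = contig.view(np.dtype((np.void, contig.dtype.itemsize * contig.shape[1])))
_, unique_idx = np.unique(row_view, return_index=True)
print(solutions[np.sort(unique_idx)])      # keeps the first copy of each distinct row
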
cartpole_wrapper.py (project: pytorch-nec, author: mjacar)
def get_screen(self):
    screen = self.env.render(mode='rgb_array').transpose(
        (2, 0, 1))  # transpose into torch order (CHW)
    # Strip off the top and bottom of the screen
    screen = screen[:, 160:320]
    view_width = 320
    cart_location = self.get_cart_location()
    if cart_location < view_width // 2:
        slice_range = slice(view_width)
    elif cart_location > (self.screen_width - view_width // 2):
        slice_range = slice(-view_width, None)
    else:
        slice_range = slice(cart_location - view_width // 2,
                            cart_location + view_width // 2)
    # Strip off the edges, so that we have a square image centered on a cart
    screen = screen[:, :, slice_range]
    # Convert to float, rescale, convert to torch tensor
    # (this doesn't require a copy)
    screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
    screen = torch.from_numpy(screen)
    # Resize, and add a batch dimension (BCHW)
    return self.resize(screen).numpy()
roidb.py (project: adversarial-frcnn, author: xiaolonw)
def _compute_targets(rois, overlaps, labels):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return np.zeros((rois.shape[0], 5), dtype=np.float32)
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
        np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
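
bbox_overlaps here comes from the project's Cython utilities (utils.cython_bbox in the py-faster-rcnn family); it walks raw double buffers, which is why both inputs are forced to contiguous float arrays first. (np.float is a deprecated alias for the built-in float and has been removed in recent NumPy releases, so current code would write dtype=float or np.float64.) For readers without the Cython extension, a rough pure-NumPy stand-in for the IoU computation, assuming [x1, y1, x2, y2] rows:

import numpy as np

def bbox_overlaps_np(boxes, query_boxes):
    # Pairwise IoU between an (N, 4) and a (K, 4) array of boxes; the +1 terms
    # follow the inclusive pixel convention used by the Faster R-CNN code above.
    ix1 = np.maximum(boxes[:, None, 0], query_boxes[None, :, 0])
    iy1 = np.maximum(boxes[:, None, 1], query_boxes[None, :, 1])
    ix2 = np.minimum(boxes[:, None, 2], query_boxes[None, :, 2])
    iy2 = np.minimum(boxes[:, None, 3], query_boxes[None, :, 3])
    iw = np.clip(ix2 - ix1 + 1, 0, None)
    ih = np.clip(iy2 - iy1 + 1, 0, None)
    inter = iw * ih
    areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    query_areas = (query_boxes[:, 2] - query_boxes[:, 0] + 1) * \
                  (query_boxes[:, 3] - query_boxes[:, 1] + 1)
    return inter / (areas[:, None] + query_areas[None, :] - inter)
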
topic_models.py (project: slda, author: Savvysherpa)
def fit(self, X, y):
        """
        Estimate the topic distributions per document (theta), term
        distributions per topic (phi), and regression coefficients (eta).

        Parameters
        ----------
        X : array-like, shape = (n_docs, n_terms)
            The document-term matrix.

        y : array-like, shape = (n_docs,)
            Response values for each document.
        """

        self.doc_term_matrix = X
        self.n_docs, self.n_terms = X.shape
        self.n_tokens = X.sum()
        doc_lookup, term_lookup = self._create_lookups(X)
        # iterate
        self.theta, self.phi, self.eta, self.loglikelihoods = gibbs_sampler_slda(
            self.n_iter, self.n_report_iter,
            self.n_topics, self.n_docs, self.n_terms, self.n_tokens,
            self.alpha, self.beta, self.mu, self.nu2, self.sigma2,
            doc_lookup, term_lookup,
            np.ascontiguousarray(y, dtype=np.float64), self.seed)
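
The single ascontiguousarray call prepares the response vector for gibbs_sampler_slda, which expects a C-contiguous float64 array rather than, say, a Python list, a pandas column, or an integer array. A small sketch of just that preparation step, with toy values:

import numpy as np

y = [2.5, 0.0, 1.0, 3.5]                          # e.g. a list or pandas Series of responses
y_arr = np.ascontiguousarray(y, dtype=np.float64)
assert y_arr.flags['C_CONTIGUOUS'] and y_arr.dtype == np.float64
same = np.ascontiguousarray(y_arr, dtype=np.float64)
print(np.shares_memory(y_arr, same))              # True: no copy when the layout already matches
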
topic_models.py (project: slda, author: Savvysherpa)
def fit(self, X, y):
        """
        Estimate the topic distributions per document (theta), term
        distributions per topic (phi), and regression coefficients (eta).

        Parameters
        ----------
        X : array-like, shape = (n_docs, n_terms)
            The document-term matrix.

        y : array-like, shape = (n_docs,)
            Response values for each document.
        """

        self.doc_term_matrix = X
        self.n_docs, self.n_terms = X.shape
        self.n_tokens = X.sum()
        doc_lookup, term_lookup = self._create_lookups(X)
        # iterate
        self.theta, self.phi, self.eta, self.loglikelihoods = gibbs_sampler_blslda(
            self.n_iter, self.n_report_iter,
            self.n_topics, self.n_docs, self.n_terms, self.n_tokens,
            self.alpha, self.beta, self.mu, self.nu2, self.b,
            doc_lookup, term_lookup,
            np.ascontiguousarray(y, dtype=np.float64), self.seed)
topic_models.py (project: slda, author: Savvysherpa)
def fit(self, X, y):
        """
        Estimate the topic distributions per document (theta), term
        distributions per topic (phi), and regression coefficients (eta).

        Parameters
        ----------
        X : array-like, shape = (n_docs, n_terms)
            The document-term matrix.

        y : array-like, shape = (n_docs,)
            Response values for each document.
        """

        self.doc_term_matrix = X
        self.n_docs, self.n_terms = X.shape
        self.n_tokens = X.sum()
        doc_lookup, term_lookup = self._create_lookups(X)
        # iterate
        self.theta, self.phi, self.eta, self.loglikelihoods = gibbs_sampler_slda(
            self.n_iter, self.n_report_iter,
            self.n_topics, self.n_docs, self.n_terms, self.n_tokens,
            self.alpha, self.beta, self.mu, self.nu2, self.sigma2,
            doc_lookup, term_lookup,
            np.ascontiguousarray(y, dtype=np.float64), self.seed)
topic_models.py (project: slda, author: Savvysherpa)
def fit(self, X, y):
        """
        Estimate the topic distributions per document (theta), term
        distributions per topic (phi), and regression coefficients (eta).

        Parameters
        ----------
        X : array-like, shape = (n_docs, n_terms)
            The document-term matrix.

        y : array-like, shape = (n_docs,)
            Response values for each document.
        """

        self.doc_term_matrix = X
        self.n_docs, self.n_terms = X.shape
        self.n_tokens = X.sum()
        doc_lookup, term_lookup = self._create_lookups(X)
        # iterate
        self.theta, self.phi, self.eta, self.loglikelihoods = gibbs_sampler_blslda(
            self.n_iter, self.n_report_iter,
            self.n_topics, self.n_docs, self.n_terms, self.n_tokens,
            self.alpha, self.beta, self.mu, self.nu2, self.b,
            doc_lookup, term_lookup,
            np.ascontiguousarray(y, dtype=np.float64), self.seed)
roidb.py (project: faster-rcnn-resnet, author: Eniac-Xie)
def _compute_targets(rois, overlaps, labels):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return np.zeros((rois.shape[0], 5), dtype=np.float32)
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
        np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
libtcodpy.py (project: warhexer, author: sudasana)
def console_fill_foreground(con,r,g,b) :
    if len(r) != len(g) or len(r) != len(b):
        raise TypeError('R, G and B must all have the same size.')

    if (numpy_available and isinstance(r, numpy.ndarray) and
        isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
        #numpy arrays, use numpy's ctypes functions
        r = numpy.ascontiguousarray(r, dtype=numpy.int_)
        g = numpy.ascontiguousarray(g, dtype=numpy.int_)
        b = numpy.ascontiguousarray(b, dtype=numpy.int_)
        cr = r.ctypes.data_as(POINTER(c_int))
        cg = g.ctypes.data_as(POINTER(c_int))
        cb = b.ctypes.data_as(POINTER(c_int))
    else:
        # otherwise convert using ctypes arrays
        cr = (c_int * len(r))(*r)
        cg = (c_int * len(g))(*g)
        cb = (c_int * len(b))(*b)

    _lib.TCOD_console_fill_foreground(con, cr, cg, cb)
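
The NumPy branch hands raw pointers to the C library, so ascontiguousarray together with a fixed integer dtype guarantees that the buffer behind r.ctypes.data_as(...) is one packed block of native integers. A standalone sketch of that hand-off; note that numpy.intc is the exact counterpart of ctypes.c_int, whereas the numpy.int_ used above is a C long and can be wider on some platforms:

import numpy as np
from ctypes import POINTER, c_int

r = np.linspace(0, 255, 6)[::2]              # float64 and non-contiguous
r = np.ascontiguousarray(r, dtype=np.intc)   # contiguous copy with C `int` layout
cr = r.ctypes.data_as(POINTER(c_int))
print([cr[i] for i in range(len(r))])        # [0, 102, 204]: what a C `int *` parameter sees
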
libtcodpy.py (project: warhexer, author: sudasana)
def console_fill_background(con,r,g,b) :
    if len(r) != len(g) or len(r) != len(b):
        raise TypeError('R, G and B must all have the same size.')

    if (numpy_available and isinstance(r, numpy.ndarray) and
        isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
        #numpy arrays, use numpy's ctypes functions
        r = numpy.ascontiguousarray(r, dtype=numpy.int_)
        g = numpy.ascontiguousarray(g, dtype=numpy.int_)
        b = numpy.ascontiguousarray(b, dtype=numpy.int_)
        cr = r.ctypes.data_as(POINTER(c_int))
        cg = g.ctypes.data_as(POINTER(c_int))
        cb = b.ctypes.data_as(POINTER(c_int))
    else:
        # otherwise convert using ctypes arrays
        cr = (c_int * len(r))(*r)
        cg = (c_int * len(g))(*g)
        cb = (c_int * len(b))(*b)

    _lib.TCOD_console_fill_background(con, cr, cg, cb)
segmenter.py (project: train-DeepLab, author: martinkersner)
def predict(self, inputs):
    # uses MEMORY_DATA layer for loading images and postprocessing DENSE_CRF layer
    img = inputs[0].transpose((2, 0, 1))
    img = img[np.newaxis, :].astype(np.float32)
    label = np.zeros((1, 1, 1, 1), np.float32)
    data_dim = np.zeros((1, 1, 1, 2), np.float32)
    data_dim[0][0][0][0] = img.shape[2]
    data_dim[0][0][0][1] = img.shape[3]

    img      = np.ascontiguousarray(img, dtype=np.float32)
    label    = np.ascontiguousarray(label, dtype=np.float32)
    data_dim = np.ascontiguousarray(data_dim, dtype=np.float32)

    self.set_input_arrays(img, label, data_dim)
    out = self.forward()

    predictions = out[self.outputs[0]] # the output layer should be called crf_inf
    segm_result = predictions[0].argmax(axis=0).astype(np.uint8)

    return segm_result
unique.py (project: lps-anchor-pos-estimator, author: bitcraze)
def unique(eq):
    eq = eqsize(eq)
    c1 = [None] * eq.size
    for i in range(0, eq.size):
        c1[i] = hash(eq[i])

    c1 = np.asarray(c1)

    if c1.ndim == 1:
        _, ia, ic = np.unique(c1, return_index=True, return_inverse=True)
        ia = (ia[:, ]).conj().T
        ic = (ic[:, ]).conj().T
        u = eq[ia]

    else:
        a = c1
        b = np.ascontiguousarray(a).view(
            np.dtype((np.void, a.dtype.itemsize * a.shape[1])))
        _, ia, ic = np.unique(b, return_index=True, return_inverse=True)
        u = eq[ia]  # unique rows, mirroring the 1-D branch so `u` is always defined

    return u, ia, ic
roidb.py (project: py-faster-rcnn-tk1, author: joeking11829)
def _compute_targets(rois, overlaps, labels):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
        np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
data_utils.py (project: semantic-tagging, author: bjerva)
def write_features_hdf5(train, valid, test):
    f = h5py.File(args.hdf5, "w")

    train_grp = f.create_group("train")
    train_x = train_grp.create_dataset("train_x", train[0].shape, dtype='f', compression="gzip", compression_opts=9)
    train_y = train_grp.create_dataset("train_y", train[1].shape, dtype='i', compression="gzip", compression_opts=9)

    valid_grp = f.create_group("valid")
    valid_x = valid_grp.create_dataset("valid_x", valid[0].shape, dtype='f', compression="gzip", compression_opts=9)
    valid_y = valid_grp.create_dataset("valid_y", valid[1].shape, dtype='i', compression="gzip", compression_opts=9)

    test_grp = f.create_group("test")
    test_x = test_grp.create_dataset("test_x", test[0].shape, dtype='f', compression="gzip", compression_opts=9)
    test_y = test_grp.create_dataset("test_y", test[1].shape, dtype='i', compression="gzip", compression_opts=9)

    train_x.write_direct(np.ascontiguousarray(train[0], dtype=train[0].dtype))
    train_y.write_direct(np.ascontiguousarray(train[1], dtype=train[1].dtype))
    valid_x.write_direct(np.ascontiguousarray(valid[0], dtype=valid[0].dtype))
    valid_y.write_direct(np.ascontiguousarray(valid[1], dtype=valid[1].dtype))
    test_x.write_direct(np.ascontiguousarray(test[0], dtype=test[0].dtype))
    test_y.write_direct(np.ascontiguousarray(test[1], dtype=test[1].dtype))

    f.close()
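
h5py's write_direct copies straight from the source buffer into the dataset and requires that source to be a C-contiguous array, so the ascontiguousarray wrappers above are a cheap no-op when the splits are already contiguous and a real copy only when they are not. A small sketch with hypothetical file and dataset names:

import numpy as np
import h5py

data = np.random.rand(100, 8).astype('f')[:, ::2]       # slicing leaves a non-contiguous view
with h5py.File('features_example.h5', 'w') as f:
    dset = f.create_dataset('x', data.shape, dtype='f', compression='gzip')
    dset.write_direct(np.ascontiguousarray(data, dtype=data.dtype))
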
image_utils.py (project: pytorch-smoothgrad, author: pkdn)
def preprocess_image(img, cuda=False):
    means=[0.485, 0.456, 0.406]
    stds=[0.229, 0.224, 0.225]

    preprocessed_img = img.copy()[:, :, ::-1]
    for i in range(3):
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] - means[i]
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] / stds[i]
    preprocessed_img = \
        np.ascontiguousarray(np.transpose(preprocessed_img, (2, 0, 1)))
    preprocessed_img = torch.from_numpy(preprocessed_img)
    preprocessed_img.unsqueeze_(0)
    if cuda:
        preprocessed_img = Variable(preprocessed_img.cuda(), requires_grad=True)
    else:
        preprocessed_img = Variable(preprocessed_img, requires_grad=True)

    return preprocessed_img
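
The ascontiguousarray call is load-bearing here: the channel flip [:, :, ::-1] gives the array negative strides and the HWC-to-CHW transpose leaves it non-contiguous, and torch.from_numpy rejects arrays with negative strides. A short sketch of that failure and the fix, with an illustrative shape:

import numpy as np
import torch

img = np.random.rand(224, 224, 3).astype(np.float32)
chw = np.transpose(img[:, :, ::-1], (2, 0, 1))        # flipped and transposed view
try:
    torch.from_numpy(chw)                             # raises: negative strides unsupported
except ValueError as err:
    print(err)
tensor = torch.from_numpy(np.ascontiguousarray(chw))  # contiguous copy converts cleanly
print(tensor.shape)                                   # torch.Size([3, 224, 224])
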
switching.py (project: Auspex, author: BBN-Q)
def find_boundary(mesh,vals,threshold=0.5):
    """ Find boundary points on the phase diagram where the switching probability = threshold """
    boundary_points = []
    durs = mesh.points[:,0]
    volts = mesh.points[:,1]
    indices, indptr = mesh.vertex_neighbor_vertices
    for k in range(len(vals)):
        for k_nb in indptr[indices[k]:indices[k+1]]:
            if (vals[k]-threshold)*(vals[k_nb]-threshold)<0:
                x0 = find_cross([durs[k],vals[k]],[durs[k_nb],vals[k_nb]],cut=threshold)
                y0 = find_cross([volts[k],vals[k]],[volts[k_nb],vals[k_nb]],cut=threshold)
                boundary_points.append([x0,y0])

    boundary_points = np.array(boundary_points)
    if len(boundary_points) > 0:
        b = np.ascontiguousarray(boundary_points).view(np.dtype((np.void,
                            boundary_points.dtype.itemsize * boundary_points.shape[1])))
        _, idx = np.unique(b, return_index=True)
        boundary_points = boundary_points[idx]
        # Sort the boundary_points by x-axis
        boundary_points = sorted(boundary_points, key=itemgetter(0))
    return np.array(boundary_points)
information_process.py (project: IDNNs, author: ravidziv)
def calc_information_sampling(data, bins, pys1, pxs, label, b, b1, len_unique_a, p_YgX, unique_inverse_x,
                              unique_inverse_y, calc_DKL=False):
    bins = bins.astype(np.float32)
    num_of_bins = bins.shape[0]
    # bins = stats.mstats.mquantiles(np.squeeze(data.reshape(1, -1)), np.linspace(0,1, num=num_of_bins))
    # hist, bin_edges = np.histogram(np.squeeze(data.reshape(1, -1)), normed=True)
    digitized = bins[np.digitize(np.squeeze(data.reshape(1, -1)), bins) - 1].reshape(len(data), -1)
    b2 = np.ascontiguousarray(digitized).view(
        np.dtype((np.void, digitized.dtype.itemsize * digitized.shape[1])))
    unique_array, unique_inverse_t, unique_counts = \
        np.unique(b2, return_index=False, return_inverse=True, return_counts=True)
    p_ts = unique_counts / float(sum(unique_counts))
    PXs, PYs = np.asarray(pxs).T, np.asarray(pys1).T
    if calc_DKL:
        pxy_given_T = np.array(
            [calc_probs(i, unique_inverse_t, label, b, b1, len_unique_a) for i in range(0, len(unique_array))]
        )
        p_XgT = np.vstack(pxy_given_T[:, 0])
        p_YgT = pxy_given_T[:, 1]
        p_YgT = np.vstack(p_YgT).T
        DKL_YgX_YgT = np.sum([inf_ut.KL(c_p_YgX, p_YgT.T) for c_p_YgX in p_YgX.T], axis=0)
        H_Xgt = np.nansum(p_XgT * np.log2(p_XgT), axis=1)
    local_IXT, local_ITY = calc_information_from_mat(PXs, PYs, p_ts, digitized, unique_inverse_x, unique_inverse_y,
                                                     unique_array)
    return local_IXT, local_ITY
information_process.py (project: IDNNs, author: ravidziv)
def calc_by_sampling_neurons(ws_iter_index, num_of_samples, label, sigma, bins, pxs):
    iter_information = []
    for j in range(len(ws_iter_index)):
        data = ws_iter_index[j]
        new_data = np.zeros((num_of_samples * data.shape[0], data.shape[1]))
        labels = np.zeros((num_of_samples * label.shape[0], label.shape[1]))
        x = np.zeros((num_of_samples * data.shape[0], 2))
        for i in range(data.shape[0]):
            cov_matrix = np.eye(data[i, :].shape[0]) * sigma
            t_i = np.random.multivariate_normal(data[i, :], cov_matrix, num_of_samples)
            new_data[num_of_samples * i:(num_of_samples * (i + 1)), :] = t_i
            labels[num_of_samples * i:(num_of_samples * (i + 1)), :] = label[i, :]
            x[num_of_samples * i:(num_of_samples * (i + 1)), 0] = i
        b = np.ascontiguousarray(x).view(np.dtype((np.void, x.dtype.itemsize * x.shape[1])))
        unique_array, unique_indices, unique_inverse_x, unique_counts = \
            np.unique(b, return_index=True, return_inverse=True, return_counts=True)
        b_y = np.ascontiguousarray(labels).view(np.dtype((np.void, labels.dtype.itemsize * labels.shape[1])))
        unique_array_y, unique_indices_y, unique_inverse_y, unique_counts_y = \
            np.unique(b_y, return_index=True, return_inverse=True, return_counts=True)
        pys1 = unique_counts_y / float(np.sum(unique_counts_y))
        iter_information.append(
            calc_information_for_layer(data=new_data, bins=bins, unique_inverse_x=unique_inverse_x,
                                       unique_inverse_y=unique_inverse_y, pxs=pxs, pys1=pys1))
        params = np.array(iter_information)
        return params
information_process.py (project: IDNNs, author: ravidziv)
def extract_probs(label, x):
    """calculate the probabilities of the given data and labels p(x), p(y) and (y|x)"""
    pys = np.sum(label, axis=0) / float(label.shape[0])
    b = np.ascontiguousarray(x).view(np.dtype((np.void, x.dtype.itemsize * x.shape[1])))
    unique_array, unique_indices, unique_inverse_x, unique_counts = \
        np.unique(b, return_index=True, return_inverse=True, return_counts=True)
    unique_a = x[unique_indices]
    b1 = np.ascontiguousarray(unique_a).view(np.dtype((np.void, unique_a.dtype.itemsize * unique_a.shape[1])))
    pxs = unique_counts / float(np.sum(unique_counts))
    p_y_given_x = []
    for i in range(0, len(unique_array)):
        indexs = unique_inverse_x == i
        py_x_current = np.mean(label[indexs, :], axis=0)
        p_y_given_x.append(py_x_current)
    p_y_given_x = np.array(p_y_given_x).T
    b_y = np.ascontiguousarray(label).view(np.dtype((np.void, label.dtype.itemsize * label.shape[1])))
    unique_array_y, unique_indices_y, unique_inverse_y, unique_counts_y = \
        np.unique(b_y, return_index=True, return_inverse=True, return_counts=True)
    pys1 = unique_counts_y / float(np.sum(unique_counts_y))
    return pys, pys1, p_y_given_x, b1, b, unique_a, unique_inverse_x, unique_inverse_y, pxs
roidb.py (project: py-faster-rcnn-resnet-imagenet, author: tianzhi0549)
def _compute_targets(rois, overlaps, labels):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return np.zeros((rois.shape[0], 5), dtype=np.float32)
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
        np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
data.py (project: polara, author: Evfro)
def to_coo(self, tensor_mode=False):
        userid, itemid, feedback = self.fields
        user_item_data = self.training[[userid, itemid]].values

        if tensor_mode:
            # TODO this recomputes feedback data every new function call,
            # but if data has not changed - no need for this, make a property
            new_feedback, feedback_transform = self.reindex(self.training, feedback, inplace=False)
            self.index = self.index._replace(feedback=feedback_transform)

            idx = np.hstack((user_item_data, new_feedback[:, np.newaxis]))
            idx = np.ascontiguousarray(idx)
            val = np.ones(self.training.shape[0],)
        else:
            idx = user_item_data
            val = self.training[feedback].values

        shp = tuple(idx.max(axis=0) + 1)
        idx = idx.astype(np.intp)
        val = np.ascontiguousarray(val)
        return idx, val, shp
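
For reference, the (idx, val, shp) triple built in the non-tensor branch is exactly what scipy.sparse.coo_matrix consumes. A self-contained sketch with toy index data rather than a real polara model:

import numpy as np
from scipy.sparse import coo_matrix

idx = np.array([[0, 2], [1, 0], [1, 2]], dtype=np.intp)   # toy (user, item) pairs
val = np.ascontiguousarray([5.0, 3.0, 4.0])
shp = tuple(idx.max(axis=0) + 1)                          # (2, 3)
user_item = coo_matrix((val, (idx[:, 0], idx[:, 1])), shape=shp)
print(user_item.toarray())                                # [[0. 0. 5.]
                                                          #  [3. 0. 4.]]
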
roidb.py (project: face-py-faster-rcnn, author: playerkk)
def _compute_targets(rois, overlaps, labels):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return np.zeros((rois.shape[0], 5), dtype=np.float32)
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
        np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
parcnsdl.py (project: sporco, author: bwohlberg)
def swap_axis_to_0(x, axis):
    """Insert a new singleton axis at position 0 and swap it with the
    specified axis. The resulting array has an additional dimension,
    with ``axis`` + 1 (which was ``axis`` before the insertion of the
    new axis) of ``x`` at position 0, and a singleton axis at position
    ``axis`` + 1.

    Parameters
    ----------
    x : ndarray
      Input array
    axis : int
      Index of axis in ``x`` to swap to axis index 0.

    Returns
    -------
    arr : ndarray
      Output array
    """

    return np.ascontiguousarray(np.swapaxes(x[np.newaxis, ...], 0, axis+1))
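
A quick usage check of the docstring (the shape arithmetic follows directly from the newaxis insertion and the swap):

import numpy as np

x = np.arange(24).reshape(2, 3, 4)
y = swap_axis_to_0(x, axis=1)
print(y.shape)                      # (3, 2, 1, 4): old axis 1 is now axis 0, with a
                                    # singleton axis left behind at position axis + 1
print(y.flags['C_CONTIGUOUS'])      # True: ascontiguousarray copies the swapped view
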
roidb.py (project: Automatic_Group_Photography_Enhancement, author: Yuliang-Zou)
def _compute_targets(rois, overlaps, labels):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return np.zeros((rois.shape[0], 5), dtype=np.float32)
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
        np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
trackmark.py (project: MDT, author: cbclab)
def write_tvl_direction_pairs(tvl_filename, tvl_header, direction_pairs):
        """Write the given directions to TVL.

        The direction pairs should be a list with lists containing the vector and value to write. For example:
        ((vec, val), (vec1, val1), ...) up to three pairs are allowed.

        Args:
            tvl_filename (str): the filename to write to
            tvl_header (:class:`list` or tuple): the header for the TVL file. This is a list of either 4 or 10 entries.
                4 entries: [version, res, gap, offset]
                10 entries: [version, x_res, x_gap, x_offset, y_res, y_gap, y_offset, z_res, z_gap, z_offset]
            direction_pairs (list of ndarrays): The list with direction pairs, only three are used.
                This is a list with (vector, magnitude) tuples in which the vectors are 4d volumes with for
                every voxel a 3d coordinate.
        """
        direction_pairs = direction_pairs[0:3]
        dir_matrix = np.zeros(direction_pairs[0][0].shape[0:3] + (12,))
        for ind, dirs in enumerate(direction_pairs):
            dir_matrix[..., ind*3:ind*3+3] = np.ascontiguousarray(TrackMark._ensure_3d(np.squeeze(dirs[0])))
            dir_matrix[..., 9 + ind] = np.ascontiguousarray(TrackMark._ensure_3d(np.squeeze(dirs[1])))

        TrackMark.write_tvl_matrix(tvl_filename, tvl_header, dir_matrix)
roidb.py (project: deep-fashion, author: zuowang)
def _compute_targets(rois, overlaps, labels):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return np.zeros((rois.shape[0], 5), dtype=np.float32)
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
        np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
roidb.py (project: RPN, author: hfut721)
def _compute_targets(rois, overlaps, labels):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return np.zeros((rois.shape[0], 5), dtype=np.float32)
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
        np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
roidb.py (project: Faster-RCNN_TF, author: smallcorgi)
def _compute_targets(rois, overlaps, labels):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return np.zeros((rois.shape[0], 5), dtype=np.float32)
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
        np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
libtcodpy.py (project: df-style-worldgen, author: Dozed12)
def console_fill_foreground(con,r,g,b) :
    if len(r) != len(g) or len(r) != len(b):
        raise TypeError('R, G and B must all have the same size.')

    if (numpy_available and isinstance(r, numpy.ndarray) and
        isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
        #numpy arrays, use numpy's ctypes functions
        r = numpy.ascontiguousarray(r, dtype=numpy.int_)
        g = numpy.ascontiguousarray(g, dtype=numpy.int_)
        b = numpy.ascontiguousarray(b, dtype=numpy.int_)
        cr = r.ctypes.data_as(POINTER(c_int))
        cg = g.ctypes.data_as(POINTER(c_int))
        cb = b.ctypes.data_as(POINTER(c_int))
    else:
        # otherwise convert using ctypes arrays
        cr = (c_int * len(r))(*r)
        cg = (c_int * len(g))(*g)
        cb = (c_int * len(b))(*b)

    _lib.TCOD_console_fill_foreground(con, cr, cg, cb)

