Example source code for Python's vstack()

base_klt.py (project: pybot, author: spillai)
def visualize(self, vis, colored=True): 

        try: 
            tids = set(self.ids)
        except: 
            return vis

        for hid, hbox in izip(self.ids, self.bboxes): 
            cv2.rectangle(vis, (hbox[0], hbox[1]), (hbox[2], hbox[3]), (0,255,0), 1)

        vis = super(BoundingBoxKLT, self).viz(vis, colored=colored)

        # for tid, pts in self.tm_.tracks.iteritems(): 
        #     if tid not in tids: continue
        #     cv2.polylines(vis, [np.vstack(pts.items).astype(np.int32)[-4:]], False, 
        #                   (0,255,0), thickness=1)
        #     tl, br = np.int32(pts.latest_item)-2, np.int32(pts.latest_item)+2
        #     cv2.rectangle(vis, (tl[0], tl[1]), (br[0], br[1]), (0,255,0), -1)

        # OpenCVKLT.draw_tracks(self, vis, colored=colored, max_track_length=10)
        return vis
draw_utils.py (project: pybot, author: spillai)
def draw_bboxes(vis, bboxes, texts=None, ellipse=False, colored=True):
    if not len(bboxes): 
        return vis

    if not colored: 
        cols = np.tile([240,240,240], [len(bboxes), 1])
    else: 
        N = 20
        cwheel = colormap(np.linspace(0, 1, N))
        cols = np.vstack([cwheel[idx % N] for idx, _ in enumerate(bboxes)])            

    texts = [None] * len(bboxes) if texts is None else texts
    for col, b, t in zip(cols, bboxes, texts): 
        if ellipse: 
            cv2.ellipse(vis, ((b[0]+b[2])/2, (b[1]+b[3])/2), ((b[2]-b[0])/2, (b[3]-b[1])/2), 0, 0, 360, 
                        color=tuple(col), thickness=1)
        else: 
            cv2.rectangle(vis, (b[0], b[1]), (b[2], b[3]), tuple(col), 2)
        if t: 
            annotate_bbox(vis, b, title=t)
    return vis
base_klt.py (project: pybot, author: spillai)
def draw_tracks(self, out, colored=False, color_type='unique', min_track_length=4, max_track_length=4):
        """
        color_type: {age, unique}
        """

        N = 20
        # inds = self.confident_tracks(min_length=min_track_length)
        # if not len(inds): 
        #     return

        # ids, pts = self.latest_ids[inds], self.latest_pts[inds]
        # lengths = self.tm_.lengths[inds]

        ids, pts, lengths = self.latest_ids, self.latest_pts, self.tm_.lengths

        if color_type == 'unique': 
            cwheel = colormap(np.linspace(0, 1, N))
            cols = np.vstack([cwheel[tid % N] for idx, tid in enumerate(ids)])
        elif color_type == 'age': 
            cols = colormap(lengths)
        else: 
            raise ValueError('Color type {:} undefined, use age or unique'.format(color_type))

        if not colored: 
            cols = np.tile([0,240,0], [len(self.tm_.tracks), 1])

        for col, pts in izip(cols.astype(np.int64), self.tm_.tracks.itervalues()): 
            cv2.polylines(out, [np.vstack(pts.items).astype(np.int32)[-max_track_length:]], False, 
                          tuple(col), thickness=1)
            tl, br = np.int32(pts.latest_item)-2, np.int32(pts.latest_item)+2
            cv2.rectangle(out, (tl[0], tl[1]), (br[0], br[1]), tuple(col), -1)
utils.py (project: cnn-graph-classification, author: giannisnik)
def compute_nystrom(ds_name, use_node_labels, embedding_dim, community_detection_method, kernels):
    if ds_name=="SYNTHETIC":
        graphs, labels = generate_synthetic()
    else:
        graphs, labels = load_data(ds_name, use_node_labels)
    communities, subgraphs = compute_communities(graphs, use_node_labels, community_detection_method)

    print("Number of communities: ", len(communities))
    lens = []
    for community in communities:
        lens.append(community.number_of_nodes())

    print("Average size: %.2f" % np.mean(lens))
    Q=[]
    for idx, k in enumerate(kernels):
        model = Nystrom(k, n_components=embedding_dim)
        model.fit(communities)
        Q_t = model.transform(communities)
        Q_t = np.vstack([np.zeros(embedding_dim), Q_t])
        Q.append(Q_t)

    return Q, subgraphs, labels, Q_t.shape
unet_d8g_222f.py (project: kaggle_dsb2017, author: astoc)
def grid_data(source, grid=32, crop=16, expand=12):
    gridsize = grid + 2 * expand
    stacksize = source.shape[0]
    height = source.shape[3]  # should be 224 for our data
    width = source.shape[4]

    gridheight = (height - 2 * crop) // grid  # should be 6 for our data
    gridwidth = (width - 2 * crop) // grid
    cells = []
    for j in range(gridheight):
        for i in range (gridwidth):
            cell = source[:,:,:, crop+j*grid-expand:crop+(j+1)*grid+expand, crop+i*grid-expand:crop+(i+1)*grid+expand]
            cells.append(cell)

    cells = np.vstack (cells)

    return cells, gridwidth, gridheight
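
A quick shape check with dummy data may help; this is only a sketch, assuming the 224x224 input mentioned in the comment above, and shows how np.vstack stacks the extracted cells along the first axis:

import numpy as np

# Hypothetical input: a batch of 2 volumes, 1 channel, depth 3, 224x224 slices.
source = np.zeros((2, 1, 3, 224, 224))
cells, gridwidth, gridheight = grid_data(source, grid=32, crop=16, expand=12)
print(gridwidth, gridheight)   # 6 6
print(cells.shape)             # (72, 1, 3, 56, 56): 6*6 cells of 2 stacks each, 56 = grid + 2*expand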
cumulative.py (project: zipline-chinese, author: zhanghan1990)
def calculate_beta(self):
        """

        .. math::

            \\beta_a = \\frac{\mathrm{Cov}(r_a,r_p)}{\mathrm{Var}(r_p)}

        http://en.wikipedia.org/wiki/Beta_(finance)
        """
        # it doesn't make much sense to calculate beta for less than two
        # values, so return none.
        if len(self.algorithm_returns) < 2:
            return 0.0

        returns_matrix = np.vstack([self.algorithm_returns,
                                    self.benchmark_returns])
        C = np.cov(returns_matrix, ddof=1)
        algorithm_covariance = C[0][1]
        benchmark_variance = C[1][1]
        beta = algorithm_covariance / benchmark_variance

        return beta
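
For reference, a minimal standalone sketch of the same beta computation on hypothetical return series (names and values are illustrative, not from zipline):

import numpy as np

algorithm_returns = np.array([0.01, -0.02, 0.015, 0.03])    # hypothetical algorithm returns
benchmark_returns = np.array([0.012, -0.018, 0.01, 0.025])  # hypothetical benchmark returns

returns_matrix = np.vstack([algorithm_returns, benchmark_returns])
C = np.cov(returns_matrix, ddof=1)   # 2x2 sample covariance matrix
beta = C[0][1] / C[1][1]             # Cov(r_a, r_p) / Var(r_p)
print(beta)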
BLISS.py (project: Lattice-Based-Signatures, author: krishnacharya)
def KeyGen(**kwargs):
    '''
    Appendix B of BLISS paper
    m_bar = m + n

    o/p:    
    A: Public Key n x m' numpy array
    S: Secret Key m' x n numpy array
    '''
    q, n, m, alpha = kwargs['q'], kwargs['n'], kwargs['m'], kwargs['alpha']
    Aq_bar = util.crypt_secure_matrix(-(q-1)/2, (q-1)/2, n, m)
    S_bar = util.crypt_secure_matrix(-(2)**alpha, (2)**alpha, m, n) # alpha is small enough, we need not reduce (modq)
    S = np.vstack((S_bar, np.eye(n, dtype = int))) # dimension is m_bar x n, Elements are in Z mod(2q)
    A = np.hstack((2*Aq_bar, q * np.eye(n, dtype = int) - 2*np.matmul(Aq_bar,S_bar))) # dimension is n x m_bar , Elements are in Z mod(2q)
    #return util.matrix_to_Zq(A, 2*q), S, Aq_bar, S_bar
    return util.matrix_to_Zq(A, 2*q), S
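
By construction, A.S = 2*Aq_bar*S_bar + q*I - 2*Aq_bar*S_bar = q*I, so A.S is congruent to q*I mod 2q. A numpy-only sanity check of that identity follows; it is a sketch in which util.crypt_secure_matrix is replaced by np.random.randint purely for illustration, with toy parameters:

import numpy as np

q, n, m, alpha = 7, 4, 6, 1  # illustrative toy parameters, not BLISS-secure values
Aq_bar = np.random.randint(-(q-1)//2, (q-1)//2 + 1, size=(n, m))
S_bar = np.random.randint(-2**alpha, 2**alpha + 1, size=(m, n))
S = np.vstack((S_bar, np.eye(n, dtype=int)))
A = np.hstack((2*Aq_bar, q*np.eye(n, dtype=int) - 2*np.matmul(Aq_bar, S_bar)))
# A.S collapses to q*I exactly, hence also mod 2q.
assert np.array_equal(np.matmul(A, S) % (2*q), (q*np.eye(n, dtype=int)) % (2*q))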
fishAffine3D.py (project: pycpd, author: siavashk)
def main():
    fish = loadmat('./data/fish.mat')

    X1 = np.zeros((fish['X'].shape[0], fish['X'].shape[1] + 1))
    X1[:,:-1] = fish['X']
    X2 = np.ones((fish['X'].shape[0], fish['X'].shape[1] + 1))
    X2[:,:-1] = fish['X']
    X = np.vstack((X1, X2))

    Y1 = np.zeros((fish['Y'].shape[0], fish['Y'].shape[1] + 1))
    Y1[:,:-1] = fish['Y']
    Y2 = np.ones((fish['Y'].shape[0], fish['Y'].shape[1] + 1))
    Y2[:,:-1] = fish['Y']
    Y = np.vstack((Y1, Y2))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=ax)

    reg = affine_registration(X, Y)
    reg.register(callback)
    plt.show()
pc_util.py (project: pointnet, author: charlesq34)
def volume_to_point_cloud(vol):
    """ vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
        return Nx3 numpy array.
    """
    vsize = vol.shape[0]
    assert(vol.shape[1] == vsize and vol.shape[2] == vsize)
    points = []
    for a in range(vsize):
        for b in range(vsize):
            for c in range(vsize):
                if vol[a,b,c] == 1:
                    points.append(np.array([a,b,c]))
    if len(points) == 0:
        return np.zeros((0,3))
    points = np.vstack(points)
    return points

# ----------------------------------------
# Point cloud IO
# ----------------------------------------
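
A hypothetical usage sketch for volume_to_point_cloud above (the grid size and occupancy threshold are arbitrary):

import numpy as np

vol = (np.random.rand(8, 8, 8) > 0.9).astype(np.int32)  # random 8x8x8 occupancy grid
points = volume_to_point_cloud(vol)
print(points.shape)  # (N, 3): one (a, b, c) voxel index per occupied cell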
test_metrics.py (project: GraphTime, author: GlooperLabs)
def standard_case(self):
        """Create standard testcase from Thetas defined in this Testcase. The following
        metrics can be calculated by hand and should match the computations:

        precisions: [1, 1, 0, 2/3, 1]
        recalls: [1, 1, 0, 1, 0.5]
        f1s: [1, 1, 0, 0.8, 2/3]
        tps: 1 + 1 + 0 + 2 + 1 = 5
        fps: 0 + 0 + 1 + 1 + 0 = 2
        fns: 0 + 0 + 2 + 0 + 1 = 3
        tns: 2 + 2 + 0 + 0 + 1 = 5
        """
        Theta_true = np.vstack([
            np.repeat(self.Theta_true1[nx, :, :], 2, axis=0),
            np.repeat(self.Theta_true2[nx, :, :], 3, axis=0)
        ])
        Theta_pred = np.vstack([
            np.repeat(self.Theta_pred1[nx, :, :], 3, axis=0),
            self.Theta_pred2[nx, :, :],
            self.Theta_pred3[nx, :, :]
        ])
        return Theta_true, Theta_pred
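
The np.repeat / np.vstack pattern above turns 2-D matrices into a 3-D stack of per-timestep matrices; a small shape sketch, assuming nx is an alias for np.newaxis as in the test module:

import numpy as np
nx = np.newaxis  # assumed alias

A = np.eye(2)
stacked = np.vstack([np.repeat(A[nx, :, :], 2, axis=0),
                     np.repeat(A[nx, :, :], 3, axis=0)])
print(stacked.shape)  # (5, 2, 2): five 2x2 matrices along a new first axis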
snippets.py (project: HandDetection, author: YunqiuXu)
def generate_anchors_pre(height, width, feat_stride, anchor_scales=(8,16,32), anchor_ratios=(0.5,1,2)):
  """ A wrapper function to generate anchors given different scales
    Also return the number of anchors in variable 'length'
  """
  anchors = generate_anchors(ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
  A = anchors.shape[0]
  shift_x = np.arange(0, width) * feat_stride
  shift_y = np.arange(0, height) * feat_stride
  shift_x, shift_y = np.meshgrid(shift_x, shift_y)
  shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
  K = shifts.shape[0]
  # width changes faster, so here it is H, W, C
  anchors = anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
  anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
  length = np.int32(anchors.shape[0])

  return anchors, length
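
The np.vstack call above packs one (x1, y1, x2, y2) shift per feature-map cell; a minimal illustration on a tiny 2x3 feature map with stride 16 (sizes chosen arbitrarily):

import numpy as np

feat_stride, height, width = 16, 2, 3
shift_x, shift_y = np.meshgrid(np.arange(0, width) * feat_stride,
                               np.arange(0, height) * feat_stride)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()
print(shifts.shape)  # (6, 4): one shift per cell, later broadcast against the (1, A, 4) anchors
print(shifts)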
draw_utils.py (project: pybot, author: spillai)
def draw_laser_frustum(pose, zmin=0.0, zmax=10, fov=np.deg2rad(60)): 

    N = 30
    curve = np.vstack([(
        RigidTransform.from_rpyxyz(0, 0, rad, 0, 0, 0) * np.array([[zmax, 0, 0]])) 
             for rad in np.linspace(-fov/2, fov/2, N)])

    curve_w = pose * curve

    faces, edges = [], []
    for cpt1, cpt2 in zip(curve_w[:-1], curve_w[1:]): 
        faces.extend([pose.translation, cpt1, cpt2])
        edges.extend([cpt1, cpt2])

    # Connect the last pt in the curve w/ the current pose, 
    # then connect the first pt in the curve w/ the curr. pose
    edges.extend([edges[-1], pose.translation])
    edges.extend([edges[0], pose.translation])

    faces = np.vstack(faces)
    edges = np.vstack(edges)
    return (faces, edges)
uw_rgbd.py (project: pybot, author: spillai)
def __init__(self, target, instance, files): 
            self.target = target 
            self.instance = instance
            mask_files = natural_sort(filter(lambda fn: '_maskcrop.png' in fn, files))
            depth_files = natural_sort(filter(lambda  fn: '_depthcrop.png' in fn, files))
            rgb_files = natural_sort(list(set(files) - set(mask_files) - set(depth_files)))
            loc_files = natural_sort(map(lambda fn: fn.replace('_crop.png', '_loc.txt'), rgb_files))

            # Ensure all have equal number of files (Hack! doesn't ensure filename consistency)
            nfiles = np.min([len(loc_files), len(mask_files), len(depth_files), len(rgb_files)])
            mask_files, depth_files, rgb_files, loc_files = mask_files[:nfiles], depth_files[:nfiles], \
                                                            rgb_files[:nfiles], loc_files[:nfiles]

            # print target, instance, len(loc_files), len(mask_files), len(depth_files), len(rgb_files)
            assert(len(mask_files) == len(depth_files) == len(rgb_files) == len(loc_files))

            # Read images
            self.rgb = ImageDatasetReader.from_filenames(rgb_files)
            self.depth = ImageDatasetReader.from_filenames(depth_files)
            self.mask = ImageDatasetReader.from_filenames(mask_files)

            # Read top-left locations of bounding box
            self.locations = np.vstack([np.loadtxt(loc, delimiter=',', dtype=np.int32) 
                                        for loc in loc_files])
sun3d_utils.py (project: pybot, author: spillai)
def __init__(self, frame=None): 
        """
        Load annotation from json
        """
        self.annotations_ = []

        # Retrieve polygons
        try: 
            polygons = frame['polygon']
        except: 
            return

        # For each polygon
        for poly in polygons: 

            # Get coordinates
            xy = np.vstack([np.float32(poly['x']), 
                            np.float32(poly['y'])]).T

            # Object ID (from local annotation file)
            object_id = poly['object']
            self.add(poly['object'], xy)
recognition_utils.py (project: pybot, author: spillai)
def im_detect_and_describe(img, mask=None, detector='dense', descriptor='SIFT', colorspace='gray',
                           step=4, levels=7, scale=np.sqrt(2)): 
    """ 
    Describe image using dense sampling / specific detector-descriptor combination. 
    """
    detector = get_detector(detector=detector, step=step, levels=levels, scale=scale)
    extractor = cv2.DescriptorExtractor_create(descriptor)

    try:     
        kpts = detector.detect(img, mask=mask)
        kpts, desc = extractor.compute(img, kpts)

        if descriptor == 'SIFT': 
            kpts, desc = root_sift(kpts, desc)

        pts = np.vstack([kp.pt for kp in kpts]).astype(np.int32)
        return pts, desc

    except Exception as e: 
        print 'im_detect_and_describe', e
        return None, None
recognition_utils.py (project: pybot, author: spillai)
def im_describe(*args, **kwargs): 
    """ 
    Describe image using dense sampling / specific detector-descriptor combination. 
    Sugar for description-only call. 
    """
    kpts, desc = im_detect_and_describe(*args, **kwargs)
    return desc

# def color_codes(img, kpts): 
#     # Extract color information (Lab)
#     pts = np.vstack([kp.pt for kp in kpts]).astype(np.int32)
#     imgc = median_blur(img, size=5) 
#     cdesc = img[pts[:,1], pts[:,0]]
#     return kpts, np.hstack([desc, cdesc])


# =====================================================================
# General-purpose object recognition interfaces, and functions
# ---------------------------------------------------------------------
camera_utils.py (project: pybot, author: spillai)
def points_and_normals(self): 
        """
        Returns the point/normals parametrization for planes, 
        including clipped zmin and zmax frustums

        Note: points need to be in CCW order
        """

        nv1, fv1 = self._front_back_vertices
        nv2 = np.roll(nv1, -1, axis=0)
        fv2 = np.roll(fv1, -1, axis=0)

        vx = np.vstack([fv1-nv1, nv2[0]-nv1[0], fv1[2]-fv1[1]])
        vy = np.vstack([fv2-fv1, nv2[1]-nv2[0], fv1[1]-fv1[0]])
        pts = np.vstack([fv1, nv1[0], fv1[1]])

        # vx += 1e-12
        # vy += 1e-12

        vx /= np.linalg.norm(vx, axis=1).reshape(-1,1)
        vy /= np.linalg.norm(vy, axis=1).reshape(-1,1)

        normals = np.cross(vx, vy)
        normals /= np.linalg.norm(normals, axis=1).reshape(-1,1)
        return pts, normals
optflow_utils.py (project: pybot, author: spillai)
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
    fx, fy = flow[y,x].T
    m = np.bitwise_and(np.isfinite(fx), np.isfinite(fy))
    lines = np.vstack([x[m], y[m], x[m]+fx[m], y[m]+fy[m]]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
validate_hashes.py (project: chash, author: luhsra)
def plotTimeMultiHistogram(parseTimes, hashTimes, compileTimes, filename): # times in ms
    bins = np.linspace(0, 5000, 50)
    data = np.vstack([parseTimes, hashTimes, compileTimes]).T
    fig, ax = plt.subplots()
    plt.hist(data, bins, alpha=0.7, label=['parsing', 'hashing', 'compiling'], color=[parseColor, hashColor, compileColor])
    plt.legend(loc='upper right')
    plt.xlabel('time [ms]')
    plt.ylabel('#files')
    fig.savefig(filename)

    fig, ax = plt.subplots()
    boxplot_data = [[i/1000 for i in parseTimes], [i/1000 for i in hashTimes], [i/1000 for i in compileTimes]] # times to s
    plt.boxplot(boxplot_data, 0, 'rs', 0, [5, 95])
    plt.xlabel('time [s]')
    plt.yticks([1, 2, 3], ['parsing', 'hashing', 'compiling'])
    #lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) # legend on the right
    fig.savefig(filename[:-4] + '_boxplots' + GRAPH_EXTENSION)
sample.py (project: CausalGAN, author: mkocaoglu)
def take_product(do_dict):
    '''
    This function takes a dictionary like:
        {key1:1, key2:[a,b], key3:[c,d]}
    and returns the dictionary:
        {key1:[1,1,1,1], key2:[a,a,b,b], key3:[c,d,c,d]}
    computing the Cartesian product of the values.
    '''
    values=[]
    for v in do_dict.values():
        if hasattr(v,'__iter__'):
            values.append(v)
        else:
            values.append([v])#allows scalar to be passed

    prod_values=np.vstack(list(product(*values)))
    return {k:np.array(v) for k,v in zip(do_dict.keys(),zip(*prod_values))}
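
A hypothetical call matching the docstring (exact column order depends on dictionary ordering; values come back as numpy arrays):

do_dict = {'key1': 1, 'key2': ['a', 'b'], 'key3': ['c', 'd']}
out = take_product(do_dict)
# Four combinations from the 2 x 2 product, so e.g.
# out['key2'] -> array(['a', 'a', 'b', 'b'], ...) and out['key3'] -> array(['c', 'd', 'c', 'd'], ...),
# while the scalar key1 is repeated once per combination.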
distributions.py (project: CausalGAN, author: mkocaoglu)
def get_interv_table(model,intrv=True):

    n_batches=25
    table_outputs=[]
    d_vals=np.linspace(TINY,0.6,n_batches)
    for name in model.cc.node_names:
        outputs=[]
        for d_val in d_vals:
            do_dict={model.cc.node_dict[name].label_logit : d_val*np.ones((model.batch_size,1))}
            outputs.append(model.sess.run(model.fake_labels,do_dict))

        out=np.vstack(outputs)
        table_outputs.append(out)

    table=np.stack(table_outputs,axis=2)

    np.mean(np.round(table),axis=0)

    return table

#dT=pd.DataFrame(index=p_names, data=T, columns=do_names)
#T=np.mean(np.round(table),axis=0)
#table=get_interv_table(model)
spikesorting.py (project: NeoAnalysis, author: neoanalysis)
def __loadChnTimeWave(self,f,selectChan):
        times = list()
        waveforms = list()
        spk_startswith = "spike_{0}".format(selectChan)
        for chn_unit in f["spikes"].keys():
            if chn_unit.startswith(spk_startswith):
                time = f["spikes"][chn_unit]["times"].value
                waveform = f["spikes"][chn_unit]["waveforms"].value
                times.append(time)
                waveforms.append(waveform)
        if times:
            times = np.hstack(times)
            waveforms = np.vstack(waveforms)
            sort_index = np.argsort(times)
            waveforms = waveforms[sort_index]
            times = times[sort_index]
            return times,waveforms
        else:
            return None,None
spikesorting.py (project: NeoAnalysis, author: neoanalysis)
def __load_waveforms(self,selectChan,file_name):
        spk_startswith = "spike_{0}".format(selectChan)
        with hp.File(file_name,"r") as f:
            times = list()
            waveforms = list()
            for chn_unit in f["spikes"].keys():
                if chn_unit.startswith(spk_startswith):
                    tep_time = f["spikes"][chn_unit]["times"].value
                    waveform = f["spikes"][chn_unit]["waveforms"].value
                    times.append(tep_time)
                    waveforms.append(waveform)
            if times:
                times = np.hstack(times)
                waveforms = np.vstack(waveforms)
                sort_index = np.argsort(times)
                waveforms = waveforms[sort_index]
                return waveforms
            else:
                return None
pynnio.py (project: NeoAnalysis, author: neoanalysis)
def _extract_signals(self, data, metadata, lazy):

        signal = None
        if lazy and data.size > 0:
            signal = AnalogSignal([],
                                  units=self._determine_units(metadata),
                                  sampling_period=metadata['dt']*pq.ms)
            signal.lazy_shape = None
        else:
            arr = numpy.vstack([self._extract_array(data, channel_index)
                                for channel_index in range(metadata['first_index'], metadata['last_index'] + 1)])
            if len(arr) > 0:
                signal = AnalogSignal(arr.T,
                                      units=self._determine_units(metadata),
                                      sampling_period=metadata['dt']*pq.ms)
        if signal is not None:
            signal.annotate(label=metadata["label"],
                            variable=metadata["variable"])
        return signal
XGBoostClassifier.py (project: ensemble_amazon, author: kaz-Anova)
def predict_proba(self, X): 
    try:
      rows=(X.shape[0])
    except:
      rows=len(X)
    X1 = self.build_matrix(X)
    if self.k_models is not None and len(self.k_models) < 2:
        predictions = self.bst.predict(X1)
    else :
        dtest = xgb.DMatrix(X)
        predictions= None
        for gbdt in self.k_models:
            predsnew = gbdt.predict(dtest, ntree_limit=(gbdt.best_iteration+1)*self.num_parallel_tree)  
            if predictions is None:
                predictions=predsnew
            else:
                for g in range (0, predsnew.shape[0]):
                    predictions[g]+=predsnew[g]
        for g in range (0, len(predictions)):
            predictions[g]/=float(len(self.k_models))               
        predictions=np.array(predictions)
    if self.objective == 'multi:softprob': return predictions.reshape( rows, self.num_class)
    return np.vstack([1 - predictions, predictions]).T
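
The final line above converts a vector of positive-class scores into the two-column (negative, positive) layout that scikit-learn-style predict_proba returns; in isolation, with illustrative values:

import numpy as np

predictions = np.array([0.1, 0.7, 0.5])
proba = np.vstack([1 - predictions, predictions]).T
# approximately [[0.9, 0.1],
#                [0.3, 0.7],
#                [0.5, 0.5]]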
trainModel.py (project: Sound-classification-on-Raspberry-Pi-with-Tensorflow, author: GianlucaPaolocci)
def parse_audio_files(parent_dir,sub_dirs,file_ext='*.wav'):
    ignored = 0
    features, labels, name = np.empty((0,161)), np.empty(0), np.empty(0)
    for label, sub_dir in enumerate(sub_dirs):
        print sub_dir
        for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):
            try:
                mfccs, chroma, mel, contrast, tonnetz = extract_features(fn)
                ext_features = np.hstack([mfccs, chroma, mel, contrast, tonnetz])
                features = np.vstack([features,ext_features])
                l = [fn.split('-')[1]] * (mfccs.shape[0])
                labels = np.append(labels, l)
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                ignored += 1
    print "Ignored files: ", ignored
    return np.array(features), np.array(labels, dtype = np.int)
minibatch.py (project: dpl, author: ppengtang)
def _get_image_blob(roidb, scale_inds):
    """Builds an input blob from the images in the roidb at the specified
    scales.
    """
    num_images = len(roidb)
    processed_ims = []
    im_scales = []
    im_shapes = np.zeros((0, 2), dtype=np.float32)
    for i in xrange(num_images):
        im = cv2.imread(roidb[i]['image'])
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        target_size = cfg.TRAIN.SCALES[scale_inds[i]]
        im, im_scale, im_shape = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size)
        im_scales.append(im_scale)
        processed_ims.append(im)
        im_shapes = np.vstack((im_shapes, im_shape))

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, im_scales, im_shapes
gps.py (project: PyGPS, author: gregstarr)
def _block2df(block,obstypes,svnames,svnum):
    """
    input: block of text corresponding to one time increment INTERVAL of RINEX file
    output: 2-D array of float64 data from block. Future: consider whether best to use Numpy, Pandas, or Xray.
    """
    nobs = len(obstypes)
    stride=3

    strio = BytesIO(block.encode())
    barr = np.genfromtxt(strio, delimiter=(14,1,1)*5).reshape((svnum,-1), order='C')

    data = barr[:,0:nobs*stride:stride]
    lli  = barr[:,1:nobs*stride:stride]
    ssi  = barr[:,2:nobs*stride:stride]

    data = np.vstack(([data.T],[lli.T],[ssi.T])).T

    return data
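
The final np.vstack(([data.T], [lli.T], [ssi.T])).T interleaves the three per-observation fields into one 3-D array; a shape-only sketch with 4 satellites and 3 observation types (dummy zeros, chosen arbitrarily):

import numpy as np

data = np.zeros((4, 3))  # one row per satellite, one column per observation type
lli = np.zeros((4, 3))
ssi = np.zeros((4, 3))
out = np.vstack(([data.T], [lli.T], [ssi.T])).T
print(out.shape)  # (4, 3, 3): satellite x observation type x (value, LLI, SSI)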

