Python split() example source code
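The snippets below are collected from open-source projects; most use OpenCV's cv2.split, a few use the string method str.split. As a baseline, cv2.split separates a multi-channel image into single-channel arrays and cv2.merge reassembles them. A minimal sketch (the file name is a placeholder):

import cv2

img = cv2.imread('input.png')      # placeholder path; OpenCV loads color images in BGR order
b, g, r = cv2.split(img)           # three single-channel uint8 arrays
restored = cv2.merge([b, g, r])    # reassembles the original image
assert (restored == img).all()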

CV2.py (project: reconstruction, author: microelly2)
def execute_Color(proxy, obj):

    try:
        img = obj.sourceObject.Proxy.img.copy()
    except Exception:
        img = cv2.imread(__dir__ + '/icons/freek.png')

    # split the BGR image into its single channels
    b, g, r = cv2.split(img)

    if obj.red:
        obj.Proxy.img = cv2.cvtColor(r, cv2.COLOR_GRAY2RGB)
    if obj.blue:
        obj.Proxy.img = cv2.cvtColor(b, cv2.COLOR_GRAY2RGB)
    if obj.green:
        # the green channel is inverted before conversion
        obj.Proxy.img = cv2.cvtColor(255 - g, cv2.COLOR_GRAY2RGB)
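A side note on the pattern above: when only one channel is needed, plain NumPy indexing avoids the copies that cv2.split makes of the channels you discard. A minimal sketch (file name is a placeholder):

import cv2

img = cv2.imread('input.png')   # placeholder path; BGR channel order
b = img[:, :, 0]                # view of the blue channel, no copy
r = img[:, :, 2]                # view of the red channel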
CV_combiner.py (project: reconstruction, author: microelly2)
def animpingpong(self):
        obj=self.Object

        res=None
        for t in obj.OutList:
            print(t.Label)
            img=t.ViewObject.Proxy.img.copy()
            if res is None:
                res=img.copy()
            else:
                #rr=cv2.subtract(res,img)
                #rr=cv2.add(res,img)

                aw = float(obj.aWeight) / 100
                bw = float(obj.bWeight) / 100
                print(aw)
                print(bw)
                if obj.aInverse:
                    # invert b (threshold, then bitwise NOT)
                    ret, mask = cv2.threshold(img, 50, 255, cv2.THRESH_BINARY)
                    img=cv2.bitwise_not(mask)
                rr=cv2.addWeighted(res,aw,img,bw,0)
                res=rr
        #b,g,r = cv2.split(res)
        cv2.imshow(obj.Label,res)
        #cv2.imshow(obj.Label +" b",b)
        #cv2.imshow(obj.Label + " g",g)
        #cv2.imshow(obj.Label + " r",r)

        res = img  # note: this overwrites the accumulated result with the last input image

        if not obj.matplotlib:
            cv2.imshow(obj.Label,img)
        else:
            from matplotlib import pyplot as plt
            # plt.subplot(121),
            plt.imshow(img,cmap = 'gray')
            plt.title(obj.Label), plt.xticks([]), plt.yticks([])
            plt.show()

        self.img=img
piwall.py (project: piwall-cvtools, author: infinnovation)
def binaryContoursNestingFilterHeuristic(img, cnts, *args, **kwargs):
    '''
    Concept  : Use the found contours, with binary drawn contours to extract hierarchy and hence filter on nesting.
    Critique : WIP
    '''
    # Set the image to black (0): 
    img[:,:] = (0,0,0)
    # Draw all of the contours on the image in white
    contours = [c.contour for c in cnts]
    cv2.drawContours( img, contours, -1, (255, 255, 255), 1 )
    iv = ImageViewer(img)
    iv.windowShow()
    # Now extract any channel
    gray = cv2.split(img)[0]
    iv = ImageViewer(gray)
    iv.windowShow()
    retval, bin = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
    iv = ImageViewer(bin)
    iv.windowShow()
    # Now find the contours again, but this time we care about hierarchy (hence _TREE) - we get back next, previous, first_child, parent
    # (OpenCV 3.x signature: findContours returns (image, contours, hierarchy); OpenCV 4.x returns just (contours, hierarchy))
    bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    iv = ImageViewer(bin)
    iv.windowShow()
    # Alternative flags : only take the external contours
    bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    iv = ImageViewer(bin)
    iv.windowShow()
    return cnts
piwall.py (project: piwall-cvtools, author: infinnovation)
def comment(self, msg):
        if isinstance(msg, str):
            self.fh.write('%s\n' % msg)
        else:
            lines = '%s' % msg
            newlines = []
            for l in lines.split('\n'):
                newlines.append('\t%s' % l)
            self.fh.write('\n'.join(newlines))
        print('%s' % msg)
vwriter.py (project: piwall-cvtools, author: infinnovation)
def addFrame(self, frame, width=300):
        frame = imutils.resize(frame, width)

        # check if the writer is None
        if self.writer is None:
            # store the image dimensions, initialize the video writer,
            # and construct the zeros array
            (self.h, self.w) = frame.shape[:2]
            self.writer = cv2.VideoWriter(self.output, self.fourcc, self.fps,
                                          (self.w * 2, self.h * 2), True)
            self.zeros = np.zeros((self.h, self.w), dtype="uint8")

        # break the image into its B, G, R components, then construct a
        # color visualization of each channel individually
        (B, G, R) = cv2.split(frame)
        R = cv2.merge([self.zeros, self.zeros, R])
        G = cv2.merge([self.zeros, G, self.zeros])
        B = cv2.merge([B, self.zeros, self.zeros])

        # construct the final output frame, storing the original frame
        # at the top-left, the red channel in the top-right, the green
        # channel in the bottom-right, and the blue channel in the
        # bottom-left
        output = np.zeros((self.h * 2, self.w * 2, 3), dtype="uint8")
        output[0:self.h, 0:self.w] = frame
        output[0:self.h, self.w:self.w * 2] = R
        output[self.h:self.h * 2, self.w:self.w * 2] = G
        output[self.h:self.h * 2, 0:self.w] = B

        # write the output frame to file
        self.writer.write(output)
ColorLayoutComputer.py (project: imgpedia, author: scferrada)
def compute(self, img):
        averages = np.zeros((self.rows,self.cols,3))
        imgH, imgW, _ = img.shape
        for row in range(self.rows):
            for col in range(self.cols):
                block = img[imgH // self.rows * row: imgH // self.rows * (row + 1), imgW // self.cols * col: imgW // self.cols * (col + 1)]
                average_color_per_row = np.mean(block, axis=0)
                average_color = np.mean(average_color_per_row, axis=0)
                average_color = np.uint8(average_color)
                averages[row][col][0] = average_color[0]
                averages[row][col][1] = average_color[1]
                averages[row][col][2] = average_color[2]
        icon = cv2.cvtColor(np.array(averages, dtype=np.uint8), cv2.COLOR_BGR2YCR_CB)
        y, cr, cb = cv2.split(icon)
        dct_y = cv2.dct(np.float32(y))
        dct_cb = cv2.dct(np.float32(cb))
        dct_cr = cv2.dct(np.float32(cr))
        dct_y_zigzag = []
        dct_cb_zigzag = []
        dct_cr_zigzag = []
        # zigzag scan: walk the anti-diagonals of each flipped DCT matrix,
        # reversing direction on every other diagonal
        flip = True
        flipped_dct_y = np.fliplr(dct_y)
        flipped_dct_cb = np.fliplr(dct_cb)
        flipped_dct_cr = np.fliplr(dct_cr)
        for i in range(self.rows + self.cols -1):
            k_diag = self.rows - 1 - i
            diag_y = np.diag(flipped_dct_y, k=k_diag)
            diag_cb = np.diag(flipped_dct_cb, k=k_diag)
            diag_cr = np.diag(flipped_dct_cr, k=k_diag)
            if flip:
                diag_y = diag_y[::-1]
                diag_cb = diag_cb[::-1]
                diag_cr = diag_cr[::-1]
            dct_y_zigzag.append(diag_y)
            dct_cb_zigzag.append(diag_cb)
            dct_cr_zigzag.append(diag_cr)
            flip = not flip
        return np.concatenate([np.concatenate(dct_y_zigzag), np.concatenate(dct_cb_zigzag), np.concatenate(dct_cr_zigzag)])
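The anti-diagonal loop above implements a JPEG-style zigzag scan of each DCT matrix. It can be tested in isolation; a minimal sketch (the helper name zigzag is ours, not from the project):

import numpy as np

def zigzag(m):
    # flatten a square matrix in zigzag order by walking the
    # anti-diagonals of the horizontally flipped matrix
    rows, cols = m.shape
    flipped = np.fliplr(m)
    out = []
    flip = True
    for i in range(rows + cols - 1):
        diag = np.diag(flipped, k=rows - 1 - i)
        out.append(diag[::-1] if flip else diag)
        flip = not flip
    return np.concatenate(out)

print(zigzag(np.arange(9).reshape(3, 3)))   # [0 1 3 6 4 2 5 7 8]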
test_scene_detector.py (project: ATX, author: NetEaseGame)
def test_detect():
    dev = AndroidDeviceMinicap()
    dev._adb.start_minitouch()
    time.sleep(3)

    d = SceneDetector('txxscene')
    old, new = None, None
    while True:
        # time.sleep(0.3)
        screen = dev.screenshot_cv2()
        h, w = screen.shape[:2]
        img = cv2.resize(screen, (w // 2, h // 2))  # cv2.resize needs integer dimensions

        # find hsv
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
        _, _, V = cv2.split(hsv)
        V[V<150] = 0
        cv2.imshow('V', V)
        _, L, _ = cv2.split(hls)  # HLS channel order is H, L, S
        L[L<150] = 0
        cv2.imshow('L', L)

        tic = time.perf_counter()  # time.clock() was removed in Python 3.8
        new = str(d.detect(img))
        t = time.perf_counter() - tic
        if new != old:
            print('change to', new)
            print('cost time', t)
        old = new

        for _, r in d.current_scene:
            x, y, x1, y1 = r
            cv2.rectangle(img, (x,y), (x1,y1), (0,255,0) ,2)
        cv2.imshow('test', img)
        cv2.waitKey(1)
scene_detector.py (project: ATX, author: NetEaseGame)
def find_match(img, tmpl, rect=None, mask=None):
    if rect is not None:
        h, w = img.shape[:2]
        x, y, x1, y1 = rect
        if x1 > w or y1 > h:
            return 0, None
        img = img[y:y1, x:x1, :]

        if mask is not None:
            img = img.copy()
            img[mask!=0] = 0
            tmpl = tmpl.copy()
            tmpl[mask!=0] = 0

    s_bgr = cv2.split(tmpl) # Blue Green Red
    i_bgr = cv2.split(img)

    weight = (0.3, 0.3, 0.4)
    resbgr = [0, 0, 0]
    for i in range(3): # bgr
        resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
    match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
    confidence = max_val
    x, y = max_loc
    h, w = tmpl.shape[:2]
    if rect is None:
        rect = (x, y, x+w, y+h)
    # cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0) ,2)
    # cv2.imshow('test', img)
    # cv2.waitKey(20)
    return confidence, rect
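A minimal usage sketch for find_match (file names are placeholders): the function returns the weighted per-channel TM_CCOEFF_NORMED score as the confidence, plus the matched rectangle.

import cv2

screen = cv2.imread('screen.png')   # placeholder paths
tmpl = cv2.imread('button.png')
confidence, rect = find_match(screen, tmpl)
if confidence > 0.9:                # threshold chosen for illustration
    print('match at', rect)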
increase_picture.py (project: tensorflow-pi, author: karaage0703)
def equalizeHistRGB(src):

    # cv2.split on a BGR image returns channels in B, G, R order
    channels = list(cv2.split(src))
    for i in range(3):
        # equalizeHist returns a new array, so the result must be stored back
        channels[i] = cv2.equalizeHist(channels[i])

    img_hist = cv2.merge(channels)
    return img_hist

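Equalizing the three color channels independently, as above, can shift hues. A common alternative equalizes only the luminance channel; a minimal sketch (the function name is ours):

import cv2

def equalize_luminance(bgr):
    # equalize only the Y channel of YCrCb to avoid color shifts
    ycrcb = cv2.cvtColor(bgr, cv2.COLOR_BGR2YCrCb)
    y, cr, cb = cv2.split(ycrcb)
    y = cv2.equalizeHist(y)
    return cv2.cvtColor(cv2.merge([y, cr, cb]), cv2.COLOR_YCrCb2BGR)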
opencv_functions.py (project: RealtimeFacialEmotionRecognition, author: sushant3095)
def rgb(bgr_img):
    b,g,r = cv.split(bgr_img)       # get b,g,r
    rgb_img = cv.merge([r,g,b])     # switch it to rgb
    return rgb_img
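For a pure channel-order swap like this, cv2.cvtColor gives the same result without an explicit split and merge; an equivalent one-liner (keeping the cv alias used above):

def rgb(bgr_img):
    return cv.cvtColor(bgr_img, cv.COLOR_BGR2RGB)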

# Given directory loc, get all images in directory and crop to just faces
# Returns face_list, an array of cropped image file names
opencv_functions.py (project: RealtimeFacialEmotionRecognition, author: sushant3095)
def toggleRGB(img):
  r,g,b = cv.split(img)
  img = cv.merge([b,g,r])
  return img

# Combine two images for displaying side-by-side
# If maxSize is true, crops sides of image to keep under 2880 pixels in width
functions.py (project: cvloop, author: shoeffner)
def load_hat(self, path):  # pylint: disable=no-self-use
        """Loads the hat from a picture at path.

        Args:
            path: The path to load from

        Returns:
            The hat data.
        """
        hat = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if hat is None:
            raise ValueError('No hat image found at `{}`'.format(path))
        b, g, r, a = cv2.split(hat)
        return cv2.merge((r, g, b, a))
DataRow.py (project: face-landmark, author: lsy17096535)
def predict(self, resized):
        """
        @resized: image 40,40 already pre processed 
        """         
        #self.net.blobs['data'].data[...] = cv2.split(resized)
        self.net.blobs['data'].data[...] = resized.reshape(1,1,60,60)
        prediction = self.net.forward()['Dense3'][0]
        return prediction
preprocessing.py (project: pycolor_detection, author: parth1993)
def removebg(segmented_img):
    # decode the (retval, buffer) pair produced by cv2.imencode
    src = cv2.imdecode(np.squeeze(np.asarray(segmented_img[1])), 1)
    # any non-black pixel becomes opaque in the alpha mask
    tmp = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    _, alpha = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY)
    b, g, r = cv2.split(src)
    rgba = [b, g, r, alpha]
    dst = cv2.merge(rgba)
    processed_img = cv2.imencode('.png', dst)

    return processed_img
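A minimal usage sketch for removebg (file names are placeholders): it expects the (retval, buffer) pair produced by cv2.imencode and returns the same kind of pair for the PNG with an alpha channel.

import cv2

img = cv2.imread('segmented.png')     # placeholder path
encoded = cv2.imencode('.png', img)   # (retval, buffer) pair, as removebg expects
ok, png_buf = removebg(encoded)
with open('no_bg.png', 'wb') as f:
    f.write(png_buf.tobytes())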
feature_extractor.py (project: traffic-light-detection, author: ranveeraggarwal)
def extract_features():
    pos_img_path = positive_images_path
    neg_img_path = negative_images_path

    pos_feat_path = positive_features_path
    neg_feat_path = negative_features_path

    if not os.path.isdir(pos_feat_path):
        os.makedirs(pos_feat_path)

    if not os.path.isdir(neg_feat_path):
        os.makedirs(neg_feat_path)

    print "Extracting positive features"
    progress = 0.0
    for im_path in glob.glob(os.path.join(pos_img_path, "*")):
        im = imread(im_path)
        im_ycbcr = cv2.cvtColor(im, cv2.COLOR_RGB2YCR_CB)
        im = cv2.split(im_ycbcr)[0]
        feature_vector = hog(image=im, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3), visualise=False)
        feature_name = os.path.split(im_path)[1].split(".")[0] + ".feat"
        feature_path = os.path.join(pos_feat_path, feature_name)
        joblib.dump(feature_vector, feature_path)
        progress += 1.0
        update_progress(progress/float(len(glob.glob(os.path.join(pos_img_path, "*")))))

    print "Extracting negative features"
    progress = 0.0
    for im_path in glob.glob(os.path.join(neg_img_path, "*")):
        im = imread(im_path)
        im_ycbcr = cv2.cvtColor(im, cv2.COLOR_RGB2YCR_CB)
        im = cv2.split(im_ycbcr)[0]
        feature_vector = hog(image=im, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3), visualise=False)
        feature_name = os.path.split(im_path)[1].split(".")[0] + ".feat"
        feature_path = os.path.join(neg_feat_path, feature_name)
        joblib.dump(feature_vector, feature_path)
        progress += 1.0
        update_progress(progress/float(len(glob.glob(os.path.join(neg_img_path, "*")))))
test_classifier.py (project: traffic-light-detection, author: ranveeraggarwal)
def get_regions(roi_path):
    with open(roi_path, 'r') as fh:
        lines = fh.read().split("\n")
    return [line.split() for line in lines]
test_classifier.py (project: traffic-light-detection, author: ranveeraggarwal)
def test_classifier(img_path, roi_path):
    model_path = classifier_model_path
    # Load the classifier
    clf = joblib.load(model_path)

    max_win_y = 171
    max_win_x = 70

    detections = []

    regions = get_regions(roi_path)

    im = imread(img_path)
    im_ycbcr = cv2.cvtColor(im, cv2.COLOR_RGB2YCR_CB)
    im = cv2.split(im_ycbcr)[0]

    for region in regions:
        x = int(float(region[0])*1000)
        y = int(float(region[1])*1000)

        im_window = im[y: y + max_win_y, x: x + max_win_x]

        fd = hog(image=im_window, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3), visualise=False)

        if len(fd) == 9234:
            prediction = clf.predict(fd.reshape(1, -1))

            if prediction == 1:
                score = clf.decision_function(fd.reshape(1, -1))
                print("Detection:: Location -> ({}, {})".format(x, y))
                print("Confidence Score {} \n".format(score))
                detections.append((x, y, score))

    im = imread(img_path)
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

    for (x_tl, y_tl, _) in detections:
        cv2.rectangle(im, (x_tl, y_tl), (x_tl+max_win_x, y_tl+max_win_y), (0, 255, 0), thickness=1)
    cv2.imwrite("result.png", im)
lara_cropper.py (project: traffic-light-detection, author: ranveeraggarwal)
def gen_neg():
    progress = 0.0
    cropped_images = []
    for i in range(9000):
        frame_number = str(random.randint(0, 11178))
        frame = 'frame_' + '0'*(6-len(frame_number)) + frame_number + '.jpg'
        img = cv2.imread("../lara_data/images/" + frame)
        height, width = img.shape[:2]
        x = random.randint(max_window_size[0], width - max_window_size[0])
        y = random.randint(max_window_size[1], height - max_window_size[1])
        up_limit = y - max_window_size[1] // 2
        down_limit = y + max_window_size[1] // 2
        left_limit = x - max_window_size[0] // 2
        right_limit = x + max_window_size[0] // 2
        cropped_img = img[up_limit: down_limit, left_limit: right_limit]
        h, w = cropped_img.shape[:2]
        if int(w) == int(max_window_size[0]) and int(h) == int(max_window_size[1]):
            cropped_images.append(cropped_img)
        progress += 1.0
        update_progress(progress/float(9000))

    print("Generating Negative Images")

    progress = 0.0

    i = 0
    for cropped_image in cropped_images:
        # out_image = cv2.cvtColor(cropped_image, cv2.COLOR_RGB2YCR_CB)
        # out_image = cv2.split(out_image)[0]
        out_image = cropped_image
        image_name = "neg" + str(i) + ".ppm"
        image_path = os.path.join(neg_img_path, image_name)
        cv2.imwrite(image_path, out_image)
        progress += 1.0
        i += 1
        update_progress(progress/float(len(cropped_images)))

