Python bitwise_and() example source code
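
All of the snippets on this page follow the same basic OpenCV pattern: build a binary mask (usually with cv2.inRange or cv2.threshold) and pass it to cv2.bitwise_and(src, src, mask=mask) so that only the pixels where the mask is non-zero survive. Below is a minimal, self-contained sketch of that pattern; the synthetic image and the HSV range are made up for illustration and do not come from any of the projects listed here.

import cv2
import numpy as np

# synthetic 200x200 BGR test image: left half blue, right half red
img = np.zeros((200, 200, 3), np.uint8)
img[:, :100] = (255, 0, 0)   # blue in BGR
img[:, 100:] = (0, 0, 255)   # red in BGR

# build a mask that is 255 where the pixel falls in the (illustrative) blue HSV range
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_blue = np.array([100, 50, 50])
upper_blue = np.array([130, 255, 255])
mask = cv2.inRange(hsv, lower_blue, upper_blue)

# keep the original pixels only where mask != 0; everything else becomes black
res = cv2.bitwise_and(img, img, mask=mask)
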

gesture_hci.py (project: CE264-Computer_Vision, author: RobinCPC)
def skin_calib(self, raw_yrb):
        mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)
        cal_skin = cv2.bitwise_and(raw_yrb, raw_yrb, mask=mask_skin)
        cv2.imshow('YRB_calib', cal_skin)
        k = cv2.waitKey(5) & 0xFF
        if k == ord('s'):
            self.calib_switch = False
            cv2.destroyWindow('YRB_calib')

        ymin = cv2.getTrackbarPos('Ymin', 'YRB_calib')
        ymax = cv2.getTrackbarPos('Ymax', 'YRB_calib')
        rmin = cv2.getTrackbarPos('CRmin', 'YRB_calib')
        rmax = cv2.getTrackbarPos('CRmax', 'YRB_calib')
        bmin = cv2.getTrackbarPos('CBmin', 'YRB_calib')
        bmax = cv2.getTrackbarPos('CBmax', 'YRB_calib')
        self.mask_lower_yrb = np.array([ymin, rmin, bmin])
        self.mask_upper_yrb = np.array([ymax, rmax, bmax])


# Do skin detection with some filtering
screencp.py (project: OpenAI_Challenges, author: AlwaysLearningDeeper)
def roi(img,vertices):
    # blank mask:
    mask = np.zeros_like(img)

    # filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, 255)

    # returning the image only where mask pixels are nonzero
    masked = cv2.bitwise_and(img, mask)
    return masked
color_picker.py (project: Opencv-Python-Examples-, author: arijitx)
def color_picker(rect):
    global img,img_gray2,hsv
    roi=img[rect[0][1]:rect[1][1],rect[0][0]:rect[1][0]]
    b,g,r,_=np.uint8(cv2.mean(roi))
    color=cv2.cvtColor(np.uint8([[[b,g,r]]]),cv2.COLOR_BGR2HSV)
    h= color[0][0][0]
    # define a hue range around the picked color in HSV
    lower = np.array([h-10,50,50])
    upper = np.array([h+10,255,255])

    # Threshold the HSV image to keep only the picked color range
    mask = cv2.inRange(hsv, lower, upper)

    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(img,img, mask= mask)
    res2=cv2.bitwise_and(img_gray2,img_gray2, mask= cv2.bitwise_not(mask))
    return res+res2
10-PiStorms_icontracker.py (project: PiStorms, author: mindsensors)
def findSquare( self,frame ):
        image = frame  # the rest of this method operates on `image`
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (7, 7), 0)
        edged = cv2.Canny(blurred, 60, 60)
        # find contours in the edge map
        (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # loop over our contours to find the square
        cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:50]
        screenCnt = None
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.004 * peri, True)
            # if our approximated contour has four or more points, then
            # we can assume that we have found our square

            if len(approx) >= 4:
                screenCnt = approx
                x,y,w,h = cv2.boundingRect(c)
                cv2.drawContours(image, [approx], -1, (0, 0, 255), 1)
                #cv2.imshow("Screen", image)
                #create the mask and remove rest of the background
                mask = np.zeros(image.shape[:2], dtype = "uint8")
                cv2.drawContours(mask, [screenCnt], -1, 255, -1)
                masked = cv2.bitwise_and(image, image, mask = mask)
                #cv2.imshow("Masked",masked  )
                #crop the masked image to be compared to the reference image
                cropped = masked[y:y+h,x:x+w]
                #scale the image to the same fixed size as the reference image
                cropped = cv2.resize(cropped, (200,200), interpolation =cv2.INTER_AREA)

                return cropped
webcam_track_blobs.py (project: pc-drone, author: perrytsao)
def add_blobs(crop_frame):
    frame=cv2.GaussianBlur(crop_frame, (3, 3), 0)
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of green color in HSV
    lower_green = np.array([70,50,50])
    upper_green = np.array([85,255,255])
    # Threshold the HSV image to get only green colors
    mask = cv2.inRange(hsv, lower_green, upper_green)
    mask = cv2.erode(mask, None, iterations=1)
    mask = cv2.dilate(mask, None, iterations=1)    
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame,frame, mask= mask)
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs.
    reversemask=255-mask
    keypoints = detector.detect(reversemask)
    if keypoints:
        print "found blobs"
        if len(keypoints) > 4:
            keypoints.sort(key=(lambda s: s.size))
            keypoints=keypoints[0:3]
        # Draw detected blobs as red circles.
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
        im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    else:
        print "no blobs"
        im_with_keypoints=crop_frame

    return im_with_keypoints #, max_blob_dist, blob_center, keypoint_in_orders
gesture_hci.py (project: CE264-Computer_Vision, author: RobinCPC)
def skin_detect(self, raw_yrb, img_src):
        # use median blurring to remove signal noise in YCRCB domain
        raw_yrb = cv2.medianBlur(raw_yrb, 5)
        mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)

        # morphological transform to remove unwanted part
        kernel = np.ones((5, 5), np.uint8)
        #mask_skin = cv2.morphologyEx(mask_skin, cv2.MORPH_OPEN, kernel)
        mask_skin = cv2.dilate(mask_skin, kernel, iterations=2)

        res_skin = cv2.bitwise_and(img_src, img_src, mask=mask_skin)
        #res_skin_dn = cv2.fastNlMeansDenoisingColored(res_skin, None, 10, 10, 7,21)

        return res_skin


# Do background subtraction with some filtering
CV2.py (project: reconstruction, author: microelly2)
def execute_ColorSpace(proxy,obj):

    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')
    hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)

    lower = np.array([max(obj.h1-obj.h2,0),max(obj.s1-obj.s2,0),max(obj.v1-obj.v2,0)])
    upper = np.array([min(obj.h1+obj.h2,255),min(obj.s1+obj.s2,255),min(obj.v1+obj.v2,255)])
    say("ee")
    say(lower)
    say(upper)

    mask = cv2.inRange(hsv, lower, upper)
    mask = cv2.inRange(img, lower, upper)  # note: this overwrites the HSV-based mask with a BGR-space mask

    res = cv2.bitwise_and(img,img, mask= mask)

    obj.Proxy.img=res
CV2.py (project: reconstruction, author: microelly2)
def execute_HSV(proxy,obj):

    say("hsv ..")

    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    lower=np.array([obj.valueColor-obj.deltaColor,0,0])
    upper=np.array([obj.valueColor+obj.deltaColor,255,255])
    mask = cv2.inRange(hsv, lower, upper)

    res = cv2.bitwise_and(hsv,hsv, mask= mask)

    obj.Proxy.img=res
project_v2.py (project: SelfDrivingCar, author: aguijarro)
def region_of_interest(img, vertices):
    """
    Applies an image mask.
    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    # defining a blank mask to start with
    mask = np.zeros_like(img)
    # defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255
    # filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    # returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
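
For context, here is a minimal usage sketch of the region_of_interest helper above. The image size and the trapezoidal vertices are illustrative values, not taken from the SelfDrivingCar project.

import numpy as np

img = np.zeros((540, 960, 3), np.uint8)                # placeholder road image
vertices = np.array([[(0, 540), (380, 320),
                      (580, 320), (960, 540)]], dtype=np.int32)
masked = region_of_interest(img, vertices)             # pixels outside the polygon are set to black
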
argumentation_utils.py (project: bot2017Fin, author: AllanYiin)
def equal_color(img: Image, color):
    arr_img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
    arr_img = cv2.resize(arr_img, (img.size[0] * 10, img.size[1] * 10))
    boundaries = []
    boundaries.append(([max(color[2] - 15, 0), max(color[1] - 15, 0), max(color[0] - 15, 0)],
                       [min(color[2] + 15, 255), min(color[1] + 15, 255), min(color[0] + 15, 255)]))
    for (lower, upper) in boundaries:
        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")

        # find the colors within the specified boundaries and apply
        # the mask
        mask = cv2.inRange(arr_img, lower, upper)
        res = cv2.bitwise_and(arr_img, arr_img, mask=mask)
        res = cv2.resize(res, (img.size[0], img.size[1]))
        cv2_im = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
        output_img = Image.fromarray(cv2_im)

        return output_img
frying.py (project: DeepFryBot, author: asdvek)
def find_chars(img):
    gray = np.array(img.convert("L"))
    ret, mask = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
    image_final = cv2.bitwise_and(gray, gray, mask=mask)
    ret, new_img = cv2.threshold(image_final, 180, 255, cv2.THRESH_BINARY_INV)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    dilated = cv2.dilate(new_img, kernel, iterations=1)
    # Image.fromarray(dilated).save('out.png') # for debugging
    _, contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    coords = []
    for contour in contours:
        # get rectangle bounding contour
        [x, y, w, h] = cv2.boundingRect(contour)
        # ignore large chars (probably not chars)
        if w > 70 and h > 70:
            continue
        coords.append((x, y, w, h))
    return coords


# find list of eye coordinates in image
manual_park.py (project: AutonomousParking, author: jovanduy)
def overlay_img(self):
        """Overlay the transparent, transformed image of the arc onto our CV image"""
        #overlay the arc on the image
        rows, cols, channels = self.transformed.shape
        roi = self.cv_image[0:rows, 0:cols]

        #change arc_image to grayscale
        arc2gray = cv2.cvtColor(self.transformed, cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(arc2gray, 10, 255, cv2.THRESH_BINARY)
        mask_inv = cv2.bitwise_not(mask)

        #black out area of arc in ROI
        img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
        img2_fg = cv2.bitwise_and(self.transformed, self.transformed, mask=mask)

        #put arc on ROI and modify the main image
        dst = cv2.add(img1_bg, img2_fg)
        self.cv_image[0:rows, 0:cols] = dst
extract_color.py (project: python-image-processing, author: karaage0703)
def extract_color( src, h_th_low, h_th_up, s_th, v_th ):
    hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    if h_th_low > h_th_up:
        ret, h_dst_1 = cv2.threshold(h, h_th_low, 255, cv2.THRESH_BINARY) 
        ret, h_dst_2 = cv2.threshold(h, h_th_up,  255, cv2.THRESH_BINARY_INV)

        dst = cv2.bitwise_or(h_dst_1, h_dst_2)
    else:
        ret, dst = cv2.threshold(h,   h_th_low, 255, cv2.THRESH_TOZERO) 
        ret, dst = cv2.threshold(dst, h_th_up,  255, cv2.THRESH_TOZERO_INV)
        ret, dst = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY)

    ret, s_dst = cv2.threshold(s, s_th, 255, cv2.THRESH_BINARY)
    ret, v_dst = cv2.threshold(v, v_th, 255, cv2.THRESH_BINARY)
    dst = cv2.bitwise_and(dst, s_dst)
    dst = cv2.bitwise_and(dst, v_dst)
    return dst
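
A brief call sketch for extract_color above, showing the hue wrap-around branch that is used for red (whose hue in OpenCV spans the ends of the 0-179 range). The thresholds and the synthetic image are illustrative only.

import cv2
import numpy as np

src = np.zeros((100, 100, 3), np.uint8)
src[:, 50:] = (0, 0, 255)                        # red block in BGR
red_mask = extract_color(src, 170, 10, 50, 50)   # h_th_low > h_th_up, so the bitwise_or branch runs
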
utils.py (project: histonets-cv, author: sul-cidr)
def output_as_mask(f):
    """Decorator to add a return_mask option when image and mask are being
    returned"""

    def wrapper(*args, **kwargs):
        return_mask = kwargs.pop('return_mask', False)
        image, mask = f(*args, **kwargs)
        if return_mask:
            return mask
        else:
            return cv2.bitwise_and(image, image, mask=mask)

    wrapper.__name__ = f.__name__
    wrapper.__doc__ = """{}
    The binary mask can also be returned instead by setting
    return_mask to True.""".format(f.__doc__)
    return wrapper
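
A minimal usage sketch of the output_as_mask decorator above. The decorated function keep_bright is a hypothetical example, not part of histonets-cv; the decorator only requires that the wrapped function return an (image, mask) pair.

import cv2
import numpy as np

@output_as_mask
def keep_bright(image, thresh=200):
    """Keep only pixels brighter than `thresh`."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)
    return image, mask

img = np.full((100, 100, 3), 230, np.uint8)
masked = keep_bright(img)                        # masked colour image
mask_only = keep_bright(img, return_mask=True)   # just the binary mask
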
lane_detection_module.py (project: diy_driverless_car_ROS, author: wilselby)
def binary_thresh( img,  boundaries,  filter):

     if filter == 'RGB':
         frame_to_thresh = img.copy()
     else:
         frame_to_thresh = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

     for (lower,  upper) in boundaries:

         # create numpy arrays from the boundaries
         lower = np.array(lower,  dtype = "uint8")
         upper = np.array(upper,  dtype = "uint8")

         # find the colors within the specified boundaries and apply the mask
         mask = cv2.inRange(frame_to_thresh,  lower,  upper)
         output = cv2.bitwise_and(frame_to_thresh, frame_to_thresh,  mask = mask)   # masked colour image (not returned; the binary mask is returned below)

     return mask
lane_detection_module.py (project: diy_driverless_car_ROS, author: wilselby)
def region_of_interest(img, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    #defining a blank mask to start with
    mask = np.zeros_like(img)   

    #defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255

    #filling pixels inside the polygon defined by "vertices" with the fill color    
    cv2.fillPoly(mask, vertices, ignore_mask_color)

    #returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
color_segment.py (project: cv_build18, author: eknight7)
def print_hsv(x):
    if x == 1:
        H_low = cv2.getTrackbarPos("H_low", "Segmented")
        S_low = cv2.getTrackbarPos("S_low", "Segmented")
        V_low = cv2.getTrackbarPos("V_low", "Segmented")
        H_high = cv2.getTrackbarPos("H_high", "Segmented")
        S_high = cv2.getTrackbarPos("S_high", "Segmented")
        V_high = cv2.getTrackbarPos("V_high", "Segmented")

        low = np.array([H_low, S_low, V_low])
        high = np.array([H_high, S_high, V_high])
        print "HSV Low: ", low, ", High: ", high

        save_name = 'seg' + '_lh_' + str(low[0]) + '_ls_' + str(low[1]) + '_lv_' + str(low[2]) \
                    + '_hh_' + str(high[0]) + '_hs_' + str(high[1]) + '_hv_' + str(high[2]) \
                    + '_' + img_str

        mask = cv2.inRange(birdsHSV, low, high)
        result = cv2.bitwise_and(birdsImg, birdsImg, mask = mask)
        res_name = '../results/' + save_name
        print "Saving result as", res_name
        cv2.imwrite(res_name, result)
preProcessing.py (project: Conquest_kshitij, author: pigeon-kgp)
def blob__Detec(image):
    img=copy(image)
    height, width, channels = img.shape
    new_img=np.ones((height,width,channels), np.uint8)
    HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    Yellow={'min':(20,100,100),'max':(30, 255, 255)}
    Blue={'min':(50,100,100),'max':(100,255,255)}
    Brown={'min':(0,100,0),'max':(20,255,255)}



    mask_b=cv2.inRange(HSV,Blue['min'],Blue['max'])
    mask_br=cv2.inRange(HSV,Brown['min'],Brown['max'])
    mask_y=cv2.inRange(HSV,Yellow['min'],Yellow['max'])

    blue=cv2.bitwise_and(img,img,mask=mask_b)
    yellow=cv2.bitwise_and(img,img,mask=mask_y)
    brown=cv2.bitwise_and(img,img,mask=mask_br)

    new_img=cv2.add(blue,brown)
    new_img=cv2.add(new_img,yellow)

    return new_img
hsvmoder.py (project: flight-stone, author: asmateus)
def hsvModer(self, index, hsv_valueT, hsv_value_B):
        img_BGR = self.img[index]
        img_RGB = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2RGB)
        img_HSV = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2HSV)

        lower_red = np.array(hsv_value_B)
        upper_red = np.array(hsv_valueT)

        mask = cv2.inRange(img_HSV, lower_red, upper_red)
        res = cv2.bitwise_and(img_RGB, img_RGB, mask=mask)
        if self.erosion:
            kernel = np.ones((5, 5), np.uint8)
            res = cv2.erode(res, kernel, iterations=1)
        if self.dilate:
            kernel = np.ones((9, 9), np.uint8)
            res = cv2.dilate(res, kernel, iterations=1)

        return res
Detector.py (project: UAV-ODF, author: rixmit)
def matchDetectionsOverlap(self, detections, truthDetections, frameSize):
        self.log.info("Calculating overlap for each detection..")

        truthCanvas = np.zeros((frameSize[0], frameSize[1], 1), np.uint8)
        for truthDetection in truthDetections:
            truthCanvas[truthDetection['ymin']:truthDetection['ymax'], truthDetection['xmin']:truthDetection['xmax']] = 255

        overlapRatios = []
        for detection in detections:
            detectionCanvas = np.zeros((frameSize[0], frameSize[1], 1), np.uint8)
            detectionCanvas[detection['ymin']:detection['ymax'], detection['xmin']:detection['xmax']] = 255

            canvas = cv2.bitwise_and(detectionCanvas, truthCanvas)
            detectionCount = np.count_nonzero(detectionCanvas)
            overlapCount = np.count_nonzero(canvas)
            overlap = (overlapCount*1.0)/detectionCount

            overlapRatios.append(overlap)

        return overlapRatios
filter.py (project: PicFilter, author: dhuadaar)
def render(self,frame):
        numDownSamples = 2  # number of downscaling steps
        numBilateralFilters = 7  # number of bilateral filtering steps
        img_rgb = frame
        # -- STEP 1 --
        # downsample image using Gaussian pyramid
        img_color = img_rgb
        for _ in xrange(numDownSamples):
            img_color = cv2.pyrDown(img_color)
        # repeatedly apply small bilateral filter instead of applying
        # one large filter
        for _ in xrange(numBilateralFilters):
            img_color = cv2.bilateralFilter(img_color, 9, 9, 7)

        # upsample image to original size
        for _ in xrange(numDownSamples):
            img_color = cv2.pyrUp(img_color)
        # convert to grayscale and apply median blur
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
        img_blur = cv2.medianBlur(img_gray, 7)

        # detect and enhance edges
        img_edge = cv2.adaptiveThreshold(img_blur, 255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 9, 2)
        # -- STEP 5 --
        # convert back to color so that it can be bit-ANDed with color image
        img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
        final = cv2.bitwise_and(img_color, img_edge)
        return cv2.medianBlur(final,7)
project3.py (project: Self-Driving-Car-ND-Predict-Steering-Angle-with-CV, author: sjamthe)
def yellowgrayscale(image):
    #enhance yellow then find grayscale

    #image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # define range of yellow color in HSV
    #lower = np.array([40,40,40])
    #upper = np.array([150,255,255])

    #RGB limits
    lower = np.array([80,80,40])
    upper = np.array([255,255,80])

    # Threshold the image (RGB limits above) to keep only yellowish colors
    mask = cv2.inRange(image, lower, upper)
    #show_image('mask',mask)

    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(image,image, mask= mask)
    res = cv2.addWeighted(res, 1.0, image, 1.0, 0)
    res = grayscale(res)

    return res
project3.py (project: Self-Driving-Car-ND-Predict-Steering-Angle-with-CV, author: sjamthe)
def region_of_interest(img, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    #defining a blank mask to start with
    mask = np.zeros_like(img)

    #defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255

    #filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)

    #returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
image_preparator.py (project: autonomous_driving, author: StatueFungus)
def filter_color(self, image, lower_color, upper_color):
        '''
            Masks out the regions of an image that do not lie within the
            given HSV colour range.

            Parameters
            ---------
            image : image
            lower_color : tuple
                >> (h,s,v)
            upper_color : tuple
                >> (h,s,v)

            Returns
            ---------
            image : image

        '''
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        lower_color = np.array(lower_color, np.uint8)
        upper_color = np.array(upper_color, np.uint8)

        color_mask = cv2.inRange(hsv, lower_color, upper_color)
        return cv2.bitwise_and(image, image, mask=color_mask)
scanner.py (project: robik, author: RecunchoMaker)
def get_color(self, frame, color1, color2):
        mask = cv2.inRange(self.hsv, color1, color2)
        res = cv2.bitwise_and(frame, frame, mask = mask)
        return res
metrics.py (project: detection-2016-nipsws, author: imatge-upc)
def calculate_iou(img_mask, gt_mask):
    gt_mask *= 1.0
    img_and = cv2.bitwise_and(img_mask, gt_mask)
    img_or = cv2.bitwise_or(img_mask, gt_mask)
    j = np.count_nonzero(img_and)
    i = np.count_nonzero(img_or)
    iou = float(float(j)/float(i))
    return iou
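
A small sanity check of calculate_iou with synthetic masks. Note that the function applies cv2.bitwise_and/bitwise_or directly to the arrays, so float masks holding 0.0/1.0 values are assumed here; the shapes and rectangles are made up.

import numpy as np

pred = np.zeros((10, 10), np.float32)
gt = np.zeros((10, 10), np.float32)
pred[2:8, 2:8] = 1.0     # 36 predicted pixels
gt[4:10, 4:10] = 1.0     # 36 ground-truth pixels; the overlap is 4x4 = 16 pixels

iou = calculate_iou(pred, gt)   # 16 / (36 + 36 - 16) = 0.2857...
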
metrics.py (project: detection-2016-nipsws, author: imatge-upc)
def calculate_overlapping(img_mask, gt_mask):
    gt_mask *= 1.0
    img_and = cv2.bitwise_and(img_mask, gt_mask)
    j = np.count_nonzero(img_and)
    i = np.count_nonzero(gt_mask)
    overlap = float(float(j)/float(i))
    return overlap
vision_util.py (project: Vision2016, author: Team3309)
def hsv_threshold(img, hue_min, hue_max, sat_min, sat_max, val_min, val_max):
    """
    Threshold an HSV image given separate min/max values for each channel.
    :param img: an hsv image
    :param hue_min:
    :param hue_max:
    :param sat_min:
    :param sat_max:
    :param val_min:
    :param val_max:
    :return: result of the threshold (each binary channel AND'ed together)
    """

    hue, sat, val = cv2.split(img)

    hue_bin = np.zeros(hue.shape, dtype=np.uint8)
    sat_bin = np.zeros(sat.shape, dtype=np.uint8)
    val_bin = np.zeros(val.shape, dtype=np.uint8)

    cv2.inRange(hue, hue_min, hue_max, hue_bin)
    cv2.inRange(sat, sat_min, sat_max, sat_bin)
    cv2.inRange(val, val_min, val_max, val_bin)

    bin = np.copy(hue_bin)
    cv2.bitwise_and(sat_bin, bin, bin)
    cv2.bitwise_and(val_bin, bin, bin)

    return bin
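
A minimal call sketch for hsv_threshold above. The frame and the channel ranges are illustrative; note that the function expects an image already converted to HSV.

import cv2
import numpy as np

frame = np.zeros((120, 160, 3), np.uint8)
frame[:, 80:] = (0, 255, 0)                               # green block in BGR (hue ~60 in HSV)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
target = hsv_threshold(hsv, 50, 70, 100, 255, 100, 255)   # binary mask of green-ish pixels
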
gesture_hci.py (project: CE264-Computer_Vision, author: RobinCPC)
def background_subtract(self, img_src):
        fgmask = self.fgbg.apply(cv2.GaussianBlur(img_src, (25, 25), 0))
        kernel = np.ones((5, 5), np.uint8)
        fgmask = cv2.dilate(fgmask, kernel, iterations=2)
        #fgmask = self.fgbg.apply(cv2.medianBlur(img_src, 11))
        org_fg = cv2.bitwise_and(img_src, img_src, mask=fgmask)
        return org_fg

# Update Position of ROI
piwall.py (project: piwall-cvtools, author: infinnovation)
def maskLogoOverImage(self):
        # Load two images
        img1 = cv2.imread('messi5.jpg')
        img2 = cv2.imread('opencv_logo.png')

        # I want to put logo on top-left corner, So I create a ROI
        rows,cols,channels = img2.shape
        roi = img1[0:rows, 0:cols ]

        # Now create a mask of logo and create its inverse mask also
        img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
        mask_inv = cv2.bitwise_not(mask)

        # Now black-out the area of logo in ROI
        img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)

        # Take only region of logo from logo image.
        img2_fg = cv2.bitwise_and(img2,img2,mask = mask)

        # Put logo in ROI and modify the main image
        dst = cv2.add(img1_bg,img2_fg)
        img1[0:rows, 0:cols ] = dst

        cv2.imshow('res',img1)
        cv2.waitKey(0)
        cv2.destroyAllWindows()


#####################################################################################################################
# Prototypes & Convenient CLI/GUI Dispatcher to rebuild mental picture of where we are/repeat on new platforms.
#####################################################################################################################

