Python cv2.cvtColor() examples from open-source projects
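
All snippets below assume the usual imports (import cv2, import numpy as np). As a quick refresher before the project examples, here is a minimal sketch of cv2.cvtColor itself; input.jpg is a placeholder file name:

import cv2

img = cv2.imread('input.jpg')                 # OpenCV loads images as BGR
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # BGR -> single-channel grayscale
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)    # BGR -> hue/saturation/value
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)    # BGR -> RGB (e.g. for matplotlib)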

tslsr.py (project: Speedy-TSLSR, author: talhaHavadar)
def __bound_contours(roi):
    """
        returns modified roi(non-destructive) and rectangles that founded by the algorithm.
        @roi region of interest to find contours
        @return (roi, rects)
    """

    roi_copy = roi.copy()
    roi_hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
    # filter black color
    mask1 = cv2.inRange(roi_hsv, np.array([0, 0, 0]), np.array([180, 255, 125]))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    mask1 = cv2.Canny(mask1, 100, 300)
    mask1 = cv2.GaussianBlur(mask1, (1, 1), 0)  # note: a 1x1 kernel leaves the edge map unchanged
    mask1 = cv2.Canny(mask1, 100, 300)

    # mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))

    # Find contours for detected portion of the image
    # OpenCV 3.x findContours returns (image, contours, hierarchy)
    im2, cnts, hierarchy = cv2.findContours(mask1.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]  # keep the five largest contours by area
    rects = []
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        if h >= 15:
            # if the contour is tall enough, keep its bounding rectangle
            rect = (x, y, w, h)
            rects.append(rect)
            cv2.rectangle(roi_copy, (x, y), (x+w, y+h), (0, 255, 0), 1)

    return (roi_copy, rects)
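
A hedged usage sketch for the function above, called from the same module (the double-underscore prefix keeps it module-private by convention); sign.jpg is a placeholder:

roi = cv2.imread('sign.jpg')
roi_drawn, rects = __bound_contours(roi)
print(rects)  # bounding rectangles as (x, y, w, h) tuples
cv2.imshow('bounded', roi_drawn)
cv2.waitKey(0)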
calibration_camera.py (project: SelfDrivingCar, author: aguijarro)
def get_points():

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((6*8,3), np.float32)
    objp[:,:2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d points in real world space
    imgpoints = [] # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob('calibration_wide/GO*.jpg')

    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (8,6), None)

        # If found, add object points, image points
        if ret:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(img, (8,6), corners, ret)
            #write_name = 'corners_found'+str(idx)+'.jpg'
            #cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()
    return objpoints, imgpoints
image_test.py (project: facial_emotion_recognition, author: adamaulia)
def test_image(addr):
    target = ['angry','disgust','fear','happy','sad','surprise','neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX

    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,scaleFactor=1.1)

    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 2)
        face_crop = im[y:y+h, x:x+w]
        face_crop = cv2.resize(face_crop, (48, 48))
        face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
        face_crop = face_crop.astype('float32') / 255
        face_crop = face_crop.reshape(1, 1, face_crop.shape[0], face_crop.shape[1])  # NCHW input for the model
        result = target[np.argmax(model.predict(face_crop))]
        cv2.putText(im, result, (x, y), font, 1, (200, 0, 0), 3, cv2.LINE_AA)

    cv2.imshow('result', im)
    cv2.imwrite('result.jpg',im)
    cv2.waitKey(0)
calibration_camera.py (project: SelfDrivingCar, author: aguijarro)
def corners_unwarp(img, nx, ny, undistorted):
    M = None
    warped = np.copy(img)
    # Use the OpenCV undistort() function to remove distortion
    undist = undistorted
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    if ret:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100 # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])

        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                          [img_size[0]-offset, img_size[1]-offset],
                          [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)

    # Return the resulting image and matrix
    return warped, M
byFaceDetection.py (project: Easitter, author: TomoyaFujita2016)
def detectFace(image):
    cascadePath = "/usr/local/opt/opencv/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"
    FACE_SHAPE = 0.45
    result = image.copy()
    imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(cascadePath)
    faceRect = cascade.detectMultiScale(imageGray, scaleFactor=1.1, minNeighbors=1, minSize=(1,1))

    if len(faceRect) <= 0:
        return False
    else:
        # confirm the detections look like faces (roughly square)
        imageSize = image.shape[0] * image.shape[1]
        filteredFaceRects = []
        for faceR in faceRect:
            faceSize = faceR[2]*faceR[3]
            if FACE_SHAPE > min(faceR[2], faceR[3])/max(faceR[2], faceR[3]):
                continue  # skip rectangles too elongated to be a face
            filteredFaceRects.append(faceR)

        return len(filteredFaceRects) > 0
flo2img.py (project: Deep360Pilot-optical-flow, author: yenchenlin)
def convert_wrapper(path, outpath, Debug=False):
    for filename in sorted(os.listdir(path)):
        if filename.endswith('.flo'):
            filename = filename.replace('.flo','')

            flow = read_flow(path, filename)
            flow_img = convert_flow(flow, 2.0)

            # NOTE: Change from BGR (OpenCV format) to RGB (Matlab format) to fit Matlab output
            flow_img = cv2.cvtColor(flow_img, cv2.COLOR_BGR2RGB)

            #print "Saving {}.png with shape: {}".format(filename, flow_img.shape)
            cv2.imwrite(outpath + filename + '.png', flow_img)

            if Debug:
                ret = imchecker(outpath + filename)



# Sanity check and comparison if we have matlab version image
calibrator.py (project: camera_calibration_frontend, author: groundmelon)
def _get_corners(img, board, refine = True, checkerboard_flags=0):
    """
    Get corners for a particular chessboard for an image
    """
    h = img.shape[0]
    w = img.shape[1]
    if len(img.shape) == 3 and img.shape[2] == 3:
        mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        mono = img
    (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows), flags = cv2.CALIB_CB_ADAPTIVE_THRESH |
                                              cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags)
    if not ok:
        return (ok, corners)

    # If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false
    # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction
    # of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras
    BORDER = 8
    if not all([(BORDER < corners[i, 0, 0] < (w - BORDER)) and (BORDER < corners[i, 0, 1] < (h - BORDER)) for i in range(corners.shape[0])]):
        ok = False

    if refine and ok:
        # Use a radius of half the minimum distance between corners. This should be large enough to snap to the
        # correct corner, but not so large as to include a wrong corner in the search window.
        min_distance = float("inf")
        # horizontally adjacent corners (corners are stored row-major, n_cols per row)
        for row in range(board.n_rows):
            for col in range(board.n_cols - 1):
                index = row*board.n_cols + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + 1, 0]))
        # vertically adjacent corners
        for row in range(board.n_rows - 1):
            for col in range(board.n_cols):
                index = row*board.n_cols + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + board.n_cols, 0]))
        radius = int(math.ceil(min_distance * 0.5))
        cv2.cornerSubPix(mono, corners, (radius,radius), (-1,-1),
                                      ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 ))

    return (ok, corners)
sudoku_steps.py (project: pyku, author: dubvulture)
def __init__(self, filename, folder=None, classifier=None):
        """
        :param filename: image with sudoku
        :param folder: folder where to save debug images
        :param classifier: digit classifier
        """
        self.filename = os.path.basename(filename)
        image = cv2.imread(filename)
        self.image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        self.folder = folder or FOLDER
        os.makedirs(os.path.join(self.folder, 'debug'), exist_ok=True)
        self.classifier = classifier or DigitClassifier()
        # Default initial values
        self.perspective = False
        self.debug = True
        self.counter = 0
        self.step = -1
ImageExtractor.py (project: SudokuSolver, author: Anve94)
def apply_filters(self, image, denoise=False):
        """ This method is used to apply required filters to the
            to extracted regions of interest. Every square in a
            sudoku square is considered to be a region of interest,
            since it can potentially contain a value. """
        # Convert to grayscale
        source_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Denoise the grayscale image if requested in the params
        if denoise:
            denoised_gray = cv2.fastNlMeansDenoising(source_gray, None, 9, 13)
            source_blur = cv2.GaussianBlur(denoised_gray, BLUR_KERNEL_SIZE, 3)
            # source_blur = denoised_gray
        else:
            source_blur = cv2.GaussianBlur(source_gray, (3, 3), 3)
        source_thresh = cv2.adaptiveThreshold(source_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                              cv2.THRESH_BINARY_INV, 5, 2)
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        source_eroded = cv2.erode(source_thresh, kernel, iterations=1)
        source_dilated = cv2.dilate(source_eroded, kernel, iterations=1)
        if ENABLE_PREVIEW_ALL:
            image_preview(source_dilated)
        return source_dilated
Capture_Img_To_Drive.py (project: Mini-Projects, author: gaborvecsei)
def CaptureImage():
    imageName = 'DontCare.jpg' #Just a random string
    cap = cv2.VideoCapture(0)
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()

        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # to capture the image in monochrome
        rgbImage = frame # keep the frame as-is (note: OpenCV frames are BGR, not RGB)

        # Display the resulting frame
        cv2.imshow('Webcam',rgbImage)
        # Wait for a 'q' key press to capture the image
        if cv2.waitKey(1) & 0xFF == ord('q'):
            #Set the image name to the date it was captured
            imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
            #Save the image
            cv2.imwrite(imageName, rgbImage)
            break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    #Returns the captured image's name
    return imageName
main.py (project: specularity-removal, author: gmichaeljaison)
def _resolve_spec(im1, im2):
    im = im1.copy()

    img1 = cv.cvtColor(im1, cv.COLOR_BGR2GRAY)
    img2 = cv.cvtColor(im2, cv.COLOR_BGR2GRAY)

    # Best-pixel selection criteria:
    #   1. The pixel difference should exceed DIFF_THRESHOLD (an experimentally chosen
    #      value; feel free to change it).
    #   2. The best pixel should have the lower intensity.
    #   3. The pixel should not be pure black (an additional constraint to remove the
    #      black background created by warping).
    mask = np.logical_and((img1 - img2) > DIFF_THRESHOLD, img1 > img2)
    # the img1 > img2 term also discards uint8 wraparound from the subtraction above
    mask = np.logical_and(mask, img2 != 0)

    im[mask] = im2[mask]
    return im
cut.py (project: yonkoma2data, author: esuji5)
def homography(self, img, outdir_name=''):
        orig = img
        # binarize: grayscale, blur, then Canny edge detection
        gray = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)
        gauss = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(gauss, 50, 150)

        # find contours in the binarized image
        contours = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[1]
        # sort contours by area, largest first
        contours.sort(key=cv2.contourArea, reverse=True)

        if len(contours) > 0:
            arclen = cv2.arcLength(contours[0], True)
            # approximate the contour with a polygon
            approx = cv2.approxPolyDP(contours[0], 0.01 * arclen, True)
            # warp = approx.copy()
            if len(approx) >= 4:
                self.last_approx = approx.copy()
            elif self.last_approx is not None:
                approx = self.last_approx
        else:
            approx = self.last_approx
        rect = self.get_rect_by_points(approx)
        # warped = self.transform_by4(orig, warp[:, 0, :])
        return orig[rect[0]:rect[1], rect[2]:rect[3]]
vis_light_points.py (project: esys-pbi, author: fsxfreak)
def update(self,frame,events):
        falloff = self.falloff

        img = frame.img
        pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('gaze_positions',[]) if pt['confidence']>=self.g_pool.min_data_confidence]

        overlay = np.ones(img.shape[:-1],dtype=img.dtype)

        # draw recent gaze positions as black dots on an overlay image.
        for gaze_point in pts:
            try:
                overlay[int(gaze_point[1]),int(gaze_point[0])] = 0
            except IndexError:
                # ignore gaze points that fall outside the frame
                pass

        out = cv2.distanceTransform(overlay,cv2.DIST_L2, 5)

        # fix for opencv binding inconsistency
        if type(out)==tuple:
            out = out[0]

        overlay = 1 / (out / falloff + 1)

        img[:] = np.multiply(img, cv2.cvtColor(overlay,cv2.COLOR_GRAY2RGB), casting="unsafe")
CV2.py (project: reconstruction, author: microelly2)
def execute_Threshold(proxy,obj):

    try:
        img = obj.sourceObject.Proxy.img.copy()
    except AttributeError:
        img = cv2.imread(__dir__ + '/icons/freek.png')

    img = cv2.medianBlur(img, 5)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)


    if obj.globalThresholding:
        ret,th1 = cv2.threshold(img,obj.param1,obj.param2,cv2.THRESH_BINARY)
        obj.Proxy.img = cv2.cvtColor(th1, cv2.COLOR_GRAY2RGB)

    if obj.adaptiveMeanTresholding:
        th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
                cv2.THRESH_BINARY,11,2)
        obj.Proxy.img = cv2.cvtColor(th2, cv2.COLOR_GRAY2RGB)

    if obj.adaptiveGaussianThresholding:
        th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,17,2)
        obj.Proxy.img = cv2.cvtColor(th3, cv2.COLOR_GRAY2RGB)
DetectFaces.py (project: FaceDetected, author: ttchin)
def detect_faces_from_picture(pic_file_path):
    print(">>> Let me check this picture: " + pic_file_path)
    frame = cv2.imread(pic_file_path)

    # Detect faces in the frame
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_frame, 1.3, 5)

    # Match the detected faces with the trained model
    if len(faces) > 0:
        print(">>> Someone is in the picture!")
        for (x, y, w, h) in faces:
            face = frame[y:y+h, x:x+w]
            result = model.predict(face)
            for index, name in model.getTrainCfg():
                if result == index:
                    print(">>> Aha, it's %s!" % name)
lsun_bedroom_line2color.py (project: chainer-cyclegan, author: Aixile)
def get_example(self, i):
        id = self.all_keys[i]
        img = None
        val = self.db.get(id.encode())

        img = cv2.imdecode(np.frombuffer(val, dtype=np.uint8), 1)
        img = self.do_augmentation(img)

        img_color = img
        img_color = self.preprocess_image(img_color)

        img_line = XDoG(img)
        img_line = cv2.cvtColor(img_line, cv2.COLOR_GRAY2RGB)
        #if img_line.ndim == 2:
        #    img_line = img_line[:, :, np.newaxis]
        img_line = self.preprocess_image(img_line)

        return img_line, img_color
sudoku_steps.py (project: pyku, author: dubvulture)
def extract_corners(self, image):
        """
        Find the 4 corners of a binary image
        :param image: binary image
        :return: 4 main vertices or None
        """
        cnts, _ = cv2.findContours(image.copy(),
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)[-2:]
        cnt = cnts[0]
        _, _, h, w = cv2.boundingRect(cnt)
        epsilon = min(h, w) * 0.5
        o_vertices = cv2.approxPolyDP(cnt, epsilon, True)
        vertices = cv2.convexHull(o_vertices, clockwise=True)
        vertices = self.correct_vertices(vertices)

        if self.debug:
            temp = cv2.cvtColor(image.copy(), cv2.COLOR_GRAY2BGR)
            cv2.drawContours(temp, cnts, -1, (0, 255, 0), 10)
            cv2.drawContours(temp, o_vertices, -1, (255, 0, 0), 30)
            cv2.drawContours(temp, vertices, -1, (0, 0, 255), 20)
            self.save2image(temp)

        return vertices
hsv-tuner.py (project: RunescapeBots, author: lukegarbutt)
def print_img_array(self):
        img = self.take_screenshot('array')
        #converts image to HSV 
        img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # gets the values from the sliders
        low_hue = self.low_hue.get()
        low_sat = self.low_sat.get()
        low_val = self.low_val.get()
        # gets upper values from sliders
        high_hue = self.high_hue.get()
        high_sat = self.high_sat.get()
        high_val = self.high_val.get()
        lower_color = np.array([low_hue,low_sat,low_val]) 
        upper_color= np.array([high_hue,high_sat,high_val])
        # create the mask from the converted HSV image and print it
        mask = cv2.inRange(img, lower_color, upper_color)
        print(mask)


# Instance of Tkinter
scanner.py (project: robik, author: RecunchoMaker)
def get_color_medio(self, roi, a,b,imprimir = False):
        xl,yl,ch = roi.shape
        roiyuv = cv2.cvtColor(roi,cv2.COLOR_RGB2YUV)
        roihsv = cv2.cvtColor(roi,cv2.COLOR_RGB2HSV)
        h,s,v=cv2.split(roihsv)
        mask=(h<5)
        h[mask]=200

        roihsv = cv2.merge((h,s,v))
        std = np.std(roiyuv.reshape(xl*yl,3),axis=0)
        media = np.mean(roihsv.reshape(xl*yl,3), axis=0)-60
        mediayuv = np.mean(roiyuv.reshape(xl*yl,3), axis=0)

        if std[0] < 12 and std[1] < 12 and std[2] < 12:
        #if (std[0]<15 and std[2]<15) or ((media[0]>100 or media[0]<25) and (std[0]>10)):
            media = np.mean(roihsv.reshape(xl*yl, 3), axis=0)
            # yellow has saturation around 65 and value above 200
            if media[1] < 60:  # and (abs(media[0]-30)>10):
                # white
                return [-10, 0, 0]
            else:
                return media
        else:
            return None
test.py (project: yolo_tensorflow, author: hizhangp)
def detect(self, img):
        img_h, img_w, _ = img.shape
        inputs = cv2.resize(img, (self.image_size, self.image_size))
        inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
        inputs = (inputs / 255.0) * 2.0 - 1.0
        inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))

        result = self.detect_from_cvmat(inputs)[0]

        for i in range(len(result)):
            result[i][1] *= (1.0 * img_w / self.image_size)
            result[i][2] *= (1.0 * img_h / self.image_size)
            result[i][3] *= (1.0 * img_w / self.image_size)
            result[i][4] *= (1.0 * img_h / self.image_size)

        return result
main.py (project: guided-filter, author: lisabug)
def test_color():
    image = cv2.imread('data/Lenna.png')
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    noise = (np.random.rand(image.shape[0], image.shape[1], 3) - 0.5) * 50
    image_noise = image + noise

    radius = [1, 2, 4]
    eps = [0.005]

    combs = list(itertools.product(radius, eps))

    vis.plot_single(to_32F(image), title='origin')
    vis.plot_single(to_32F(image_noise), title='noise')

    for r, e in combs:
        GF = GuidedFilter(image, radius=r, eps=e)
        vis.plot_single(to_32F(GF.filter(image_noise)), title='r=%d, eps=%.3f' % (r, e))
color_utils.py (project: pybot, author: spillai)
def colormap(im, min_threshold=0.01):
    mask = im < min_threshold
    if im.ndim == 1:
        print(im)
        hsv = np.zeros((len(im), 3), dtype=np.uint8)
        hsv[:,0] = (im * 180).astype(np.uint8)
        hsv[:,1] = 255
        hsv[:,2] = 255
        bgr = cv2.cvtColor(hsv.reshape(-1,1,3), cv2.COLOR_HSV2BGR).reshape(-1,3)
        bgr[mask] = 0
    else: 
        hsv = np.zeros((im.shape[0], im.shape[1], 3), np.uint8)
        hsv[...,0] = (im * 180).astype(np.uint8)
        hsv[...,1] = 255
        hsv[...,2] = 255
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        bgr[mask] = 0
    return bgr
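
colormap expects im normalized to roughly [0, 1], since values are scaled by 180 to span OpenCV's hue range. A hedged usage sketch; the random depth array is a stand-in for a real single-channel float map:

depth = np.random.rand(240, 320).astype(np.float32)    # stand-in for a depth/confidence map
vis = colormap(depth / max(float(depth.max()), 1e-6))  # normalize to [0, 1] first
cv2.imshow('colormap', vis)
cv2.waitKey(0)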
optflow_utils.py (project: pybot, author: spillai)
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step//2:h:step, step//2:w:step].reshape(2, -1)
    fx, fy = flow[y,x].T
    m = np.bitwise_and(np.isfinite(fx), np.isfinite(fy))
    lines = np.vstack([x[m], y[m], x[m]+fx[m], y[m]+fy[m]]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
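
A hedged usage sketch pairing draw_flow with Farneback dense flow; prev.jpg and next.jpg are placeholders, and the Farneback parameters are the stock OpenCV tutorial values, not this project's own:

prev_gray = cv2.cvtColor(cv2.imread('prev.jpg'), cv2.COLOR_BGR2GRAY)
next_gray = cv2.cvtColor(cv2.imread('next.jpg'), cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prev_gray, next_gray, None,
                                    0.5, 3, 15, 3, 5, 1.2, 0)
vis = draw_flow(next_gray, flow)
cv2.imshow('flow', vis)
cv2.waitKey(0)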
image_color_augment.py (project: Tensormodels, author: asheshjain399)
def random_saturation(img, label, lower=0.5, upper=1.5):
    """
    Multiplies saturation with a constant and clips the value between [0,1.0]
    Args:
        img: input image in float32
        label: returns label unchanged
        lower: lower val for sampling
        upper: upper val for sampling
    """
    alpha = lower + (upper - lower) * rand.rand()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # saturation should always be within [0,1.0]
    hsv[:, :, 1] = np.clip(alpha * hsv[:, :, 1], 0.0, 1.0)

    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), label
image_color_augment.py (project: Tensormodels, author: asheshjain399)
def random_hue(img, label, max_delta=10):
    """
    Rotates the hue channel
    Args:
        img: input image in float32
        max_delta: Max number of degrees to rotate the hue channel
    """
    # Rotates the hue channel by delta degrees
    delta = -max_delta + 2.0 * max_delta * rand.rand()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hchannel = hsv[:, :, 0]
    hchannel = delta + hchannel

    # hue should always be within [0,360]
    idx = np.where(hchannel > 360)
    hchannel[idx] = hchannel[idx] - 360
    idx = np.where(hchannel < 0)
    hchannel[idx] = hchannel[idx] + 360

    hsv[:, :, 0] = hchannel
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), label
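
Both augmentations above operate on float32 images: for float inputs OpenCV represents hue in degrees up to 360 and saturation/value in [0, 1], so an 8-bit frame needs scaling first. A minimal sketch chaining the two; frame.jpg is a placeholder:

frame = cv2.imread('frame.jpg').astype(np.float32) / 255.0  # float32 BGR in [0, 1]
img, label = random_saturation(frame, label=0)
img, label = random_hue(img, label)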
imgreader.py (project: DmsMsgRcg, author: bshao001)
def get_image_features(self, img_file, stride=5, padding=True):
        """
        Take an image file as input, and output an array of image features whose matrix size is
        based on the image size. When padding is off and the image size is smaller than the
        required feature space size (in the x or y direction), the image is not checked, and
        this method returns a tuple of two empty lists. When padding is True and the image
        size is more than 4 pixels smaller than the required feature space size (in the x or
        y direction), the image is not checked either. This method can be used by both the
        trainer and predictor.
        Args:
            img_file: The file name of the image.
            stride: Optional. The stride of the sliding.
            padding: Optional. Whether to pad the image to fit the feature space size or to
                discard the extra pixels if padding is False.
        Returns:
            coordinates: A list of coordinates, each of which contains y and x that are the top
                left corner offsets of the sliding window.
            features: A matrix (python list), in which each row contains the features of the
                sampling sliding window, while the number of rows depends on the image size of
                the input.
        """
        img = cv2.imread(img_file)
        img_arr = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        return self.get_image_array_features(img_arr, stride, padding)
cars.py (project: OCV_Vehicles_Features, author: dan-masek)
def process_frame(frame_number, frame, keypoint_data, detector, matcher):
    log = logging.getLogger("process_frame")

    # Create a copy of source frame to draw into
    processed = frame.copy()

    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    kp, des = detector.detectAndCompute(frame, None)

    # Match descriptors
    matches = matcher.match(keypoint_data.descriptors, des)

    # Sort them in order of distance
    matches = sorted(matches, key=lambda x: x.distance)

    processed = drawMatches(cv2.imread('car.png',0), keypoint_data.keypoints, gray_frame, kp, matches[:])

    return processed

# ============================================================================
goal_test.py (project: Vision2016, author: Team3309)
def check_image(name):
    expected_data = json.loads(open('./img/' + name + '.json').read())
    if not expected_data['enabled']:
        return

    expected_targets = expected_data['targets']

    img = cv2.imread('./img/' + name + '.jpg', cv2.IMREAD_COLOR)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    args = config.copy()
    args['img'] = hsv
    args['output_images'] = {}

    actual_targets = find(**args)

    # make sure same number of targets are detected
    assert len(expected_targets) == len(actual_targets)

    # targets is a list of 2-tuples with expected and actual results
    targets = zip(expected_targets, actual_targets)
    # compare all the different features of targets to make sure they match
    for pair in targets:
        expected, actual = pair
        # make sure that the targets are close to where they are supposed to be
        assert is_close(expected['pos']['x'], actual['pos']['x'], 0.02)
        assert is_close(expected['pos']['y'], actual['pos']['y'], 0.02)
        # make sure that the targets are close to the size they are supposed to be
        assert is_close(expected['size']['width'], actual['size']['width'], 0.02)
        assert is_close(expected['size']['height'], actual['size']['height'], 0.02)
server.py (project: Vision2016, author: Team3309)
def handle_image(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    new_data_condition.acquire()
    state['img'] = hsv
    args = config['target'].copy()
    args['img'] = hsv
    args['draw_output'] = state['draw_output']
    args['output_images'] = {}

    targets = vision.find(**args)
    state['targets'] = targets
    state['output_images'] = args['output_images']
    new_data_condition.notify_all()
    new_data_condition.release()

    fps, processing_time = update_fps()
    state['fps'] = round(fps, 1)
    print('Processed in', processing_time, 'ms, max fps =', round(fps_smoothed, 1))
preprocess.py (project: speed, author: keon)
def optical_flow(one, two):
    """
    method taken from (https://chatbotslife.com/autonomous-vehicle-speed-estimation-from-dashboard-cam-ca96c24120e4)
    """
    one_g = cv2.cvtColor(one, cv2.COLOR_RGB2GRAY)
    two_g = cv2.cvtColor(two, cv2.COLOR_RGB2GRAY)
    hsv = np.zeros((120, 320, 3))
    # set saturation
    hsv[:,:,1] = cv2.cvtColor(two, cv2.COLOR_RGB2HSV)[:,:,1]
    # obtain dense optical flow paramters
    flow = cv2.calcOpticalFlowFarneback(one_g, two_g, flow=None,
                                        pyr_scale=0.5, levels=1, winsize=15,
                                        iterations=2,
                                        poly_n=5, poly_sigma=1.1, flags=0)
    # convert from cartesian to polar
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    # hue corresponds to direction
    hsv[:,:,0] = ang * (180/ np.pi / 2)
    # value corresponds to magnitude
    hsv[:,:,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
    # convert HSV to float32 so cv2.cvtColor accepts it
    hsv = np.asarray(hsv, dtype=np.float32)
    rgb_flow = cv2.cvtColor(hsv,cv2.COLOR_HSV2RGB)
    return rgb_flow
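
A hedged usage sketch for optical_flow: the function hard-codes a 120x320 HSV buffer, so both frames must already be that size, and since cv2.imread returns BGR while the function uses RGB conversion codes, the frames are converted first. frame1.jpg and frame2.jpg are placeholders:

one = cv2.cvtColor(cv2.resize(cv2.imread('frame1.jpg'), (320, 120)), cv2.COLOR_BGR2RGB)
two = cv2.cvtColor(cv2.resize(cv2.imread('frame2.jpg'), (320, 120)), cv2.COLOR_BGR2RGB)
rgb_flow = optical_flow(one, two)  # flow direction encoded as hue, magnitude as value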

