import numpy as np

def transform_cv_rect(rects):
    """Transform rects returned by OpenCV's minAreaRect into our rect format.
    Step 1 of Figure 5 in the SegLink paper.
    The w, h and theta values in the rect returned by cv2.minAreaRect are not
    convenient to use (at least for me), so the oriented (or rotated) rectangle
    in the SegLink algorithm is defined differently from cv2's:
    1. The angle between a side and the x-axis is:
        positive: if it rotates clockwise, with the y-axis increasing downwards.
        negative: if it rotates counter-clockwise.
       This is the opposite of cv2's convention, and only a personal preference.
    2. The width is the length of the side that makes the smaller absolute
       angle with the x-axis.
    3. The theta value of a rect is the signed angle between the width-side
       and the x-axis.
    4. To rotate a rect to the horizontal direction, just rotate its width-side
       to horizontal, i.e., rotate it by an angle of theta using the cv2 method.
       (See rotate_oriented_bbox_to_horizontal for rotation details.)
    Args:
        rects: ndarray with shape = (5, ) or (N, 5).
    Return:
        transformed rects.
    """
    only_one = False
    if len(np.shape(rects)) == 1:
        rects = np.expand_dims(rects, axis=0)
        only_one = True
    assert np.shape(rects)[1] == 5, 'The shape of rects must be (N, 5), but got %s' % (str(np.shape(rects)))
    rects = np.asarray(rects, dtype=np.float32).copy()
    num_rects = np.shape(rects)[0]
    for idx in range(num_rects):
        cx, cy, w, h, theta = rects[idx, ...]
        # assert theta < 0 and theta >= -90, "invalid theta: %f" % (theta)
        # If the side cv2 calls "width" makes the larger absolute angle with
        # the x-axis, swap width and height and adjust theta to match.
        if abs(theta) > 45 or (abs(theta) == 45 and w < h):
            w, h = h, w
            theta = 90 + theta
        rects[idx, ...] = [cx, cy, w, h, theta]
    if only_one:
        return rects[0, ...]
    return rects
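
A minimal usage sketch with made-up points. Note that cv2.minAreaRect's angle convention changed in OpenCV 4.5: older versions return theta in [-90, 0), which is the range this function expects.

import numpy as np
import cv2

# Hypothetical contour: four corners of a rotated rectangle.
pts = np.array([[10, 10], [50, 30], [44, 42], [4, 22]], dtype=np.float32)
(cx, cy), (w, h), theta = cv2.minAreaRect(pts)

# Flatten to the (5,) layout expected by transform_cv_rect.
rect = transform_cv_rect(np.array([cx, cy, w, h, theta]))
print(rect)  # (cx, cy, w, h, theta) in the SegLink convention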
Python minAreaRect() example source code
# Requires: numpy as np, cv2, copy, plus repo-local helpers and globals
# (config, weights, boundingInfo, size, ratios, rotation, distToPolygon,
# polygonAreaDiff, Quadrify, sortedInds, test, Printing).
def filterContoursFancy(contours, image=None):
    if len(contours) == 0:
        return []
    numContours = len(contours)
    areas = np.array([cv2.contourArea(contour) for contour in contours])
    boundingRects = [cv2.boundingRect(contour) for contour in contours]
    widths, heights, positions = boundingInfo(boundingRects)
    rotatedRects = [cv2.minAreaRect(contour) for contour in contours]
    # cv2.boxPoints replaced cv2.cv.BoxPoints in OpenCV 3
    if config.withOpenCV3:
        rotatedBoxes = [np.int0(cv2.boxPoints(rect)) for rect in rotatedRects]
    else:
        rotatedBoxes = [np.int0(cv2.cv.BoxPoints(rect)) for rect in rotatedRects]
    rotatedAreas = [cv2.contourArea(box) for box in rotatedBoxes]
    # per-contour feature scores
    sizeScores = [size(area) for area in areas]
    ratioScores = ratios(widths, heights)
    rotationScores = [rotation(rect) for rect in rotatedRects]
    rectangularScores = [distToPolygon(contour, poly) for contour, poly in zip(contours, rotatedBoxes)]
    areaScores = polygonAreaDiff(areas, rotatedAreas)
    quadScores = [Quadrify(contour) for contour in contours]
    rectangularScores = np.divide(rectangularScores, widths)  # normalize by contour width
    scores = np.array([sizeScores, ratioScores, rotationScores, rectangularScores, areaScores, quadScores])
    contourScores = np.dot(weights, scores)
    correctInds, incorrectInds = sortedInds(contourScores)
    correctContours = np.array(contours)[correctInds]
    if config.extra_debug:
        print("size, ratio, rotation, rectangular, area, quad")
        print("Weights:", weights)
        print("Scores: ", contourScores)
        print(np.average(scores, axis=1))
        if len(incorrectInds) != 0:
            print("AVG, WORST", test(scores, correctInds, incorrectInds))
        for i in range(numContours):
            print("CONTOUR " + str(i))
            print(np.multiply(scores[:, i], weights))
            print(contourScores[i])
            if image is not None:  # `if image:` is ambiguous for numpy arrays
                img = copy.deepcopy(image)
                Printing.drawImage(img, contours[:i] + contours[i+1:], contours[i], False)
                Printing.display(img, "contour " + str(i), doResize=True)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
    return correctContours
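
The heart of both filter functions is the weighted scoring step: each contour gets six feature scores, and np.dot(weights, scores) collapses the (6, N) score matrix into one combined score per contour. A self-contained sketch of just that step, with made-up numbers:

import numpy as np

# Hypothetical scores for 3 contours across 6 features (one row per feature).
scores = np.array([
    [0.9, 0.2, 0.7],  # size
    [0.8, 0.1, 0.6],  # ratio
    [0.7, 0.3, 0.9],  # rotation
    [0.6, 0.2, 0.8],  # rectangularity
    [0.9, 0.4, 0.7],  # area difference
    [0.8, 0.1, 0.9],  # quadrilateral fit
])
weights = np.ones(6)  # assumed uniform weights

contourScores = np.dot(weights, scores)  # shape (3,): one score per contour
keep = contourScores > 3.0               # assumed acceptance threshold
print(contourScores, keep)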
def filterContoursAutocalibrate(contours, image=None):
    if len(contours) == 0:
        return []
    numContours = len(contours)
    areas = np.array([cv2.contourArea(contour) for contour in contours])
    boundingRects = [cv2.boundingRect(contour) for contour in contours]
    widths, heights, positions = boundingInfo(boundingRects)
    rotatedRects = [cv2.minAreaRect(contour) for contour in contours]
    # cv2.boxPoints replaced cv2.cv.BoxPoints in OpenCV 3
    if config.withOpenCV3:
        rotatedBoxes = [np.int0(cv2.boxPoints(rect)) for rect in rotatedRects]
    else:
        rotatedBoxes = [np.int0(cv2.cv.BoxPoints(rect)) for rect in rotatedRects]
    rotatedAreas = [cv2.contourArea(box) for box in rotatedBoxes]
    # per-contour feature scores
    sizeScores = [size(area) for area in areas]
    ratioScores = ratios(widths, heights)
    rotationScores = [rotation(rect) for rect in rotatedRects]
    rectangularScores = [distToPolygon(contour, poly) for contour, poly in zip(contours, rotatedBoxes)]
    areaScores = polygonAreaDiff(areas, rotatedAreas)
    quadScores = [Quadrify(contour) for contour in contours]
    rectangularScores = np.divide(rectangularScores, widths)  # normalize by contour width
    scores = np.array([sizeScores, ratioScores, rotationScores, rectangularScores, areaScores, quadScores])
    contourScores = np.dot(weights, scores)
    correctInds, incorrectInds = sortedInds(contourScores)
    correctContours = np.array(contours)[correctInds]
    # Mean total (unweighted) feature score per contour; equivalent to the
    # original element-by-element accumulation loop.
    averageScore = np.sum(scores) / numContours
    return averageScore
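
Because filterContoursAutocalibrate reduces a contour set to a single quality number, it lends itself to a parameter sweep. A hypothetical calibration loop follows; the HSV bounds, the candidate list, and the findContours signature are assumptions, not part of the original code:

import cv2
import numpy as np

def autocalibrate(hsv_image, candidate_bounds):
    """Pick the HSV bounds whose contours score best (hypothetical helper)."""
    best_score, best_bounds = float('-inf'), None
    for lower, upper in candidate_bounds:
        mask = cv2.inRange(hsv_image, np.array(lower, np.uint8), np.array(upper, np.uint8))
        # OpenCV 2.4/4.x signature; OpenCV 3.x returns a 3-tuple
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            continue
        score = filterContoursAutocalibrate(contours)
        if score > best_score:
            best_score, best_bounds = score, (lower, upper)
    return best_bounds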
# Requires: rospy, numpy, cv2, and cv_bridge (CvBridge, CvBridgeError);
# runs as a method of a node class that owns self.bridge.
def image_callback(self, msg):
    # convert ROS image to OpenCV image
    try:
        image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
    except CvBridgeError as e:
        print(e)
        return  # nothing to process without a converted image
    # create HSV image of the scene
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # find pink objects in the image
    lower_pink = numpy.array([139, 0, 240], numpy.uint8)
    upper_pink = numpy.array([159, 121, 255], numpy.uint8)
    mask = cv2.inRange(hsv, lower_pink, upper_pink)
    # close small holes (dilate then erode) with an 11x11 kernel;
    # morphologyEx returns the result rather than modifying mask in place
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, numpy.ones((11, 11), numpy.uint8))
    # find all of the contours in the mask image
    # (OpenCV 2.4/4.x signature; OpenCV 3.x returns a 3-tuple)
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    self.contourLength = len(contours)
    # check for at least one target found
    if self.contourLength < 1:
        print("No target found")
    else:  # target found
        # loop through all of the contours and get their areas
        area = [0.0] * len(contours)
        for i in range(self.contourLength):
            area[i] = cv2.contourArea(contours[i])
        #### Target #### the largest "pink" object
        target_image = contours[area.index(max(area))]
        # using moments, find the center of the object and draw a red outline around it
        target_m = cv2.moments(target_image)
        self.target_u = int(target_m['m10'] / target_m['m00'])
        self.target_v = int(target_m['m01'] / target_m['m00'])
        points = cv2.minAreaRect(target_image)
        # cv2.boxPoints in OpenCV 3+, cv2.cv.BoxPoints in OpenCV 2
        box = cv2.boxPoints(points) if hasattr(cv2, 'boxPoints') else cv2.cv.BoxPoints(points)
        box = numpy.int0(box)
        cv2.drawContours(image, [box], 0, (0, 0, 255), 2)
        rospy.loginfo("Center of target is x at %d and y at %d", int(self.target_u), int(self.target_v))
        self.target_found = True  # set flag for depth_callback processing
        # show image with target outlined with a red rectangle
        cv2.imshow("Target", image)
        cv2.waitKey(3)

# The companion depth_callback (not shown in this snippet) reads the Kinect
# depth image at the center of the pink target found here.
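
For context, a callback like this is registered with a rospy subscriber. A minimal sketch of the surrounding node; the topic name and class layout are assumptions, not part of the original snippet:

import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge

class PinkTargetTracker(object):
    # reuse the image_callback defined above as a bound method
    image_callback = image_callback

    def __init__(self):
        self.bridge = CvBridge()
        self.target_found = False
        # assumed topic name; depends on the camera driver in use
        rospy.Subscriber('/camera/rgb/image_raw', Image, self.image_callback)

if __name__ == '__main__':
    rospy.init_node('pink_target_tracker')
    PinkTargetTracker()
    rospy.spin()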
import numpy as np
import cv2

def detect_barcode(imageval):
    # load the image bytes and convert the decoded image to grayscale
    file_bytes = np.asarray(bytearray(imageval), dtype=np.uint8)
    img_data_ndarray = cv2.imdecode(file_bytes, cv2.IMREAD_UNCHANGED)  # CV_LOAD_IMAGE_UNCHANGED in OpenCV 2
    gray = cv2.cvtColor(img_data_ndarray, cv2.COLOR_BGR2GRAY)
    # compute the Scharr gradient magnitude representation of the image
    # in both the x and y direction (ksize=-1 selects the Scharr kernel)
    gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
    gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)
    # subtract the y-gradient from the x-gradient to highlight the
    # vertical bars of the barcode
    gradient = cv2.subtract(gradX, gradY)
    gradient = cv2.convertScaleAbs(gradient)
    # blur and threshold the image
    blurred = cv2.blur(gradient, (9, 9))
    (_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)
    # construct a closing kernel and apply it to the thresholded image
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 7))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    # perform a series of erosions and dilations to remove small blobs
    closed = cv2.erode(closed, None, iterations=4)
    closed = cv2.dilate(closed, None, iterations=4)
    # find the contours in the thresholded image
    # (OpenCV 2.4/4.x signature; OpenCV 3.x returns a 3-tuple)
    (cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
    if not cnts:
        return None  # no candidate region found
    # sort the contours by area, keeping only the largest one
    c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
    # compute the rotated bounding box of the largest contour
    rect = cv2.minAreaRect(c)
    # cv2.boxPoints in OpenCV 3+, cv2.cv.BoxPoints in OpenCV 2
    box = np.int0(cv2.boxPoints(rect) if hasattr(cv2, 'boxPoints') else cv2.cv.BoxPoints(rect))
    # draw a bounding box around the detected barcode and save the image
    cv2.drawContours(img_data_ndarray, [box], -1, (0, 255, 0), 3)
    # (debug alternatives: cv2.imshow / cv2.waitKey, or a timestamped output name)
    outputfile = "uploads/output.jpg"
    cv2.imwrite(outputfile, img_data_ndarray)
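
Since detect_barcode takes raw image bytes and decodes them itself with cv2.imdecode, calling it on a file from disk is a one-liner; the file name here is made up:

# Read raw bytes from a (hypothetical) image file and run the detector;
# the annotated result is written to uploads/output.jpg.
with open('barcode_sample.jpg', 'rb') as f:
    detect_barcode(f.read())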