def get_points():
    # Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ..., (7,5,0)
    objp = np.zeros((6*8, 3), np.float32)
    objp[:, :2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.
    # Make a list of calibration images
    images = glob.glob('calibration_wide/GO*.jpg')
    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (8, 6), None)
        # If found, add object points, image points
        if ret:
            objpoints.append(objp)
            imgpoints.append(corners)
            # Draw and display the corners
            cv2.drawChessboardCorners(img, (8, 6), corners, ret)
            # write_name = 'corners_found' + str(idx) + '.jpg'
            # cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)
    cv2.destroyAllWindows()
    return objpoints, imgpoints
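The lists returned by get_points() plug straight into cv2.calibrateCamera. A minimal follow-on sketch (the sample filename is hypothetical):

objpoints, imgpoints = get_points()
img = cv2.imread('calibration_wide/GOPR0032.jpg')  # hypothetical sample frame
img_size = (img.shape[1], img.shape[0])
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
undistorted = cv2.undistort(img, mtx, dist, None, mtx)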
def test_image(addr):
    target = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX
    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 2)
        face_crop = im[y:y+h, x:x+w]
        face_crop = cv2.resize(face_crop, (48, 48))
        face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
        face_crop = face_crop.astype('float32') / 255
        # The model expects a single-channel 48x48 input in channels-first layout
        face_crop = face_crop.reshape(1, 1, face_crop.shape[0], face_crop.shape[1])
        result = target[np.argmax(model.predict(face_crop))]
        cv2.putText(im, result, (x, y), font, 1, (200, 0, 0), 3, cv2.LINE_AA)
    cv2.imshow('result', im)
    cv2.imwrite('result.jpg', im)
    cv2.waitKey(0)
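test_image() relies on module-level faceCascade and model objects. A plausible setup sketch, assuming a channels-first Keras emotion model (the weights filename is hypothetical):

import cv2
import numpy as np
from keras.models import load_model

faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
model = load_model('emotion_model.h5')  # hypothetical weights; must accept (1, 1, 48, 48) input
test_image('face.jpg')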
def corners_unwarp(img, nx, ny, undistorted):
    M = None
    warped = np.copy(img)
    # The caller has already applied cv2.undistort() to remove distortion
    undist = undistorted
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    if ret:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100  # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])
        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                          [img_size[0]-offset, img_size[1]-offset],
                          [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)
    # Return the resulting image and matrix
    return warped, M
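A usage sketch for corners_unwarp(), assuming mtx and dist came from a prior cv2.calibrateCamera run:

img = cv2.imread('test_image.jpg')  # hypothetical input
undistorted = cv2.undistort(img, mtx, dist, None, mtx)
top_down, perspective_M = corners_unwarp(img, 8, 6, undistorted)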
def detectFace(image):
    cascadePath = "/usr/local/opt/opencv/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"
    FACE_SHAPE = 0.45
    imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(cascadePath)
    faceRect = cascade.detectMultiScale(imageGray, scaleFactor=1.1, minNeighbors=1, minSize=(1, 1))
    if len(faceRect) <= 0:
        return False
    # Confirm faces: keep only detections that are roughly square
    filteredFaceRects = []
    for faceR in faceRect:
        # Skip (rather than abort on) boxes whose aspect ratio is too far from square
        if FACE_SHAPE > min(faceR[2], faceR[3]) / float(max(faceR[2], faceR[3])):
            continue
        filteredFaceRects.append(faceR)
    return len(filteredFaceRects) > 0
def _get_corners(img, board, refine=True, checkerboard_flags=0):
    """
    Get corners for a particular chessboard for an image
    """
    h = img.shape[0]
    w = img.shape[1]
    if len(img.shape) == 3 and img.shape[2] == 3:
        mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        mono = img
    (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows),
                                              flags=cv2.CALIB_CB_ADAPTIVE_THRESH |
                                              cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags)
    if not ok:
        return (ok, corners)
    # If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false
    # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction
    # of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras
    BORDER = 8
    if not all([(BORDER < corners[i, 0, 0] < (w - BORDER)) and (BORDER < corners[i, 0, 1] < (h - BORDER)) for i in range(corners.shape[0])]):
        ok = False
    if refine and ok:
        # Use a radius of half the minimum distance between corners. This should be large enough to snap to the
        # correct corner, but not so large as to include a wrong corner in the search window.
        # Corners are returned row-major with n_cols entries per row, so the flat
        # index of (row, col) is row*n_cols + col.
        min_distance = float("inf")
        for row in range(board.n_rows):
            for col in range(board.n_cols - 1):
                index = row*board.n_cols + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + 1, 0]))
        for row in range(board.n_rows - 1):
            for col in range(board.n_cols):
                index = row*board.n_cols + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + board.n_cols, 0]))
        radius = int(math.ceil(min_distance * 0.5))
        cv2.cornerSubPix(mono, corners, (radius, radius), (-1, -1),
                         (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))
    return (ok, corners)
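_get_corners() assumes a _pdist helper; a minimal sketch of plain Euclidean distance between two corner points:

import math

def _pdist(p1, p2):
    # Distance between two 2-d points stored as (x, y)
    return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)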
def __init__(self, filename, folder=None, classifier=None):
    """
    :param filename: image with sudoku
    :param folder: folder where to save debug images
    :param classifier: digit classifier
    """
    self.filename = os.path.basename(filename)
    image = cv2.imread(filename)
    self.image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    self.folder = folder or FOLDER
    os.makedirs(os.path.join(self.folder, 'debug/'), exist_ok=True)
    self.classifier = classifier or DigitClassifier()
    # Default initial values
    self.perspective = False
    self.debug = True
    self.counter = 0
    self.step = -1
def apply_filters(self, image, denoise=False):
    """ This method applies the required filters to the
    extracted regions of interest. Every square in a
    sudoku grid is considered a region of interest,
    since it can potentially contain a value. """
    # Convert to grayscale
    source_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Denoise the grayscale image if requested in the params
    if denoise:
        denoised_gray = cv2.fastNlMeansDenoising(source_gray, None, 9, 13)
        source_blur = cv2.GaussianBlur(denoised_gray, BLUR_KERNEL_SIZE, 3)
        # source_blur = denoised_gray
    else:
        source_blur = cv2.GaussianBlur(source_gray, (3, 3), 3)
    source_thresh = cv2.adaptiveThreshold(source_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                          cv2.THRESH_BINARY_INV, 5, 2)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    source_eroded = cv2.erode(source_thresh, kernel, iterations=1)
    source_dilated = cv2.dilate(source_eroded, kernel, iterations=1)
    if ENABLE_PREVIEW_ALL:
        image_preview(source_dilated)
    return source_dilated
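apply_filters() references some module-level configuration and a preview helper. A sketch of plausible definitions (names and values are assumptions):

BLUR_KERNEL_SIZE = (3, 3)   # assumed kernel size for the denoise path
ENABLE_PREVIEW_ALL = False  # assumed debug flag

def image_preview(image):
    # Assumed debug helper: show an intermediate image until a key is pressed
    cv2.imshow('preview', image)
    cv2.waitKey(0)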
def CaptureImage():
    imageName = 'DontCare.jpg'  # Just a placeholder name
    cap = cv2.VideoCapture(0)
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # to capture the image in monochrome
        rgbImage = frame  # capture the image in color
        # Display the resulting frame
        cv2.imshow('Webcam', rgbImage)
        # Wait for the 'q' key to capture
        if cv2.waitKey(1) & 0xFF == ord('q'):
            # Set the image name to the date it was captured
            imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
            # Save the image
            cv2.imwrite(imageName, rgbImage)
            break
    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    # Return the captured image's name
    return imageName
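Usage is a single call; the function blocks until 'q' is pressed:

captured_name = CaptureImage()
print('Saved ' + captured_name)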
def compute(self, frame):
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    descriptor = []
    dominantGradients = np.zeros_like(frame)
    maxGradient = cv2.filter2D(frame, cv2.CV_32F, self.kernels[0])
    maxGradient = np.absolute(maxGradient)
    for k in range(1, len(self.kernels)):
        kernel = self.kernels[k]
        gradient = cv2.filter2D(frame, cv2.CV_32F, kernel)
        gradient = np.absolute(gradient)
        np.maximum(maxGradient, gradient, maxGradient)
        indices = (maxGradient == gradient)
        dominantGradients[indices] = k
    frameH, frameW = frame.shape
    for row in range(self.rows):
        for col in range(self.cols):
            mask = np.zeros_like(frame)
            # Integer division so the slice bounds stay ints under Python 3
            mask[(frameH//self.rows)*row:(frameH//self.rows)*(row+1),
                 (frameW//self.cols)*col:(frameW//self.cols)*(col+1)] = 255
            hist = cv2.calcHist([dominantGradients], [0], mask, self.bins, self.range)
            hist = cv2.normalize(hist, None)
            descriptor.append(hist)
    return np.concatenate(descriptor)
def load_frame_images(self):
    """
    Load images (or image pairs) from self.full_frame_folder_path
    """
    print("Loading frames from '{0:s}'".format(self.full_frame_folder_path))
    all_files = [f for f in os.listdir(self.full_frame_folder_path)
                 if osp.isfile(osp.join(self.full_frame_folder_path, f)) and f.endswith(".png")]
    all_files.sort()
    usable_frame_ct = sys.maxsize
    frame_number_sets = []
    for video in self.videos:
        # assume matching numbers in corresponding left & right files
        files = [f for f in all_files if f.startswith(video.name)]
        files.sort()  # added to be explicit
        cam_frame_ct = 0
        frame_numbers = []
        for ix_pair in range(len(files)):
            frame = cv2.imread(osp.join(self.full_frame_folder_path, files[ix_pair]))
            frame_number = int(re.search(r'\d{4}', files[ix_pair]).group(0))
            frame_numbers.append(frame_number)
            found, corners = cv2.findChessboardCorners(frame, self.board_dims)
            if not found:
                raise ValueError("Could not find corners in image '{0:s}'".format(files[ix_pair]))
            grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            cv2.cornerSubPix(grey, corners, (11, 11), (-1, -1), self.criteria_subpix)
            video.image_points.append(corners)
            video.usable_frames[frame_number] = ix_pair
            cam_frame_ct += 1
        usable_frame_ct = min(usable_frame_ct, cam_frame_ct)
        frame_number_sets.append(frame_numbers)
    if len(self.videos) > 1:
        # check that all cameras have the same frame number sets
        if len(frame_number_sets[0]) != len(frame_number_sets[1]):
            raise ValueError(
                "There are some non-paired frames in folder '{0:s}'".format(self.full_frame_folder_path))
        for i_fn in range(len(frame_number_sets[0])):
            fn0 = frame_number_sets[0][i_fn]
            fn1 = frame_number_sets[1][i_fn]
            if fn0 != fn1:
                # Parenthesize the concatenation so .format() applies to the whole message
                raise ValueError(("There are some non-paired frames in folder '{0:s}'." +
                                  " Check frame {1:d} for camera {2:s} and frame {3:d} for camera {4:s}.")
                                 .format(self.full_frame_folder_path,
                                         fn0, self.videos[0].name,
                                         fn1, self.videos[1].name))
    for i_frame in range(usable_frame_ct):
        self.object_points.append(self.board_object_corner_set)
    return usable_frame_ct
def _resolve_spec(im1, im2):
    im = im1.copy()
    img1 = cv.cvtColor(im1, cv.COLOR_BGR2GRAY)
    img2 = cv.cvtColor(im2, cv.COLOR_BGR2GRAY)
    # Best pixel selection criteria
    # 1. Pixel difference should be more than 20 (an experimental value; free to change).
    # 2. The best pixel should have less intensity.
    # 3. The pixel should not be pure black (an additional constraint
    #    to remove the black background created by warping).
    # Cast to int before subtracting so uint8 arithmetic cannot wrap around.
    diff = img1.astype(np.int16) - img2.astype(np.int16)
    mask = np.logical_and(diff > DIFF_THRESHOLD, img1 > img2)
    mask = np.logical_and(mask, img2 != 0)
    im[mask] = im2[mask]
    return im
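_resolve_spec() depends on a module-level DIFF_THRESHOLD; per the comment above, 20 is the experimental value. A usage sketch with hypothetical input files:

DIFF_THRESHOLD = 20  # assumed module constant

blended = _resolve_spec(cv.imread('view1.jpg'), cv.imread('view2.jpg'))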
def homography(self, img, outdir_name=''):
    orig = img
    # Binarize: grayscale, blur, then Canny edge detection
    gray = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)
    gauss = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(gauss, 50, 150)
    # Extract contours from the binary image (OpenCV 3 return signature)
    contours = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[1]
    # Sort contours by area, largest first
    contours.sort(key=cv2.contourArea, reverse=True)
    if len(contours) > 0:
        arclen = cv2.arcLength(contours[0], True)
        # Approximate the largest contour with a polygon
        approx = cv2.approxPolyDP(contours[0], 0.01 * arclen, True)
        # warp = approx.copy()
        if len(approx) >= 4:
            self.last_approx = approx.copy()
        else:
            # Fall back to the last good approximation
            approx = self.last_approx
    rect = self.get_rect_by_points(approx)
    # warped = self.transform_by4(orig, warp[:, 0, :])
    return orig[rect[0]:rect[1], rect[2]:rect[3]]
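homography() calls a get_rect_by_points helper that is not shown. Given that the return value is used to slice rows and then columns, one plausible sketch:

def get_rect_by_points(self, points):
    # Assumed helper: bounding box of the approximated quad,
    # returned as (top, bottom, left, right) to match the slicing above
    pts = points.reshape(-1, 2)
    xs, ys = pts[:, 0], pts[:, 1]
    return (int(ys.min()), int(ys.max()), int(xs.min()), int(xs.max()))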
def execute_Threshold(proxy, obj):
    try:
        img = obj.sourceObject.Proxy.img.copy()
    except Exception:
        img = cv2.imread(__dir__ + '/icons/freek.png')
    img = cv2.medianBlur(img, 5)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if obj.globalThresholding:
        ret, th1 = cv2.threshold(img, obj.param1, obj.param2, cv2.THRESH_BINARY)
        obj.Proxy.img = cv2.cvtColor(th1, cv2.COLOR_GRAY2RGB)
    if obj.adaptiveMeanTresholding:
        th2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 11, 2)
        obj.Proxy.img = cv2.cvtColor(th2, cv2.COLOR_GRAY2RGB)
    if obj.adaptiveGaussianThresholding:
        th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                    cv2.THRESH_BINARY, 17, 2)
        obj.Proxy.img = cv2.cvtColor(th3, cv2.COLOR_GRAY2RGB)
def detect_faces_from_picture(pic_file_path):
    print(">>> Let me check this picture: " + pic_file_path)
    frame = cv2.imread(pic_file_path)
    # Detect faces in the frame
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_frame, 1.3, 5)
    # Match the detected faces against the trained model
    if len(faces) > 0:
        print(">>> Someone is in the picture!")
        for (x, y, w, h) in faces:
            face = frame[y:y+h, x:x+w]
            result = model.predict(face)
            for index, name in model.getTrainCfg():
                if result == index:
                    print(">>> Aha, it's %s!" % name)
def find_bibs(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    binary = cv2.GaussianBlur(gray, (5, 5), 0)
    ret, binary = cv2.threshold(binary, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # binary = cv2.adaptiveThreshold(binary, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    # ret, binary = cv2.threshold(binary, 190, 255, cv2.THRESH_BINARY)
    # lapl = cv2.Laplacian(image, cv2.CV_64F)
    # gray = cv2.cvtColor(lapl, cv2.COLOR_BGR2GRAY)
    # blurred = cv2.GaussianBlur(lapl, (5, 5), 0)
    # ret, binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # cv2.imwrite("lapl.jpg", lapl)
    edges = cv2.Canny(image, 175, 200)
    cv2.imwrite("edges.jpg", edges)
    binary = edges
    cv2.imwrite("binary.jpg", binary)
    contours, hierarchy = find_contours(binary)
    return get_rectangles(contours)
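find_bibs() calls a find_contours helper that is not shown; a sketch of a version-agnostic wrapper (OpenCV 2.4 and 4.x return two values, 3.x returns three):

def find_contours(binary):
    # Assumed helper: normalize cv2.findContours across OpenCV versions
    result = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return result[-2], result[-1]  # (contours, hierarchy)

get_rectangles() remains project-specific and is not reconstructed here.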
def compute(self, frame):
    # frame is expected to be single-channel already
    # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    dx = cv2.filter2D(frame, cv2.CV_32F, self.xkernel)
    dy = cv2.filter2D(frame, cv2.CV_32F, self.ykernel)
    magnitudes, orientations = cv2.cartToPolar(dx, dy)
    descriptor = []
    frameH, frameW = frame.shape
    mask_threshold = magnitudes <= self.threshold
    for row in range(self.rows):
        for col in range(self.cols):
            mask = np.zeros_like(frame)
            # Integer division so the slice bounds stay ints under Python 3
            mask[(frameH//self.rows)*row:(frameH//self.rows)*(row+1),
                 (frameW//self.cols)*col:(frameW//self.cols)*(col+1)] = 1
            mask[mask_threshold] = 0
            hist = cv2.calcHist([orientations], self.channel, mask, [self.bins], self.range)
            hist = cv2.normalize(hist, None)
            descriptor.append(hist)
    return np.concatenate(descriptor)
def video3d(self, filename, color=False, skip=True):
    cap = cv2.VideoCapture(filename)
    nframe = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    if skip:
        # Sample self.depth frames evenly across the whole video
        frames = [x * nframe / self.depth for x in range(self.depth)]
    else:
        frames = [x for x in range(self.depth)]
    framearray = []
    for i in range(self.depth):
        cap.set(cv2.CAP_PROP_POS_FRAMES, frames[i])
        ret, frame = cap.read()
        # Note: cv2.resize takes dsize as (width, height)
        frame = cv2.resize(frame, (self.height, self.width))
        if color:
            framearray.append(frame)
        else:
            framearray.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    cap.release()
    return np.array(framearray)
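A usage sketch, assuming an instance whose depth/height/width fields are set (the instance name and file are hypothetical):

clip = loader.video3d('example.mp4', color=False)
print(clip.shape)  # (depth, rows, cols) for grayscale frames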
def __desaturate(src):
    """Converts a color image into shades of gray.
    Args:
        src: A color numpy.ndarray.
    Returns:
        A gray scale numpy.ndarray.
    """
    (a, b, channels) = src.shape
    if channels == 1:
        return numpy.copy(src)
    elif channels == 3:
        return cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    elif channels == 4:
        return cv2.cvtColor(src, cv2.COLOR_BGRA2GRAY)
    else:
        raise Exception("Input to desaturate must have 1, 3 or 4 channels")
def get_image_features(self, img_file, stride=5, padding=True):
    """
    Take an image file as input, and output an array of image features whose matrix size is
    based on the image size. When padding is False and the image is smaller than the required
    feature space size (in the x or y direction), the image is not checked, and this method
    returns a tuple of two empty lists. When padding is True and the image is more than
    4 pixels smaller than the required feature space size (in the x or y direction), the
    image is not checked either. This method can be used by both the trainer and the predictor.
    Args:
        img_file: The file name of the image.
        stride: Optional. The stride of the sliding.
        padding: Optional. Whether to pad the image to fit the feature space size or to
            discard the extra pixels if padding is False.
    Returns:
        coordinates: A list of coordinates, each of which contains y and x that are the top
            left corner offsets of the sliding window.
        features: A matrix (python list), in which each row contains the features of the
            sampling sliding window, while the number of rows depends on the image size of
            the input.
    """
    img = cv2.imread(img_file)
    img_arr = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return self.get_image_array_features(img_arr, stride, padding)
def process_frame(frame_number, frame, keypoint_data, detector, matcher):
    log = logging.getLogger("process_frame")
    # Create a copy of source frame to draw into
    processed = frame.copy()
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    kp, des = detector.detectAndCompute(frame, None)
    # Match descriptors
    matches = matcher.match(keypoint_data.descriptors, des)
    # Sort them in order of distance
    matches = sorted(matches, key=lambda x: x.distance)
    processed = drawMatches(cv2.imread('car.png', 0), keypoint_data.keypoints, gray_frame, kp, matches[:])
    return processed
# ============================================================================
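process_frame() takes a detector, matcher, and keypoint_data from the caller. A plausible setup sketch (ORB plus brute-force Hamming matching; the bundle type is an assumption):

import collections

KeypointData = collections.namedtuple('KeypointData', ['keypoints', 'descriptors'])

detector = cv2.ORB_create()
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
reference = cv2.imread('car.png', 0)
kp, des = detector.detectAndCompute(reference, None)
keypoint_data = KeypointData(kp, des)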
def camera_callback(self, msg):
    try:
        self.camera_data = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
    except cv_bridge.CvBridgeError:
        return
    gray = cv2.cvtColor(self.camera_data, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(blur, 30, 150)
    cv2.imshow("Robot Camera", canny)
    cv2.waitKey(1)
def bench(folder):
    from os.path import join
    from video_capture.av_file_capture import File_Capture
    cap = File_Capture(join(folder, 'marker-test.mp4'))
    markers = []
    detected_count = 0
    for x in range(500):
        frame = cap.get_frame()
        img = frame.img
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        markers = detect_markers_robust(gray_img, 5, prev_markers=markers, true_detect_every_frame=1, visualize=True)
        draw_markers(img, markers)
        cv2.imshow('Detected Markers', img)
        # for m in markers:
        #     if 'img' in m:
        #         cv2.imshow('id %s' % m['id'], m['img'])
        #         cv2.imshow('otsu %s' % m['id'], m['otsu'])
        if cv2.waitKey(1) == 27:
            break
        detected_count += len(markers)
    print(detected_count)  # 2900 #3042 #3021
def find_contour(self, img_src, Rxmin, Rymin, Rxmax, Rymax):
    cv2.rectangle(img_src, (Rxmax, Rymax), (Rxmin, Rymin), (0, 255, 0), 0)
    crop_res = img_src[Rymin:Rymax, Rxmin:Rxmax]
    grey = cv2.cvtColor(crop_res, cv2.COLOR_BGR2GRAY)
    _, thresh1 = cv2.threshold(grey, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2.imshow('Thresh', thresh1)
    # OpenCV 2.4-style two-value return
    contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # draw contours on the threshold image
    if len(contours) > 0:
        cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)
    return contours, crop_res
# Check ConvexHull and Convexity Defects
def execute_BlobDetector(proxy, obj):
    try:
        img = obj.sourceObject.Proxy.img.copy()
    except Exception:
        img = cv2.imread(__dir__ + '/icons/freek.png')
    im = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    im = 255 - im
    im2 = img
    params = cv2.SimpleBlobDetector_Params()
    params.filterByArea = True
    params.minArea = obj.Area
    params.filterByConvexity = True
    params.minConvexity = obj.Convexity / 200
    # Set up the detector with the parameters above.
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs.
    keypoints = detector.detect(im)
    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
    if not obj.showBlobs:
        im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        obj.Proxy.img = im_with_keypoints
        for k in keypoints:
            (x, y) = k.pt
            x = int(round(x))
            y = int(round(y))
            # Mark each blob center with a black dot
            cv2.circle(im, (x, y), 4, 0, 5)
            im[y, x] = 0
        obj.Proxy.img = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
    else:
        for k in keypoints:
            (x, y) = k.pt
            x = int(round(x))
            y = int(round(y))
            # Mark each blob center with a black dot
            cv2.circle(im2, (x, y), 4, (0, 0, 0), 5)
            im2[y, x] = (0, 0, 0)
        obj.Proxy.img = im2
def execute_GoodFeaturesToTrack(proxy, obj):
    '''
    https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.html
    '''
    try:
        img = obj.sourceObject.Proxy.img.copy()
    except Exception:
        img = cv2.imread(__dir__ + '/icons/freek.png')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    corners = cv2.goodFeaturesToTrack(gray, obj.maxCorners, obj.qualityLevel, obj.minDistance)
    corners = np.int0(corners)
    for i in corners:
        x, y = i.ravel()
        cv2.circle(img, (x, y), 3, 255, -1)
    obj.Proxy.img = img
def animpingpong(self):
    print(self)
    print(self.Object)
    print(self.Object.Name)
    obj = self.Object
    img = cv2.imread(obj.imageFile)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = np.float32(gray)
    dst = cv2.cornerHarris(gray, 3, 3, 0.00001)
    dst = cv2.dilate(dst, None)
    img[dst > 0.01*dst.max()] = [0, 0, 255]
    from matplotlib import pyplot as plt
    plt.subplot(121), plt.imshow(img, cmap='gray')
    plt.title('Corners Marked'), plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(dst, cmap='gray')
    plt.title('Harris Response'), plt.xticks([]), plt.yticks([])
    plt.show()
def main():
    # prepare object points
    nx = 8  # the number of inside corners in x
    ny = 6  # the number of inside corners in y
    # Make a list of calibration images
    fname = './calibration_wide/GOPR0058.jpg'
    img = cv2.imread(fname)
    plt.imshow(img)
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    # If found, draw corners
    if ret:
        # Draw and display the corners
        cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
        plt.imshow(img)
    plt.show()
def main():
    # Count down before recording starts
    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)
    c = 0
    last_time = time.time()
    while True:
        c += 1
        screen = grab_screen(title='')
        screenG = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
        screenG = cv2.resize(screenG, (80, 60))
        keys = key_check()
        output = keys_to_output(keys)
        training_data.append([screenG, output])
        if c % 10 == 0:
            print('Recording at ' + str(10 / (time.time() - last_time)) + ' fps')
            last_time = time.time()
        if len(training_data) % 500 == 0:
            print(len(training_data))
            np.save(file_name, training_data)
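keys_to_output() is not shown; a sketch of a plausible one-hot encoding for steering keys (the exact mapping is an assumption):

def keys_to_output(keys):
    # Assumed helper: one-hot [left, straight, right] from the pressed-key list
    output = [0, 0, 0]
    if 'A' in keys:
        output[0] = 1
    elif 'D' in keys:
        output[2] = 1
    else:
        output[1] = 1
    return output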
def process_img(img):
    original_image = img
    processed_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
    processed_img = cv2.GaussianBlur(processed_img, (3, 3), 0)
    copy = processed_img
    vertices = np.array([[30, 240], [30, 100], [195, 100], [195, 240]])
    processed_img = roi(processed_img, np.int32([vertices]))
    verticesP = np.array([[30, 270], [30, 230], [197, 230], [197, 270]])
    platform = roi(copy, np.int32([verticesP]))
    # edges
    # lines = cv2.HoughLinesP(platform, 1, np.pi/180, 180, np.array([]), 3, 2)
    # draw_lines(processed_img, lines)
    # draw_lines(original_image, lines)
    # Platform lines
    # imgray = cv2.cvtColor(platform, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(platform, 127, 255, 0)
    # OpenCV 3 returns (image, contours, hierarchy)
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(original_image, contours, -1, (0, 255, 0), 3)
    try:
        platformpos = contours[0][0][0]
    except IndexError:
        platformpos = [[0]]
    circles = cv2.HoughCircles(processed_img, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=90, param2=5, minRadius=1, maxRadius=3)
    ballpos = draw_circles(original_image, circles=circles)
    return processed_img, original_image, platform, platformpos, ballpos
def do_warp(M, warp):
    # Note: orig, maxWidth, maxHeight, image, and edged are module-level globals here
    warp = cv2.warpPerspective(orig, M, (maxWidth, maxHeight))
    # convert the warped image to grayscale and then adjust
    # the intensity of the pixels to have minimum and maximum
    # values of 0 and 255, respectively
    warp = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
    warp = exposure.rescale_intensity(warp, out_range=(0, 255))
    # the pokemon we want to identify will be in the top-right
    # corner of the warped image -- let's crop this region out
    (h, w) = warp.shape
    (dX, dY) = (int(w * 0.4), int(h * 0.45))
    crop = warp[10:dY, w - dX:w - 10]
    # save the cropped image to file
    cv2.imwrite("cropped.png", crop)
    # show our images
    cv2.imshow("image", image)
    cv2.imshow("edge", edged)
    cv2.imshow("warp", imutils.resize(warp, height=300))
    cv2.imshow("crop", imutils.resize(crop, height=300))
    cv2.waitKey(0)