def _thread(cls):
    # frame grabber loop
    while cfg.camera_active:
        sbuffer = io.BytesIO()  # JPEG data is binary, so use a bytes buffer
        camtest = False
        while not camtest:
            camtest, rawimg = cfg.camera.read()
        if cfg.cv_hflip:
            rawimg = cv2.flip(rawimg, 1)
        if cfg.cv_vflip:
            rawimg = cv2.flip(rawimg, 0)
        imgRGB = cv2.cvtColor(rawimg, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(imgRGB)
        img.save(sbuffer, 'JPEG')
        cls.frame = sbuffer.getvalue()
        # if no client has asked for a frame in the last
        # 10 seconds, stop the thread
        if time.time() - cls.last_access > 10:
            break
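For context, a minimal sketch of how the JPEG frames produced by this grabber loop might be served to browsers as an MJPEG stream, assuming a Flask app; the Camera wrapper, route name, and topic of the sketch are illustrative and not taken from the original project.

import time
from flask import Flask, Response

app = Flask(__name__)

class Camera(object):
    """Hypothetical stand-in for the class that owns _thread() above."""
    frame = None         # latest JPEG bytes written by the grabber loop
    last_access = 0.0    # timestamp of the most recent client request

    def get_frame(self):
        Camera.last_access = time.time()  # keep the grabber thread alive
        return Camera.frame

def mjpeg_stream(camera):
    # multipart/x-mixed-replace lets the browser replace each frame in place
    while True:
        frame = camera.get_frame()
        if frame is None:
            time.sleep(0.05)
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(mjpeg_stream(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')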
def plot_over_img(self, img, x, y, x_pr, y_pr, bb_gt):
"""Plot the landmarks over the image with the bbox."""
plt.close("all")
fig = plt.figure(frameon=False) # , figsize=(15, 10.8), dpi=200
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), aspect="auto")
ax.scatter(x, y, s=10, color='r')
ax.scatter(x_pr, y_pr, s=10, color='g')
rect = patches.Rectangle(
(bb_gt[0], bb_gt[1]), bb_gt[2]-bb_gt[0], bb_gt[3]-bb_gt[1],
linewidth=1, edgecolor='b', facecolor='none')
ax.add_patch(rect)
fig.add_axes(ax)
return fig
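A short usage sketch for plot_over_img, assuming x, y, x_pr, y_pr are arrays of ground-truth and predicted landmark coordinates and bb_gt is an [x_min, y_min, x_max, y_max] box; the image path, coordinates, and the viz object holding the method are all illustrative.

import cv2
import numpy as np

img = cv2.imread("face.jpg")                    # hypothetical input image (BGR)
x_gt = np.array([120, 180, 150, 130, 170])      # ground-truth landmark x coords
y_gt = np.array([140, 140, 170, 200, 200])      # ground-truth landmark y coords
x_pr = x_gt + np.random.randint(-5, 5, size=5)  # fake "predicted" landmarks
y_pr = y_gt + np.random.randint(-5, 5, size=5)
bb_gt = [100, 110, 210, 230]                    # [x_min, y_min, x_max, y_max]

# viz is whatever object defines plot_over_img above
fig = viz.plot_over_img(img, x_gt, y_gt, x_pr, y_pr, bb_gt)
fig.savefig("landmarks_overlay.png", dpi=150)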
def camera_callback(self, msg):
try:
self.camera_data = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
except cv_bridge.CvBridgeError:
return
gray = cv2.cvtColor(self.camera_data, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
canny = cv2.Canny(blur, 30, 150)
cv2.imshow("Robot Camera", canny)
cv2.waitKey(1)
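A minimal sketch of wiring camera_callback into a ROS node, assuming rospy and cv_bridge are available; the class, node name, and image topic are assumptions for illustration only.

import rospy
import cv_bridge
from sensor_msgs.msg import Image

class EdgeViewer(object):
    """Hypothetical node wrapper; camera_callback above would be this class's method."""
    def __init__(self):
        self.cv_bridge = cv_bridge.CvBridge()
        self.camera_data = None
        # Topic name is an assumption; use whatever your camera driver publishes.
        rospy.Subscriber("/camera/image_raw", Image, self.camera_callback)

    def camera_callback(self, msg):
        # stand-in for the callback shown above
        self.camera_data = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")

if __name__ == "__main__":
    rospy.init_node("edge_viewer")
    EdgeViewer()
    rospy.spin()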
def generate_avatar(dir, filename):
    """
    Crop an avatar from dir/filename and save it as dir/avatar_filename.
    :return: whether the avatar was generated successfully (bool)
    """
    pil_image = numpy.array(Image.open(os.path.join(dir, filename)))
    try:
        # PIL loads RGB; OpenCV expects BGR
        image = cv2.cvtColor(pil_image, cv2.COLOR_RGB2BGR)
    except cv2.error:
        # already single-channel (grayscale); use it as-is
        image = pil_image
    avatar = crop_avatar(image)
    if avatar is None:
        return False
    cv2.imwrite(os.path.join(dir, "avatar_" + filename), avatar)
    return True
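A short usage sketch, assuming crop_avatar is defined elsewhere in the same module; the directory and file names are illustrative.

if generate_avatar("uploads", "portrait.jpg"):
    print("avatar written to uploads/avatar_portrait.jpg")
else:
    print("avatar could not be cropped from uploads/portrait.jpg")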
def embed(self, ori_img, wm, key=10):
    B = ori_img
    if len(ori_img.shape) > 2:
        # embed in the luma (Y) channel of the YUV representation
        img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2YUV)
        B = img[:, :, 0]
    signature = BlindWatermark._gene_signature(wm, 256, key).flatten()
    w, h = B.shape[:2]
    if w < 64 or h < 64:
        print('Image is smaller than 64x64 pixels; watermark not embedded.')
        return ori_img
    if len(ori_img.shape) > 2:
        img[:, :, 0] = self.inner_embed(B, signature)
        ori_img = cv2.cvtColor(img, cv2.COLOR_YUV2BGR)
    else:
        ori_img = B
    return ori_img
def create_heatmaps(img, pred):
"""
Uses objectness probability to draw a heatmap on the image and returns it
"""
# find anchors with highest prediction
best_pred = np.max(pred[..., 0], axis=-1)
# convert probabilities to colormap scale
best_pred = np.uint8(best_pred * 255)
# apply color map
# cv2 colormaps create BGR, not RGB
cmap = cv2.cvtColor(cv2.applyColorMap(best_pred, cv2.COLORMAP_JET), cv2.COLOR_BGR2RGB)
# resize the color map to fit image
cmap = cv2.resize(cmap, img.shape[1::-1], interpolation=cv2.INTER_NEAREST)
# overlay cmap with image
return cv2.addWeighted(cmap, 1, img, 0.5, 0)
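A usage sketch for create_heatmaps, assuming pred is a grid of per-anchor predictions whose last-axis channel 0 holds objectness probabilities in [0, 1]; the grid shape, anchor count, and image path here are assumptions for illustration.

import cv2
import numpy as np

img = cv2.imread("street.jpg")                    # hypothetical input image
# Fake prediction grid: 13x13 cells, 5 anchors, channel 0 = objectness.
pred = np.random.rand(13, 13, 5, 6).astype(np.float32)

overlay = create_heatmaps(img, pred)
cv2.imwrite("objectness_heatmap.jpg", overlay)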
def detect(img):
img_h, img_w, _ = img.shape
inputs = cv2.resize(img, (settings.image_size, settings.image_size))
inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
inputs = (inputs / 255.0) * 2.0 - 1.0
inputs = np.reshape(inputs, (1, settings.image_size, settings.image_size, 3))
result = detect_from_cvmat(inputs)[0]
    print(result)
for i in range(len(result)):
result[i][1] *= (1.0 * img_w / settings.image_size)
result[i][2] *= (1.0 * img_h / settings.image_size)
result[i][3] *= (1.0 * img_w / settings.image_size)
result[i][4] *= (1.0 * img_h / settings.image_size)
return result
def find_contour(self, img_src, Rxmin, Rymin, Rxmax, Rymax):
cv2.rectangle(img_src, (Rxmax, Rymax), (Rxmin, Rymin), (0, 255, 0), 0)
crop_res = img_src[Rymin: Rymax, Rxmin:Rxmax]
grey = cv2.cvtColor(crop_res, cv2.COLOR_BGR2GRAY)
_, thresh1 = cv2.threshold(grey, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imshow('Thresh', thresh1)
        # findContours returns 3 values in OpenCV 3.x and 2 in 2.x/4.x; [-2:] works for all
        contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
# draw contour on threshold image
if len(contours) > 0:
cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)
return contours, crop_res
# Check ConvexHull and Convexity Defects
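The trailing comment points at a convex-hull / convexity-defects step; below is a minimal sketch of that follow-up, assuming the contours and crop_res returned by find_contour above. The helper name and the min_depth threshold are illustrative.

import cv2
import numpy as np

def count_convexity_defects(contours, crop_res, min_depth=10000):
    """Hypothetical helper: count deep convexity defects on the largest contour."""
    if not contours:
        return 0
    cnt = max(contours, key=cv2.contourArea)        # largest contour
    hull = cv2.convexHull(cnt, returnPoints=False)  # hull as contour point indices
    defects = cv2.convexityDefects(cnt, hull)
    if defects is None:
        return 0
    count = 0
    for i in range(defects.shape[0]):
        start_idx, end_idx, far_idx, depth = defects[i, 0]
        # depth is fixed-point: divide by 256.0 to get the distance in pixels
        if depth > min_depth:
            far = tuple(cnt[far_idx][0])
            cv2.circle(crop_res, far, 5, (0, 0, 255), -1)  # mark the defect point
            count += 1
    return count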
# From b3_data_iter.py (project: kaggle-dstl-satellite-imagery-feature-detection, author: u1234x1234)
def get_data(image_id, a_size, m_size, p_size, sf):
rgb_data = get_rgb_data(image_id)
rgb_data = cv2.resize(rgb_data, (p_size*sf, p_size*sf),
interpolation=cv2.INTER_LANCZOS4)
# rgb_data = rgb_data.astype(np.float) / 2500.
# print(np.max(rgb_data), np.mean(rgb_data))
# rgb_data[:, :, 0] = exposure.equalize_adapthist(rgb_data[:, :, 0], clip_limit=0.04)
# rgb_data[:, :, 1] = exposure.equalize_adapthist(rgb_data[:, :, 1], clip_limit=0.04)
# rgb_data[:, :, 2] = exposure.equalize_adapthist(rgb_data[:, :, 2], clip_limit=0.04)
A_data = get_spectral_data(image_id, a_size*sf, a_size*sf, bands=['A'])
M_data = get_spectral_data(image_id, m_size*sf, m_size*sf, bands=['M'])
P_data = get_spectral_data(image_id, p_size*sf, p_size*sf, bands=['P'])
# lab_data = cv2.cvtColor(rgb_data, cv2.COLOR_BGR2LAB)
P_data = np.concatenate([rgb_data, P_data], axis=2)
return A_data, M_data, P_data
def plot_face_bb(p, bb, scale=True, path=True, plot=True):
if path:
im = cv2.imread(p)
else:
im = cv2.cvtColor(p, cv2.COLOR_RGB2BGR)
if scale:
h, w, _ = im.shape
cv2.rectangle(im, (int(bb[0] * h), int(bb[1] * w)),
(int(bb[2] * h), int(bb[3] * w)),
(255, 255, 0), thickness=4)
# print bb * np.asarray([h, w, h, w])
else:
cv2.rectangle(im, (int(bb[0]), int(bb[1])), (int(bb[2]), int(bb[3])),
(255, 255, 0), thickness=4)
        print("no")
if plot:
plt.figure()
plt.imshow(im[:, :, ::-1])
else:
return im[:, :, ::-1]
def animpingpong(self):
    obj = self.Object
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print("copying image ...")
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print("copied")
    print("loaded")
    # print(obj.blockSize, obj.ksize, obj.k)
    # edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    # color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    # edges = color
    kernel = np.ones((obj.xsize, obj.ysize), np.uint8)
    opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=obj.iterations)
    if True:
        print("showing image")
        cv2.imshow(obj.Label, opening)
        print("shown")
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Source Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(opening, cmap='gray')
        plt.title('Opened Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    print("done")
    self.img = opening
def animpingpong(self):
    obj = self.Object
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print("copying image ...")
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print("copied")
    print("loaded")
    # print(obj.blockSize, obj.ksize, obj.k)
    edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    if True:
        print("showing image")
        cv2.imshow(obj.Label, edges)
        print("shown")
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Source Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(edges, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    print("done")
    self.img = edges
def animpingpong(self):
    obj = self.Object
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print("copying image ...")
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print("copied")
    print("loaded")
    # print(obj.blockSize, obj.ksize, obj.k)
    # edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    # color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    # edges = color
    kernel = np.ones((obj.xsize, obj.ysize), np.uint8)
    closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=obj.iterations)
    if True:
        print("showing image")
        cv2.imshow(obj.Label, closing)
        print("shown")
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Source Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(closing, cmap='gray')
        plt.title('Closed Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    print("done")
    self.img = closing
def execute_BlobDetector(proxy,obj):
try: img=obj.sourceObject.Proxy.img.copy()
except: img=cv2.imread(__dir__+'/icons/freek.png')
im = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
im=255-im
im2 = img
params = cv2.SimpleBlobDetector_Params()
params.filterByArea = True
params.minArea = obj.Area
params.filterByConvexity = True
params.minConvexity = obj.Convexity/200
# Set up the detector with default parameters.
detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs.
keypoints = detector.detect(im)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
if not obj.showBlobs:
im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
obj.Proxy.img = im_with_keypoints
for k in keypoints:
(x,y)=k.pt
x=int(round(x))
y=int(round(y))
# cv2.circle(im,(x,y),4,0,5)
cv2.circle(im,(x,y),4,255,5)
cv2.circle(im,(x,y),4,0,5)
im[y,x]=255
im[y,x]=0
obj.Proxy.img = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
else:
for k in keypoints:
(x,y)=k.pt
x=int(round(x))
y=int(round(y))
cv2.circle(im2,(x,y),4,(255,0,0),5)
cv2.circle(im2,(x,y),4,(0,0,0),5)
im2[y,x]=(255,0,0)
im2[y,x]=(0,0,0)
obj.Proxy.img = im2
def execute_ColorSpace(proxy,obj):
try: img=obj.sourceObject.Proxy.img.copy()
except: img=cv2.imread(__dir__+'/icons/freek.png')
hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
lower = np.array([max(obj.h1-obj.h2,0),max(obj.s1-obj.s2,0),max(obj.v1-obj.v2,0)])
upper = np.array([min(obj.h1+obj.h2,255),min(obj.s1+obj.s2,255),min(obj.v1+obj.v2,255)])
    say("HSV bounds:")
    say(lower)
    say(upper)
    # build the mask in HSV space
    mask = cv2.inRange(hsv, lower, upper)
res = cv2.bitwise_and(img,img, mask= mask)
obj.Proxy.img=res
def execute_GoodFeaturesToTrack(proxy,obj):
'''
https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.html
'''
try: img=obj.sourceObject.Proxy.img.copy()
except: img=cv2.imread(__dir__+'/icons/freek.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray,obj.maxCorners,obj.qualityLevel,obj.minDistance)
    corners = corners.astype(np.intp)  # np.int0 is removed in newer NumPy; intp is its equivalent
for i in corners:
x,y = i.ravel()
cv2.circle(img,(x,y),3,255,-1)
obj.Proxy.img = img
def execute_HSV(proxy,obj):
say("hsv ..")
try: img=obj.sourceObject.Proxy.img.copy()
except: img=cv2.imread(__dir__+'/icons/freek.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower=np.array([obj.valueColor-obj.deltaColor,0,0])
upper=np.array([obj.valueColor+obj.deltaColor,255,255])
mask = cv2.inRange(hsv, lower, upper)
res = cv2.bitwise_and(hsv,hsv, mask= mask)
obj.Proxy.img=res
def animpingpong(self):
    print(self)
    print(self.Object)
    print(self.Object.Name)
obj=self.Object
img = cv2.imread(obj.imageFile)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray,3,3,0.00001)
dst = cv2.dilate(dst,None)
img[dst>0.01*dst.max()]=[0,0,255]
from matplotlib import pyplot as plt
plt.subplot(121),plt.imshow(img,cmap = 'gray')
plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(dst,cmap = 'gray')
plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
plt.show()
def img_process(im, landmark, print_img=False):
"""
Image processing, rotate, resize, and crop the face image.
Args:
im: numpy array, Original image
landmark: 5 landmark points
Return:
Crop face region
"""
if landmark is None:
im_rez = cv2.resize(im, (cfg.crop_size, cfg.crop_size))
return im_rez
im_rot, ang, r_landmark = im_rotate(im, landmark)
im_rez, resize_scale, rez_landmark = im_resize(im_rot, r_landmark, ang)
crop = im_crop(im_rez, rez_landmark, resize_scale)
    if cfg.forcegray:
crop = cv2.cvtColor(crop, cv2.COLOR_RGB2GRAY)
# print('Shapes' + str(im_rot.shape) + str(im_rez.shape) + str(crop.shape))
# return im_rot, im_rez, crop, (crop.astype(np.float) - cfg.PIXEL_MEANS) / cfg.scale
if print_img:
return im, im_rot, im_rez, crop
return crop
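A usage sketch for img_process, assuming cfg.crop_size and cfg.forcegray are configured and that landmark is an array of five (x, y) points (eyes, nose, mouth corners); the image path, coordinates, and landmark ordering are assumptions for illustration.

import cv2
import numpy as np

im = cv2.imread("face.jpg")              # hypothetical input image
landmark = np.array([[ 98, 112],         # left eye
                     [150, 110],         # right eye
                     [124, 140],         # nose tip
                     [103, 168],         # left mouth corner
                     [147, 170]])        # right mouth corner

crop = img_process(im, landmark)         # rotate, resize, and crop the face
plain = img_process(im, None)            # no landmarks: falls back to a plain resize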