def draw(self, image):
    if len(self.tilesByOrder) == 0:
        cv2.imshow("image", image)
    for tile in self.tilesByOrder:
        cv2.rectangle(image, (tile.wx, tile.wy), (tile.wx + tile.w, tile.wy + tile.h),
                      (0, 255, 0), 1)
        # Left bezel
        cv2.rectangle(image, (tile.wx - tile.l, tile.wy), (tile.wx, tile.wy + tile.h),
                      (40, 255, 40), -1)
        # Top bezel
        cv2.rectangle(image, (tile.wx - tile.l, tile.wy - tile.t), (tile.wx + tile.w, tile.wy),
                      (40, 255, 40), -1)
        # Right bezel
        cv2.rectangle(image, (tile.wx + tile.w, tile.wy - tile.t), (tile.wx + tile.w + tile.r, tile.wy + tile.h),
                      (40, 255, 40), -1)
        # Bottom bezel
        cv2.rectangle(image, (tile.wx - tile.l, tile.wy + tile.h), (tile.wx + tile.w + tile.r, tile.wy + tile.h + tile.b),
                      (40, 255, 40), -1)
        cv2.imshow("image", image)
Python cv2.imshow() example source code
def rotating_example():
    img = cv2.imread('./data/hi.jpg')
    vwriter = VideoWriterRGB('hi-video.avi')
    frames = 0
    for angle in range(0, 360, 5):
        rot = imutils.rotate(img, angle=angle)
        cv2.imshow("Angle = %d" % (angle), rot)
        vwriter.addFrame(rot)
        frames += 1
    for angle in range(360, 0, -5):
        rot = imutils.rotate(img, angle=angle)
        cv2.imshow("Angle = %d" % (angle), rot)
        vwriter.addFrame(rot)
        frames += 1
    vwriter.finalise()
    print("Created movie with %d frames" % frames)
def show_cut_img(img_name):
    img = cv2.imread(img_name, 0)
    cut_img = cut(img)
    cv2.imshow('cut image', cut_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return cut_img
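# cut() is defined elsewhere in that project. Given the grayscale input, one plausible minimal
# version (purely an assumption about what cut() does) crops the image to the bounding box of
# its dark foreground pixels:
import cv2

def cut(img, thresh=128):
    _, mask = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY_INV)  # dark pixels become foreground
    points = cv2.findNonZero(mask)
    if points is None:                      # nothing below the threshold: return the image unchanged
        return img
    x, y, w, h = cv2.boundingRect(points)
    return img[y:y + h, x:x + w]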
def dispact_and_update(img, hack, base_im, x, y, w, h):
    try:
        myurl = "http://facejack.westeurope.cloudapp.azure.com:5001/imsend"
        headers = {
            'content-type': "application/x-www-form-urlencoded",
            'cache-control': "no-cache"
        }
        r = requests.post(url=myurl, data=img, headers=headers, params={'hack': str(hack)}).json()
        reply = 'authentication' in r and r['authentication'] == "ALLOWED"
        # crop the detected face out of the frame and resize it for display
        disp_face = cv2.resize(base_im[y:y + h, x:x + w], (224, 224), interpolation=cv2.INTER_LANCZOS4)
        if reply:
            cv2.rectangle(disp_face, (0, 0), (222, 222), (0, 255, 0), 2)   # green border: allowed
        else:
            cv2.rectangle(disp_face, (0, 0), (222, 222), (0, 0, 255), 2)   # red border: denied
        cv2.imshow("Face", disp_face)
    finally:
        myl.release()
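# myl is a module-level lock in the original script, and the function above only ever releases it,
# so the caller presumably acquires it before dispatching the request on a worker thread. A hedged
# sketch of that calling pattern (the JPEG encoding and the non-blocking acquire are assumptions):
import threading
import cv2

myl = threading.Lock()

def dispatch_async(frame, x, y, w, h, hack=False):
    ok, buf = cv2.imencode('.jpg', frame[y:y + h, x:x + w])   # send the face crop as JPEG bytes
    if ok and myl.acquire(False):                             # skip if a request is already in flight
        threading.Thread(target=dispact_and_update,
                         args=(buf.tobytes(), hack, frame, x, y, w, h)).start()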
def generalBlur(srcpath, dstpath):
    img = cv2.imread(srcpath, 0)   # read the image as grayscale
    img1 = np.float32(img)         # convert to float for filtering
    kernel = np.ones((5, 5), np.float32) / 25
    dst = cv2.filter2D(img1, -1, kernel)
    # cv2.filter2D(src, ddepth, kernel, anchor=(-1, -1)):
    # convolves the image with the kernel; ddepth=-1 keeps the source depth,
    # and the default anchor (-1, -1) centres the kernel.
    plt.figure()
    plt.subplot(1, 2, 1), plt.imshow(img1, 'gray')
    # plt.savefig('test1.jpg')
    plt.subplot(1, 2, 2), plt.imshow(dst, 'gray')
    # plt.savefig('test2.jpg')
    plt.show()
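# The 5x5 kernel of ones divided by 25 is just a normalized mean (box) filter, so the
# filter2D call above gives the same result as OpenCV's built-in box blur:
import cv2
import numpy as np

img = np.random.rand(64, 64).astype(np.float32)      # any single-channel image works here
kernel = np.ones((5, 5), np.float32) / 25
assert np.allclose(cv2.filter2D(img, -1, kernel),    # hand-rolled mean kernel
                   cv2.blur(img, (5, 5)))            # built-in 5x5 box filter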
def click_and_crop(event, x, y, flags, param):
    global bbs, x_upper, id
    if event == cv2.EVENT_LBUTTONDOWN:
        if x_upper:
            bbs.append([x, y, 0, 0, 0, 0, 0, 0])
        else:
            bbs[-1][4] = x
            bbs[-1][5] = y
    elif event == cv2.EVENT_LBUTTONUP:
        if x_upper:
            bbs[-1][2] = abs(x - bbs[-1][0])
            bbs[-1][3] = abs(y - bbs[-1][1])
            bbs[-1][0] = min(x, bbs[-1][0])
            bbs[-1][1] = min(y, bbs[-1][1])
            cv2.rectangle(image, (bbs[-1][0], bbs[-1][1]),
                          (bbs[-1][0] + bbs[-1][2], bbs[-1][1] + bbs[-1][3]), (0, 0, 255), 2)
            # cv2.putText(image, 'Upper %d' % id, (bbs[-1][0], bbs[-1][1]),
            #             cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255))
        else:
            bbs[-1][6] = abs(x - bbs[-1][4])
            bbs[-1][7] = abs(y - bbs[-1][5])
            bbs[-1][4] = min(x, bbs[-1][4])
            bbs[-1][5] = min(y, bbs[-1][5])
            cv2.rectangle(image, (bbs[-1][4], bbs[-1][5]),
                          (bbs[-1][4] + bbs[-1][6], bbs[-1][5] + bbs[-1][7]), (0, 255, 0), 2)
            cv2.putText(image, 'Body %d' % id, (bbs[-1][4], bbs[-1][5]),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0))
        cv2.imshow("image", image)
        x_upper = not x_upper
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3] ** 2 * w
        hh = obj[4] ** 2 * h
        cv2.rectangle(im,
                      (int(centerx - ww / 2), int(centery - hh / 2)),
                      (int(centerx + ww / 2), int(centery + hh / 2)),
                      (0, 0, 255), 2)
    cv2.imshow("result", im)
    cv2.waitKey()
    cv2.destroyAllWindows()
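# The decoding above assumes the usual YOLO-style encoding: obj[5] is the flat index of the grid
# cell (column a, row b on an S x S grid), obj[1:3] are the box-centre offsets inside that cell,
# and obj[3:5] are square roots of the box size relative to the whole image. A worked example
# with made-up numbers:
S, w, h = 7, 448, 448
cellx, celly = w / S, h / S               # 64 x 64 pixel cells
obj = [None, 0.5, 0.5, 0.3, 0.4, 10]      # [class, x_off, y_off, sqrt(w_rel), sqrt(h_rel), cell_idx]
a, b = obj[5] % S, obj[5] // S            # column 3, row 1
centerx = (a + obj[1]) * cellx            # (3 + 0.5) * 64 = 224.0
centery = (b + obj[2]) * celly            # (1 + 0.5) * 64 = 96.0
ww = obj[3] ** 2 * w                      # 0.09 * 448 = 40.32
hh = obj[4] ** 2 * h                      # 0.16 * 448 = 71.68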
def next_batch(self):
    self.count += 1
    # print self.count
    start = self.index_in_epoch
    self.index_in_epoch += batch_size // pairs_per_img
    if self.index_in_epoch > self.number:
        self.index_in_epoch = 0
        start = self.index_in_epoch
        self.index_in_epoch += batch_size // pairs_per_img
    end = self.index_in_epoch
    data_batch, label_batch = generate_data(self.img_path_list[start])
    for i in range(start + 1, end):
        data, label = generate_data(self.img_path_list[i])   # [4, 2, 128, 128], [4, 1, 8]
        data_batch = np.concatenate((data_batch, data))       # [64, 2, 128, 128]
        label_batch = np.concatenate((label_batch, label))    # [64, 1, 8]
    data_batch = np.array(data_batch).transpose([0, 2, 3, 1])  # (64, 128, 128, 2)
    # cv2.imshow('window2', data_batch[1, :, :, 1].squeeze())
    # cv2.waitKey()
    label_batch = np.array(label_batch).squeeze()  # (64, 1, 8) -> (64, 8)
    return data_batch, label_batch
def next_batch(self):
    start = self.index_in_epoch
    self.index_in_epoch += batch_size // pairs_per_img
    if self.index_in_epoch > self.number:
        self.index_in_epoch = 0
        start = self.index_in_epoch
        self.index_in_epoch += batch_size // pairs_per_img
    end = self.index_in_epoch
    data_batch, label_batch = generate_data(self.img_path_list[start])
    for i in range(start + 1, end):
        data, label = generate_data(self.img_path_list[i])
        data_batch = np.concatenate((data_batch, data))
        label_batch = np.concatenate((label_batch, label))
    data_batch = np.array(data_batch).transpose([0, 2, 3, 1])
    # cv2.imshow('window2', data_batch[1, :, :, 1].squeeze())
    # cv2.waitKey()
    label_batch = np.array(label_batch).squeeze()
    return data_batch, label_batch
def make_mouse_callback(imgs, ref_pt):
    # initialize the list of reference points and boolean indicating
    # whether cropping is being performed or not
    cropping = [False]
    clone = imgs[0]

    def _click_and_crop(event, x, y, flags, param):
        # grab references to the global variables
        # global ref_pt, cropping
        # if the left mouse button was clicked, record the starting
        # (x, y) coordinates and indicate that cropping is being
        # performed
        if event == cv2.EVENT_LBUTTONDOWN:
            ref_pt[0] = (x, y)
            cropping[0] = True
        # check to see if the left mouse button was released
        elif event == cv2.EVENT_LBUTTONUP:
            # record the ending (x, y) coordinates and indicate that
            # the cropping operation is finished
            ref_pt[1] = (x, y)
            cropping[0] = False
            # draw a rectangle around the region of interest
            imgs[1] = image = clone.copy()
            cv2.rectangle(image, ref_pt[0], ref_pt[1], (0, 255, 0), 2)
            cv2.imshow("image", image)
        elif event == cv2.EVENT_MOUSEMOVE and cropping[0]:
            img2 = clone.copy()
            cv2.rectangle(img2, ref_pt[0], (x, y), (0, 255, 0), 2)
            imgs[1] = image = img2
            cv2.imshow("image", image)
    return _click_and_crop
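# One way to wire the factory above into a window (the blank canvas is just a placeholder;
# imgs[0] stays untouched while imgs[1] holds whatever the callback last drew):
import cv2
import numpy as np

image = np.full((480, 640, 3), 255, np.uint8)
imgs = [image, image.copy()]
ref_pt = [(0, 0), (0, 0)]

cv2.namedWindow("image")
cv2.setMouseCallback("image", make_mouse_callback(imgs, ref_pt))
while True:
    cv2.imshow("image", imgs[1])
    if cv2.waitKey(30) & 0xFF == ord('q'):   # press 'q' to quit
        break
cv2.destroyAllWindows()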
def test_minicap():
    from atx.drivers.android_minicap import AndroidDeviceMinicap
    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()
    while True:
        try:
            h, w = d._screen.shape[:2]
            img = cv2.resize(d._screen, (w // 2, h // 2))
            cv2.imshow('preview', img)
            key = cv2.waitKey(1)
            if key == 100:  # 'd' for dump
                filename = time.strftime('%Y%m%d%H%M%S.png')
                cv2.imwrite(filename, d._screen)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview')
def test_grid():
    m = StupidMonkey({'touch': 10})
    poss = []
    while True:
        pos = m.get_touch_point()
        if not pos:
            break
        poss.append(pos)
    print('grid point count: %d' % len(poss))

    import cv2
    import numpy
    img = numpy.zeros((1920, 1080))
    for x, y in poss:
        img[x, y] = 255
    img = cv2.resize(img, (540, 960))
    cv2.imshow('grid', img)
    cv2.waitKey()
def draw_boxes(im, bboxes, is_display=True, color=None, caption="Image", wait=True):
    """
    bboxes: bounding boxes
    """
    im = im.copy()
    for box in bboxes:
        if color is None:
            if len(box) == 5 or len(box) == 9:
                c = tuple(cm.jet([box[-1]])[0, 2::-1] * 255)
            else:
                c = tuple(np.random.randint(0, 256, 3))
        else:
            c = color
        cv2.rectangle(im, tuple(box[:2]), tuple(box[2:4]), c)
    if is_display:
        cv2.imshow(caption, im)
        if wait:
            cv2.waitKey(0)
    return im
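# A small usage sketch for draw_boxes(): 5-element boxes (x1, y1, x2, y2, score) are coloured
# through matplotlib's jet colormap, which is why the function needs `from matplotlib import cm`
# alongside numpy and cv2:
import cv2
import numpy as np
from matplotlib import cm

img = np.zeros((300, 400, 3), np.uint8)
boxes = [[20, 30, 120, 150, 0.9],    # high score -> warm colour
         [200, 50, 350, 220, 0.2]]   # low score -> cool colour
draw_boxes(img, boxes, is_display=True, caption="demo", wait=True)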
def markPoint(event, x, y, flags, param):
    global idx
    global data
    global input
    if event == cv2.EVENT_LBUTTONUP:
        data.append((x, y))
        cv2.circle(input, (x, y), 3, (0, 0, 255), 2)
        cv2.putText(input, str(idx), (x, y + 4), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 2, cv2.LINE_AA)
        cv2.imshow("Mark points", input)
        idx = idx + 1
def visualize_image(image, name="Image", resize=False, save_image=False, path=None):
    """Helper function to visualize and save any image"""
    image = image.reshape([IMAGE_WIDTH, IMAGE_HEIGHT])
    image = image.astype(np.uint8)
    if resize:
        image = cv2.resize(image, (IMAGE_WIDTH * 10, IMAGE_HEIGHT * 10))
    cv2.imshow(name, image)
    if cv2.waitKey(0) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
    if save_image:
        assert path is not None
        cv2.imwrite(path, image)
def image_preview(image):
    cv2.imshow('Image preview', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def display_solution(square_borders, start_grid, solution, image):
    """ Writes the solution to an image and displays said image.
    Params:
        square_borders -- A list containing the borders of all squares
        start_grid -- A list containing the sudoku starting values
        solution -- A list containing the sudoku solution
        image -- The image to write to """
    cur_row = 0
    cur_col = 0
    for i, b in enumerate(square_borders):
        x, y, x2, y2 = b  # Tuple unpacking
        # Calculate the bottom-left corner for the text position
        text_x, text_y = ((x2 + x) // 2) - 10, ((y2 + y) // 2) + 10
        org = (text_x, text_y)
        # Only write text if the position was not set in the start_grid
        if start_grid[cur_row][cur_col] == 0:
            value = str(solution[cur_row][cur_col])
            cv2.putText(
                img=image,
                text=value,
                org=org,
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1,
                color=(0, 255, 0),
                thickness=2)
        cur_col += 1
        if cur_col % 9 == 0:
            cur_row += 1
            cur_col = 0
    cv2.imshow('Solution', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def process_image(image):
    array_image = numpy.array(image)
    array_image = cv2.resize(array_image, (0, 0), fx=2, fy=2)
    # cv2.imshow('image', array_image)
    # cv2.waitKey(0)
    image = PIL.Image.fromarray(array_image)
    return image
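# One caveat when round-tripping like this: numpy.array(pil_image) is in RGB order, while
# OpenCV's I/O and display functions assume BGR, so anything colour-sensitive needs an
# explicit conversion, for example:
import cv2
import numpy
import PIL.Image

pil_img = PIL.Image.new("RGB", (64, 64), (255, 0, 0))             # a red PIL image
rgb = numpy.array(pil_img)                                        # RGB channel order
bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)                        # what cv2.imshow/imwrite expect
back = PIL.Image.fromarray(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))  # and back to PIL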
def test(self):
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        self.saver_z.restore(sess, self.encode_z_model)
        self.saver_y.restore(sess, self.encode_y_model)
        realbatch_array, _ = MnistData.getNextBatch(self.ds_train, self.label_y, 0, 50,
                                                    self.batch_size)
        output_image, label_y = sess.run([self.fake_images, self.e_y],
                                         feed_dict={self.images: realbatch_array})
        # one-hot
        # label_y = tf.arg_max(label_y, 1)
        print(label_y)
        save_images(output_image, [8, 8], './{}/test{:02d}_{:04d}.png'.format(self.sample_path, 0, 0))
        save_images(realbatch_array, [8, 8], './{}/test{:02d}_{:04d}_r.png'.format(self.sample_path, 0, 0))
        gen_img = cv2.imread('./{}/test{:02d}_{:04d}.png'.format(self.sample_path, 0, 0), 0)
        real_img = cv2.imread('./{}/test{:02d}_{:04d}_r.png'.format(self.sample_path, 0, 0), 0)
        cv2.imshow("test_EGan", gen_img)
        cv2.imshow("Real_Image", real_img)
        cv2.waitKey(-1)
        print("Test finish!")
def image_detector(self, imname, wait=0):
    detect_timer = Timer()
    image = cv2.imread(imname)

    detect_timer.tic()
    result = self.detect(image)
    detect_timer.toc()
    print('Average detecting time: {:.3f}s'.format(detect_timer.average_time))

    self.draw_result(image, result)
    cv2.imshow('Image', image)
    cv2.waitKey(wait)