def calibrateColor(color, def_range):
    """Interactively calibrate an HSV range for `color` using trackbars.

    Press SPACE to accept the current range, or 'd' to keep `def_range`.
    Relies on the module-level `cap` (cv2.VideoCapture), `kernel`
    (morphology kernel) and `nothing` (no-op trackbar callback).
    """
    global kernel
    name = 'Calibrate ' + color
    cv2.namedWindow(name)
    # The stored lower bound is hue-20, so seed the trackbar with the hue itself.
    cv2.createTrackbar('Hue', name, def_range[0][0] + 20, 180, nothing)
    cv2.createTrackbar('Sat', name, def_range[0][1], 255, nothing)
    cv2.createTrackbar('Val', name, def_range[0][2], 255, nothing)
    while True:
        ret, frameinv = cap.read()
        # Mirror the frame so the preview behaves like a mirror.
        frame = cv2.flip(frameinv, 1)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hue = cv2.getTrackbarPos('Hue', name)
        sat = cv2.getTrackbarPos('Sat', name)
        val = cv2.getTrackbarPos('Val', name)
        lower = np.array([hue - 20, sat, val])
        upper = np.array([hue + 20, 255, 255])
        mask = cv2.inRange(hsv, lower, upper)
        # Open the mask (erode then dilate) to suppress speckle noise.
        eroded = cv2.erode(mask, kernel, iterations=1)
        dilated = cv2.dilate(eroded, kernel, iterations=1)
        cv2.imshow(name, dilated)
        k = cv2.waitKey(5) & 0xFF
        if k == ord(' '):  # accept the calibrated range
            cv2.destroyWindow(name)
            return np.array([[hue - 20, sat, val], [hue + 20, 255, 255]])
        elif k == ord('d'):  # fall back to the default range
            cv2.destroyWindow(name)
            return def_range
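# A minimal usage sketch for calibrateColor(). The module-level names `cap`,
# `kernel` and `nothing` are assumptions inferred from the function body;
# they are not defined anywhere in this listing.
import cv2
import numpy as np

def nothing(x):
    pass  # no-op trackbar callback required by cv2.createTrackbar

cap = cv2.VideoCapture(0)
kernel = np.ones((5, 5), np.uint8)
# Illustrative default HSV range for a blue marker: [[low], [high]].
blue_default = np.array([[100, 100, 100], [140, 255, 255]])
blue_range = calibrateColor('blue', blue_default)
print('calibrated range:', blue_range)
cap.release()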
def select_roi(self):
    """Prompt the user to select a region of interest on the current frame.
    Args:
        None.
    Returns:
        ROI (tuple): the selected ROI as (x, y, w, h).
    """
    ROI = cv2.selectROI('Select region of interest...', self.current_frame,
                        False, False)
    cv2.destroyWindow('Select region of interest...')
    return ROI
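# Hedged usage sketch: cv2.selectROI returns an (x, y, w, h) tuple, so the
# result of select_roi() can be used to crop the frame (`tracker` is an
# illustrative instance name, not part of the original listing).
x, y, w, h = tracker.select_roi()
crop = tracker.current_frame[y:y + h, x:x + w]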
def realtime():
    """Classify hand gestures from the webcam in real time.

    Assumes a trained Keras `model`, Keras' `ImageDataGenerator`, and a
    plotting helper `update()` are available in the enclosing scope.
    """
    # Initialize the preview window and camera.
    cv2.namedWindow("preview")
    vc = cv2.VideoCapture(0)
    if vc.isOpened():  # get the first frame
        rval, frame = vc.read()
    else:
        rval = False
    classes = ["peace", "punch", "stop", "thumbs_up"]
    # Build the generator once; rebuilding it on every frame is wasteful.
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    while rval:
        frame = cv2.flip(frame, 1)
        cv2.rectangle(frame, (300, 200), (500, 400), (0, 255, 0), 1)
        cv2.putText(frame, "Place your hand in the green box.", (50, 50),
                    cv2.FONT_HERSHEY_PLAIN, 1, 255)
        cv2.putText(frame, "Press esc to exit.", (50, 100),
                    cv2.FONT_HERSHEY_PLAIN, 1, 255)
        cv2.imshow("preview", frame)
        # Crop the 200x200 region inside the green box.
        frame = frame[200:400, 300:500]
        # OpenCV frames are BGR, so convert with COLOR_BGR2GRAY.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Reshape to the (batch, height, width, channels) layout Keras expects.
        frame = frame.reshape((1,) + frame.shape + (1,))
        m = test_datagen.flow(frame, batch_size=1)
        y_pred = model.predict_generator(m, 1)
        histarray2 = {'PEACE': y_pred[0][0], 'PUNCH': y_pred[0][1],
                      'STOP': y_pred[0][2], 'Thumbs Up': y_pred[0][3]}
        update(histarray2)
        print(classes[int(y_pred[0].argmax())])
        rval, frame = vc.read()
        key = cv2.waitKey(20)
        if key == 27:  # exit on ESC
            break
    cv2.destroyWindow("preview")
    vc.release()
#loading the model
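# The model-loading step the comment above refers to is elided in the
# original listing. A minimal sketch, assuming a Keras HDF5 checkpoint;
# the file name is hypothetical.
from keras.models import load_model
model = load_model('gesture_cnn.h5')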
def destroy(self):
cv2.destroyWindow(self.winName)
def show_img(img, boxes=None, window_name="Happy Dance Image",
             msec_to_show_for=1500, save=False, filepath=None):
    """Show an image, potentially with surrounding bounding boxes.
    Args:
    ----
        img: np.ndarray
        boxes (optional): dct of bounding boxes where the keys hold the name
            (actual or predicted) and the values the coordinates of the boxes
        window_name (optional): str
        msec_to_show_for (optional): int
        save (optional): bool, write to `filepath` instead of displaying
        filepath (optional): str, destination used when `save` is True
    """
    img_copy = img.copy()  # Any drawing is in place. Draw on a copy to protect the original.
    if boxes:
        color_dct = {'actual': (125, 255, 0), 'predicted': (0, 25, 255)}
        for box_type, box_coords in boxes.items():
            cv2.rectangle(img_copy,
                          pt1=(box_coords[0], box_coords[1]),
                          pt2=(box_coords[2], box_coords[3]),
                          color=color_dct[box_type],
                          thickness=2)
    if not save:
        cv2.imshow(window_name, img_copy)
        cv2.waitKey(msec_to_show_for)
        cv2.destroyWindow(window_name)
    else:
        cv2.imwrite(filepath, img_copy)
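# Hedged usage sketch for show_img(); the image path and box coordinates
# below are purely illustrative.
img = cv2.imread('example.jpg')
show_img(img,
         boxes={'actual': (10, 10, 120, 120),
                'predicted': (14, 8, 124, 118)},
         msec_to_show_for=2000)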
def capture_frame():
    # Open the "default" camera
    vc = cv2.VideoCapture(0)
    # Check if we succeeded in opening the camera feed
    if vc.isOpened():
        rval, frame = vc.read()
    else:
        rval, frame = False, None
    # Display captured frames in a new window
    cv2.namedWindow("Camera Video Feed")
    while rval:
        cv2.imshow("Camera Video Feed", frame)
        rval, frame = vc.read()
        key = cv2.waitKey(20)
        if key == 27:  # User pressed ESC key
            break
        elif key == ord('s'):  # 's' also stops capture and saves the frame
            break
    # Destroy window
    cv2.destroyWindow("Camera Video Feed")
    # Close VideoCapture feed -- important!
    vc.release()
    # Save the last captured frame
    cv2.imwrite('../images/captured_frame.png', frame)
    return frame
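# Usage sketch: capture_frame() blocks until ESC or 's' is pressed, then
# writes and returns the last frame.
captured = capture_frame()
print('captured frame shape:', captured.shape)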
def track_obj(low_hsv, high_hsv):
    # Open the "default" camera
    vc = cv2.VideoCapture(0)
    # Check if we succeeded in opening the camera feed
    if vc.isOpened():
        rval, frame = vc.read()
    else:
        rval, frame = False, None
    # Display captured frames in a new window
    cv2.namedWindow("Camera Video Feed")
    # Display the filtered object frame in a new window
    cv2.namedWindow("Tracking")
    result = frame
    while rval:
        cv2.imshow("Camera Video Feed", frame)
        cv2.imshow("Tracking", result)
        rval, frame = vc.read()
        if not rval:  # Guard against a failed read before processing
            break
        # Convert to HSV space
        frameHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Threshold the HSV image, keeping components in the selected range
        mask = cv2.inRange(frameHSV, low_hsv, high_hsv)
        result = cv2.bitwise_and(frame, frame, mask=mask)
        # Wait for ESC key press
        key = cv2.waitKey(20)
        if key == 27:  # User pressed ESC key
            break
    # Destroy both windows
    cv2.destroyWindow("Camera Video Feed")
    cv2.destroyWindow("Tracking")
    # Close VideoCapture feed -- important!
    vc.release()
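# Hedged usage sketch: track a blue object. The HSV bounds are illustrative;
# in practice they could come from calibrateColor() above.
low_blue = np.array([100, 100, 100])
high_blue = np.array([140, 255, 255])
track_obj(low_blue, high_blue)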
def klick_landmarks_on_image():
    """Collect 68 hand-clicked landmarks on an image and write a .pts file.

    Relies on the module-level mouse callback `click` and the helpers
    `show_lms_on_image` and `draw_lms_on_image`.
    """
    global current_landmark, klicked_landmarks
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", click)
    show_lms_on_image()
    image = cv2.imread('/user/HS204/m09113/Downloads/face_synthesis/M1000_22_L0_V9R_N_small.JPG')
    for lm_idx in range(68):
        while True:
            temp_image = image.copy()
            lms_to_be_shown = klicked_landmarks
            if len(current_landmark) > 0:
                lms_to_be_shown = klicked_landmarks + [current_landmark]
            if len(lms_to_be_shown) > 0:
                draw_lms_on_image(temp_image, lms_to_be_shown)
            cv2.imshow("image", temp_image)
            key = cv2.waitKey(1) & 0xFF
            if key == ord(" "):
                # Only advance once a landmark has actually been clicked,
                # otherwise the file would end up with fewer than 68 points.
                if len(current_landmark) > 0:
                    klicked_landmarks.append(current_landmark)
                    break
            if key == ord("q"):  # abort
                return 0
        current_landmark = []
    cv2.destroyWindow("image")
    # Now write the landmark file in the 68-point .pts format.
    landmark_file = '/user/HS204/m09113/Downloads/face_synthesis/M1000_22_L0_V9R_N_small.pts'
    with open(landmark_file, "w") as lf:
        lf.write('version: 1\n')
        lf.write('n_points: 68\n')
        lf.write('{\n')
        for landmark in klicked_landmarks:
            lf.write(str(landmark[0]) + " " + str(landmark[1]) + "\n")
        lf.write('}\n')
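# The mouse callback `click` is not shown in this listing. A minimal sketch
# of what it might look like, assuming it records the clicked position in
# the global `current_landmark`:
def click(event, x, y, flags, param):
    global current_landmark
    if event == cv2.EVENT_LBUTTONDOWN:
        current_landmark = [x, y]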
def main():
    all_bb = []
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", click)
    images = glob.glob('/user/HS204/m09113/facer2vm_project_area/data/300VW_Dataset_2015_12_14/*/frames/000001.png')
    output_file_path = '/user/HS204/m09113/facer2vm_project_area/data/300VW_Dataset_2015_12_14/bb_clicked_philipp.log'
    for i, image_path in enumerate(images):
        print('image', image_path, '(', i, 'of', len(images), ')')
        image = cv2.imread(image_path)
        upper_left_point, lower_right_point = click_bb_on_image(image)
        all_bb.append([upper_left_point[0], upper_left_point[1],
                       lower_right_point[0], lower_right_point[1]])
        # Append this image's bounding box to the log; a context manager
        # ensures the file handle is closed promptly.
        with open(output_file_path, 'a') as f:
            f.write(str(image_path) + ' ' + str(upper_left_point[0]) + ' ' +
                    str(upper_left_point[1]) + ' ' + str(lower_right_point[0]) +
                    ' ' + str(lower_right_point[1]) + '\n')
    cv2.destroyWindow("image")
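# Hedged sketch: reading back one line of the bounding-box log written by
# main() above (whitespace-separated path plus four integer coordinates).
with open(output_file_path) as f:
    for line in f:
        parts = line.split()
        image_path = parts[0]
        x1, y1, x2, y2 = (int(v) for v in parts[1:5])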
def destroy_window(self):
cv2.destroyWindow(self._window_name)
self._isWindowCreated = False
def _close(self):
if self.verbose:
print('Closing window')
print('\n--------------------------------------')
print('Colorspace:', self.cspace)
if self.cspace == 'Grayscale':
print('Lower bound:', self._lowerb[0])
print('Upper bound:', self._upperb[0])
else:
print('Lower bounds:', self._lowerb)
print('Upper bounds:', self._upperb)
print('--------------------------------------\n')
cv2.destroyWindow(self.name)
def main():
window = 'preview'
cv2.namedWindow(window)
tfrecord_file_names = glob(path.join('data', '*.tfrecord.gz'))
max_reads = 50
batch_size = 50
with tf.Graph().as_default() as graph:
image_batch, type_batch = import_images(tfrecord_file_names, max_reads=max_reads, batch_size=batch_size)
coord = tf.train.Coordinator()
with tf.Session(graph=graph) as sess:
init = tf.group(tf.local_variables_initializer(), tf.global_variables_initializer())
sess.run(init)
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
while not coord.should_stop():
Xs = sess.run(image_batch)
for img in Xs:
cv2.imshow(window, img)
if (cv2.waitKey(33) & 0xff) == 27:
coord.request_stop()
break
except tf.errors.OutOfRangeError:
print('Read all examples.')
finally:
coord.request_stop()
coord.join(threads)
cv2.destroyWindow(window)
def extract_video_frames(queue: PriorityQueue,
                         source: int,
                         cap: cv2.VideoCapture,
                         crop: Tuple[int, int, int, int],
                         target_width: int,
                         target_height: int,
                         frame_step: int = 1,
                         display_progress: bool = False):
    window = 'video'
    if display_progress:
        cv2.namedWindow(window)
    while True:
        success, buffer = cap.read()
        if not success:
            break
        # Crop the borders (top, left, bottom, right). The margins must be
        # positive: a 0 would make the negative slice index select nothing.
        buffer = buffer[crop[0]:-crop[2], crop[1]:-crop[3], :]
        buffer = cv2.resize(buffer, (target_width, target_height),
                            interpolation=cv2.INTER_AREA)
        frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
        # A random priority shuffles the frames across the priority queue.
        random_priority = random()
        queue.put((random_priority, (buffer, source)))
        if display_progress:
            cv2.imshow(window, buffer)
            if (cv2.waitKey(33) & 0xff) == 27:
                break
        # Skip ahead by frame_step frames.
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame + frame_step)
    if display_progress:
        cv2.destroyWindow(window)
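# Hedged usage sketch for extract_video_frames(); the video path, crop
# margins and target size are illustrative.
from queue import PriorityQueue
q = PriorityQueue()
cap = cv2.VideoCapture('input.mp4')
extract_video_frames(q, source=0, cap=cap, crop=(10, 10, 10, 10),
                     target_width=320, target_height=180,
                     frame_step=5, display_progress=True)
cap.release()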
def run(self):
    video_capture = cv2.VideoCapture(0)
    while True:
        got_a_frame, image = video_capture.read()
        if not got_a_frame:  # guard against a failed read
            break
        grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.imshow('camera', grayimage)
        key = cv2.waitKey(50)
        if key == 27:  # exit on ESC
            break
    video_capture.release()
    cv2.destroyWindow('camera')
# Create new threads
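# The thread-creation step the comment above refers to is elided in the
# original. A minimal sketch, assuming the run() method above belongs to a
# threading.Thread subclass named CameraThread (the name is hypothetical):
camera_thread = CameraThread()
camera_thread.start()
camera_thread.join()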
def imshow(im, window_name='default'):
    cv2.imshow(window_name, im)
    key = cv2.waitKey(0)
    cv2.destroyWindow(window_name)
    if key == 27:
        raise Exception('Esc pressed!')
    return
def review(self, TD_object):
    """Displays the TD recording overlaid with the annotated track.
    On events are drawn in red and off events in cyan (BGR values below).
    Takes in:
        TD_object: An Events object (see eventvision module).
    """
    cv2.namedWindow('review_frame')
    for i in range(1, len(self.data.ts)):
        current_frame = np.zeros((TD_object.height, TD_object.width, 3), np.uint8)
        tmin = self.data.ts[i - 1]
        tmax = self.data.ts[i]
        tminind = np.min(np.where(TD_object.data.ts >= tmin))
        tmaxind = np.max(np.where(TD_object.data.ts <= tmax))
        # Populate the current frame with all the events which occur between
        # successive timestamps of the annotated track events. The track event
        # saved at the end of the current frame is shown as a circle.
        on = TD_object.data.p[tminind:tmaxind] == 1
        off = TD_object.data.p[tminind:tmaxind] == 0
        current_frame[TD_object.data.y[tminind:tmaxind][on],
                      TD_object.data.x[tminind:tmaxind][on], :] = [100, 100, 255]
        current_frame[TD_object.data.y[tminind:tmaxind][off],
                      TD_object.data.x[tminind:tmaxind][off], :] = [255, 255, 30]
        cv2.circle(current_frame, (self.data.x[i], self.data.y[i]), 10, (0, 255, 0), 2)
        cv2.imshow('review_frame', current_frame)
        key = cv2.waitKey(1)
    cv2.destroyWindow('review_frame')
def trackObjects(self):
    for area in self.trackedAreasList:
        # Template matching on the grayscale frame
        gray = cv2.cvtColor(self.processedFrame, cv2.COLOR_BGR2GRAY)
        templ = area.getGrayStackAve()
        cc = cv2.matchTemplate(gray, templ, cv2.TM_CCOEFF_NORMED)
        # Raise the correlation surface to the fourth power to sharpen peaks
        cc = cc * cc * cc * cc
        _, cc = cv2.threshold(cc, 0.1, 0, cv2.THRESH_TOZERO)
        cc8 = cv2.normalize(cc, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
        mask = np.zeros_like(cc8)
        # Search for the match within the template region
        mcorn = area.getEnlargedCorners(0)  # if not 0, enlarge the search
        cv2.rectangle(mask, mcorn[0], mcorn[1], 255, -1)
        _, _, _, mx = cv2.minMaxLoc(cc8, mask)
        # kp = area.getKalmanPredict()
        # area.updateWindow(kp)
        # area.setTemplate(self.processedFrame)
        # Prevent large spatial jumps
        (c, r, _, _) = area.getcrwh()
        jump = 10
        if abs(c - mx[0]) < jump and abs(r - mx[1]) < jump:
            # area.setKalmanCorrect(mx)
            area.updateWindow(mx)
        else:
            # area.setKalmanCorrect((c, r))
            area.updateWindow((c, r))
        area.setTemplate(self.processedFrame)
        # Show the template stack
        if self.showTemplate:
            cv2.imshow('Stack: ' + str(area), area.getStack())
        else:
            try:
                cv2.destroyWindow('Stack: ' + str(area))
            except cv2.error:
                pass
        # Show the matching results
        if self.showMatch:
            cv2.rectangle(cc8, mcorn[0], mcorn[1], 255, 1)
            cv2.circle(cc8, mx, 5, 255, 1)
            cv2.imshow('Match: ' + str(area), cc8)
        else:
            try:
                cv2.destroyWindow('Match: ' + str(area))
            except cv2.error:
                pass
        # Draw the tracked area on the image
        corn = area.getCorners()
        cv2.rectangle(self.workingFrame, corn[0], corn[1], (0, 255, 0), 1)
    # self.showFrame()
    # raw_input('wait')
def run(self, update_fun=None):
    """Start the image viewer.
    This method blocks until the user requests to close the window.
    Parameters
    ----------
    update_fun : Optional[Callable[[], bool]]
        An optional callable that is invoked at each frame. May be used
        to play an animation/a video sequence. Should return True to
        keep the viewer running, False to terminate.
    """
    if update_fun is not None:
        self._user_fun = update_fun
    self._terminate, is_paused = False, False
    # print("ImageViewer is paused, press space to start.")
    while not self._terminate:
        t0 = time.time()
        if not is_paused:
            self._terminate = not self._user_fun()
            if self._video_writer is not None:
                self._video_writer.write(
                    cv2.resize(self.image, self._window_shape))
        t1 = time.time()
        remaining_time = max(1, int(self._update_ms - 1e3 * (t1 - t0)))
        cv2.imshow(
            self._caption, cv2.resize(self.image, self._window_shape[:2]))
        key = cv2.waitKey(remaining_time)
        if key & 255 == 27:  # ESC
            print("terminating")
            self._terminate = True
        elif key & 255 == 32:  # ' '
            print("toggling pause: " + str(not is_paused))
            is_paused = not is_paused
        elif key & 255 == 115:  # 's'
            print("stepping")
            self._terminate = not self._user_fun()
            is_paused = True
    # Due to a bug in OpenCV we must call imshow after destroying the
    # window. This will make the window appear again as soon as waitKey
    # is called.
    #
    # see https://github.com/Itseez/opencv/issues/4535
    self.image[:] = 0
    cv2.destroyWindow(self._caption)
    cv2.waitKey(1)
    cv2.imshow(self._caption, self.image)
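# Hedged usage sketch: driving the viewer with a per-frame callback that
# returns True to keep running. The `viewer` instance and `next_frame`
# helper are assumptions, not shown in this listing.
def frame_callback():
    viewer.image = next_frame()  # hypothetical frame source
    return True  # keep the viewer running

viewer.run(frame_callback)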
def show_tool_01(real, pred, show_shape):
    """Interactive comparison tool: hovering over the REAL gallery highlights
    the corresponding tile in the PRED gallery; left-clicking collects tiles
    into a SELECT gallery. Press 'q' to quit.
    """
    real_shape = real.shape
    pred_shape = pred.shape
    _real = imgs_display(real, show_shape)
    _pred = gray2rgb(imgs_display(pred, show_shape))
    select_imgs = []

    def tool(event, x, y, flags, param):
        # Tile indices of the hovered cell
        inx = int(x / real_shape[1])
        iny = int(y / real_shape[2])

        def find():
            px = inx * pred_shape[1]
            py = iny * pred_shape[2]
            p = np.copy(_pred)
            cv2.rectangle(p, (px, py), (px + pred_shape[1], py + pred_shape[2]),
                          (0, 255, 0), 2)
            cv2.imshow("PRED", p)

        if event == cv2.EVENT_LBUTTONDOWN:
            select_imgs = param[0]
            select_imgs.append(pred[inx + iny * show_shape[1]])
            # Trim in place so the shared list in param[0] stays bounded
            # at the 10x10 gallery size.
            del select_imgs[:-10 * 10]
            cv2.imshow("SELECT", imgs_display(np.array(select_imgs), [10] * 2))
        elif event == cv2.EVENT_MOUSEMOVE:
            find()
        elif event == cv2.EVENT_LBUTTONUP:
            pass

    cv2.namedWindow("REAL")
    cv2.setMouseCallback("REAL", tool, [select_imgs])
    cv2.imshow("PRED", _pred)
    # Keep looping until the 'q' key is pressed
    while True:
        # Display the image and wait for a keypress
        cv2.imshow("REAL", _real)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
    cv2.destroyAllWindows()
def main():
window = 'preview'
cv2.namedWindow(window)
tfrecord_file_names = glob(path.join('data', '*-2.tfrecord.gz'))
max_reads = 200
batch_size = 50
with tf.Graph().as_default() as graph:
image_batch, type_batch = import_images(tfrecord_file_names, max_reads=max_reads, batch_size=batch_size)
import_graph('exported/vae-refine.pb', input_map={'image_batch': image_batch}, prefix='process')
phase_train = graph.get_tensor_by_name('process/mogrify/vae/phase_train:0')
embedding = graph.get_tensor_by_name('process/mogrify/vae/variational/add:0')
reconstructed = graph.get_tensor_by_name('process/mogrify/clip:0')
reconstructed.set_shape((None, 180, 320, 3))
refined = graph.get_tensor_by_name('process/refine/y:0')
refined.set_shape((None, 180, 320, 3))
coord = tf.train.Coordinator()
with tf.Session(graph=graph) as sess:
init = tf.group(tf.local_variables_initializer(), tf.global_variables_initializer())
sess.run(init)
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
print('Evaluating ...')
while not coord.should_stop():
# fetching the embeddings given the inputs ...
reference, coeffs = sess.run([image_batch, embedding], feed_dict={phase_train: False})
# ... then salting the embeddings ...
coeffs += np.random.randn(coeffs.shape[0], coeffs.shape[1])
# ... then fetching the images given the new embeddings.
results = sess.run(refined, feed_dict={phase_train: False, embedding: coeffs})
assert reference.shape == results.shape
reference = reference[:3]
results = results[:3]
canvas = example_gallery(reference, results)
cv2.imshow(window, canvas)
if (cv2.waitKey(1000) & 0xff) == 27:
print('User requested cancellation.')
coord.request_stop()
break
except tf.errors.OutOfRangeError:
print('Read all examples.')
finally:
coord.request_stop()
coord.join(threads)
coord.wait_for_stop()
cv2.destroyWindow(window)