Python examples of cv2.COLOR_RGB2GRAY
Source: atari_wrappers_deprecated.py (project: combine-DT-with-NN-in-RL, author: Burning-Bear)
def process(frame):
    # Grayscale + downscale to the standard 84x84x1 Atari observation.
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    frame = cv2.resize(frame, (84, 84), interpolation=cv2.INTER_AREA)
    return frame.reshape(84, 84, 1)
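A minimal usage sketch for process (assuming numpy and cv2 are imported; the 210x160x3 shape is the usual raw Atari frame size and is an assumption, not taken from the snippet):

import numpy as np
import cv2

# Hypothetical raw Atari RGB frame: height 210, width 160, 3 channels.
raw_frame = np.random.randint(0, 256, size=(210, 160, 3), dtype=np.uint8)

processed = process(raw_frame)
print(processed.shape)  # (84, 84, 1)
print(processed.dtype)  # uint8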
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        img = obj.imageNode.ViewObject.Proxy.img.copy()
    print(obj.blockSize, obj.ksize, obj.k)
    try:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = np.float32(gray)
        print("normale")
    except cv2.error:
        # Image is already single-channel; round-trip through RGB to get a grayscale copy.
        im2 = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        gray = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)
        print("except")
    dst = cv2.cornerHarris(gray, obj.blockSize, obj.ksize*2+1, obj.k/10000)
    dst = cv2.dilate(dst, None)
    img[dst > 0.01*dst.max()] = [0, 0, 255]
    dst2 = img.copy()
    dst2[dst < 0.01*dst.max()] = [255, 255, 255]
    dst2[dst > 0.01*dst.max()] = [0, 0, 255]
    if not obj.matplotlib:
        cv2.imshow(obj.Label, img)
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(dst2, cmap='gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    self.img = img
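The core of animpingpong is cv2.cornerHarris followed by thresholding the response map; a minimal standalone sketch of that step, assuming a hypothetical input file 'board.png' and typical tutorial parameter values (blockSize=2, ksize=3, k=0.04):

import cv2
import numpy as np

img = cv2.imread('board.png')                 # BGR image (hypothetical file)
gray = np.float32(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
dst = cv2.cornerHarris(gray, blockSize=2, ksize=3, k=0.04)
dst = cv2.dilate(dst, None)                   # dilate so corner peaks are visible
img[dst > 0.01 * dst.max()] = [0, 0, 255]     # mark strong corners in red (BGR)
cv2.imwrite('board_corners.png', img)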
Source: util.py (project: tensorflow-action-conditional-video-prediction, author: williamd4112)
def _transform_frame_color_space_np(x):
    # RGB frame -> grayscale, keeping a trailing channel axis of size 1.
    return cv2.cvtColor(x, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
def _transform_state_color_space_np(s):
    # s: [h, w, c*num_frame] -- convert each stacked RGB frame to grayscale and re-stack.
    num_splits = int(s.shape[-1] / 3)
    return np.concatenate(
        [cv2.cvtColor(x, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
         for x in np.split(s, num_splits, axis=2)],
        axis=2)
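A quick shape check (a sketch; the 84x84 resolution and 4-frame stack are assumptions, not taken from the snippet):

import numpy as np
import cv2

# Hypothetical stacked state: 4 RGB frames concatenated along the channel axis.
state = np.random.randint(0, 256, size=(84, 84, 4 * 3), dtype=np.uint8)

gray_state = _transform_state_color_space_np(state)
print(gray_state.shape)  # (84, 84, 4) -- one grayscale channel per original frame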
Source: atari_wrappers_deprecated.py (project: rl-attack-detection, author: yenchenlin)
def process(frame):
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    frame = cv2.resize(frame, (84, 84), interpolation=cv2.INTER_AREA)
    return frame.reshape(84, 84, 1)
def _observation(self, frame):
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
    return frame[:, :, None]
def binarize(img):  # binarize the cropped patches
    a = np.asarray(img)
    a = cv2.cvtColor(a, cv2.COLOR_RGB2GRAY)
    ret, imbin = cv2.threshold(a, 127, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY)
    return Image.fromarray(imbin)
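A small round-trip sketch (assumptions: PIL, numpy and cv2 are importable, and a synthetic gradient image stands in for a real crop):

import numpy as np
from PIL import Image

# Hypothetical RGB input: a horizontal gradient repeated across three channels.
row = np.tile(np.arange(256, dtype=np.uint8), (256, 1))
rgb = np.stack([row, row, row], axis=2)

binary = binarize(Image.fromarray(rgb))
print(binary.size, np.unique(np.asarray(binary)))  # (256, 256) [  0 255]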
def _get_rois_opencv(self, file, mode='gray'):
    cap = cv2.VideoCapture(file)
    vidframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    rois = self._read_roi_file(file)
    totalframes = rois.shape[0]
    if totalframes != vidframes:
        print('Roi Frames: %d\n' % totalframes)
        print('Vid Frames: %d\n' % vidframes)
        raise Exception('Mismatch between the actual number of video frames and the provided ROI _labels')
    if mode == 'gray':
        roi_seq = np.zeros((totalframes, self._yres, self._xres), dtype=np.float32)
    elif mode == 'rgb':
        roi_seq = np.zeros((totalframes, self._yres, self._xres, 3), dtype=np.float32)
    else:
        raise Exception('gray or rgb')
    this_frame = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if mode == 'gray':
            # Note: cap.read() returns BGR frames, so COLOR_RGB2GRAY effectively swaps
            # the red/blue weights; the original conversion is kept unchanged here.
            gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            gray_roi = _crop_roi(gray, rois[this_frame, :])
            resized = self._resize_frame(gray_roi)
        elif mode == 'rgb':
            rgb_roi = _crop_roi(frame, rois[this_frame, :])
            resized = self._resize_frame(rgb_roi)
        else:
            raise Exception('gray or rgb')
        roi_seq[this_frame, :, :] = resized
        this_frame += 1
    return roi_seq
def rgb2gray(img):
    """
    Convert an RGB image to grayscale
    :param img: image to convert
    :return: grayscale image as a 2-D array
    """
    if OPENCV_AVAILABLE:
        from cv2 import cvtColor, COLOR_RGB2GRAY
        return cvtColor(img, COLOR_RGB2GRAY)
    elif PILLOW_AVAILABLE:
        from PIL import Image
        return np.array(Image.fromarray(img).convert('L'))
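Both branches apply the ITU-R BT.601 luma weights (0.299 R + 0.587 G + 0.114 B), so they agree up to rounding; a quick check, assuming both OpenCV and Pillow are installed:

import numpy as np
import cv2
from PIL import Image

img = np.random.randint(0, 256, size=(8, 8, 3), dtype=np.uint8)
cv_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
pil_gray = np.array(Image.fromarray(img).convert('L'))
print(np.abs(cv_gray.astype(int) - pil_gray.astype(int)).max())  # typically 0 or 1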
def test(self, toWait=0.2):
    """ TESTING METHOD
    Run it to check that the preprocessing is done correctly.
    Wait a few seconds for loading, then a slideshow appears with the image and highlighted joints.
    /!\ Use Esc to quit
    Args:
        toWait : time between pictures, in seconds
    """
    self._create_train_table()
    self._create_sets()
    for i in range(len(self.train_set)):
        img = self.open_img(self.train_set[i])
        w = self.data_dict[self.train_set[i]]['weights']
        padd, box = self._crop_data(img.shape[0], img.shape[1], self.data_dict[self.train_set[i]]['box'], self.data_dict[self.train_set[i]]['joints'], boxp=0.0)
        new_j = self._relative_joints(box, padd, self.data_dict[self.train_set[i]]['joints'], to_size=256)
        rhm = self._generate_hm(256, 256, new_j, 256, w)
        rimg = self._crop_img(img, padd, box)
        # See Error in self._generator
        # rimg = cv2.resize(rimg, (256, 256))
        rimg = scm.imresize(rimg, (256, 256))
        # rhm = np.zeros((256, 256, 16))
        # for i in range(16):
        #     rhm[:, :, i] = cv2.resize(rHM[:, :, i], (256, 256))
        grimg = cv2.cvtColor(rimg, cv2.COLOR_RGB2GRAY)
        cv2.imshow('image', grimg / 255 + np.sum(rhm, axis=2))
        # Wait
        time.sleep(toWait)
        if cv2.waitKey(1) == 27:
            print('Ended')
            cv2.destroyAllWindows()
            break
# ------------------------------- PCK METHODS-------------------------------
def resize_image(image, width, height):
    """
    Convert the screen image to grayscale and resize it to the
    configured width and height.
    """
    grayscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    return cv2.resize(grayscale, (width, height))
def get_example(self, i):
    # type: (any) -> typing.Tuple[str, Image]
    path, image = super().get_example(i)
    image_array = numpy.asarray(image)
    image_height, image_width = image_array.shape[:2]
    if len(image_array.shape) == 2:  # gray image
        gray = image_array
    else:
        gray = cv2.cvtColor(image_array, cv2.COLOR_RGB2GRAY)
    facerects = self.classifier.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(64, 64))
    if len(facerects) == 0:
        return path, None  # more sophisticated way to handle errors?
    x, y, width, _ = facerects[0]
    margin = int(width * self.margin_ratio)
    if min(
            y, image_height - y - width,
            x, image_width - x - width,
    ) < margin:  # cannot crop
        return path, None
    cropped = image_array[y - margin:y + width + margin, x - margin:x + width + margin]
    if self.output_resize is None:
        return path, Image.fromarray(cropped)
    else:
        return path, Image.fromarray(cropped).resize(self.output_resize)
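The snippet assumes self.classifier is a face detector exposing detectMultiScale; a minimal sketch of how such a detector is typically built from OpenCV's bundled Haar cascade (the file name and usage here are assumptions, not taken from the project):

import cv2

# Haar cascades ship with opencv-python under cv2.data.haarcascades.
cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
classifier = cv2.CascadeClassifier(cascade_path)

# detectMultiScale returns an array of (x, y, width, height) rectangles, as unpacked above:
# faces = classifier.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(64, 64))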
def take_photo(self):
    """ Take photo and prepare to write, then send to PyImgur (optional).
    """
    faces = detect_faces(face_detection, self.photo)
    gray_image = cv2.cvtColor(self.photo, cv2.COLOR_RGB2GRAY)
    self.draw_hats(self.photo, faces)
    player_data = self.predict_emotions(faces, gray_image)
    self.rank_players(player_data)
    self.save_photo()
def intensity(image):
    """
    Converts a color image into grayscale.
    Used as the `channel` argument to the `features` function.
    """
    return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
def _preprocess(self, obs):
    # gray = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
    # for breakout:
    # gray = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)[34:210]
    gray = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
    resized_gray = cv2.resize(gray, (84, 84))
    """
    cv2.namedWindow("window")
    cv2.imshow("window", gray)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    """
    del obs, gray
    # Scale pixel values from [0, 255] to [-1, 1].
    return (resized_gray - 127.5) / 127.5
def _preprocess(self, obs):
    gray = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
    # for breakout:
    # gray = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)[34:210]
    gray = cv2.resize(gray, (84, 84))
    """
    cv2.namedWindow("window")
    cv2.imshow("window", gray)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    """
    # Scale pixel values from [0, 255] to [-1, 1].
    return (gray - 127.5) / 127.5
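A quick sanity check of the [-1, 1] scaling on a synthetic frame (a sketch; the 210x160x3 input shape is an assumption):

import numpy as np
import cv2

obs = np.random.randint(0, 256, size=(210, 160, 3), dtype=np.uint8)
gray = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
scaled = (cv2.resize(gray, (84, 84)) - 127.5) / 127.5
print(scaled.shape, scaled.min() >= -1.0, scaled.max() <= 1.0)  # (84, 84) True True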