def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in xrange(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares
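The find_squares snippets on this page are adapted from OpenCV's squares.py sample and omit their imports and the angle_cos helper; the `xrange` and print statements also imply Python 2 with OpenCV 3.x, where cv2.findContours returns three values (OpenCV 4 returns two). A minimal sketch of what they assume:

import cv2
import numpy as np

def angle_cos(p0, p1, p2):
    # cosine of the angle at vertex p1, formed by the points p0-p1-p2
    d1, d2 = (p0 - p1).astype('float'), (p2 - p1).astype('float')
    return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2)))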
Python cv2.split() usage examples
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                _retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            # find_contours is presumably a project-local wrapper around cv2.findContours
            contours, _hierarchy = find_contours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                area = cv2.contourArea(cnt)
                if len(cnt) == 4 and 20 < area < 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in xrange(4)])
                    if max_cos < 0.1:
                        # keep only near-square quadrilaterals (width/height within ~7% of each other)
                        if 1 - (float(w) / float(h)) <= 0.07 and 1 - (float(h) / float(w)) <= 0.07:
                            squares.append(cnt)
    return squares
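A minimal usage sketch for either variant (the input path is hypothetical; cv2.drawContours accepts the list of reshaped 4x2 vertex arrays, as in the OpenCV sample):

img = cv2.imread('input.jpg')   # hypothetical input path
squares = find_squares(img)
cv2.drawContours(img, squares, -1, (0, 255, 0), 2)
cv2.imwrite('squares.jpg', img)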
clahe.py source — project: fully-convolutional-network-semantic-segmentation, author: alecng94
def enhance(image_path, clip_limit=3):
    image = cv2.imread(image_path)
    # convert the image to the LAB color model
    image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    # split the image into L, A, and B channels
    l_channel, a_channel, b_channel = cv2.split(image_lab)
    # apply CLAHE to the lightness channel
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(8, 8))
    cl = clahe.apply(l_channel)
    # merge the CLAHE-enhanced L channel with the original A and B channels
    merged_channels = cv2.merge((cl, a_channel, b_channel))
    # convert the image from the LAB color model back to BGR
    final_image = cv2.cvtColor(merged_channels, cv2.COLOR_LAB2BGR)
    return cv2_to_pil(final_image)
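The excerpt does not show the cv2_to_pil helper it returns through; a plausible minimal implementation, assuming it only reorders channels and wraps the array for Pillow:

from PIL import Image

def cv2_to_pil(img):
    # OpenCV arrays are BGR; Pillow expects RGB
    return Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))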
def find_squares(img, cos_limit=0.1):
    print('search for squares with threshold %f' % cos_limit)
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in xrange(4)])
                    if max_cos < cos_limit:
                        squares.append(cnt)
                    else:
                        # print('dropped a square with max_cos %f' % max_cos)
                        pass
    return squares
###
### Version V2. Collect meta-data along the way, with commentary added.
###
def get_color_medio(self, roi, a, b, imprimir=False):
    xl, yl, ch = roi.shape
    roiyuv = cv2.cvtColor(roi, cv2.COLOR_RGB2YUV)
    roihsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
    h, s, v = cv2.split(roihsv)
    mask = (h < 5)
    h[mask] = 200
    roihsv = cv2.merge((h, s, v))
    std = np.std(roiyuv.reshape(xl * yl, 3), axis=0)
    media = np.mean(roihsv.reshape(xl * yl, 3), axis=0) - 60
    mediayuv = np.mean(roiyuv.reshape(xl * yl, 3), axis=0)
    if std[0] < 12 and std[1] < 12 and std[2] < 12:
        # if (std[0]<15 and std[2]<15) or ((media[0]>100 or media[0]<25) and (std[0]>10)):
        media = np.mean(roihsv.reshape(xl * yl, 3), axis=0)
        # yellow has a saturation of about 65 and a value above 200
        if media[1] < 60:  # and (abs(media[0]-30)>10):
            # white
            return [-10, 0, 0]
        else:
            return media
    else:
        return None
def cannyThresholding(self, contour_retrieval_mode=cv2.RETR_LIST):
    '''
    contour_retrieval_mode is passed through as the second argument to cv2.findContours
    '''
    # Attempt to match edges found in the blue, green or red channels: collect all
    channel = 0
    for gray in cv2.split(self.img):
        channel += 1
        print('channel %d ' % channel)
        title = self.tgen.next('channel-%d' % channel)
        if self.show: ImageViewer(gray).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
        found = {}
        for thrs in xrange(0, 255, 26):
            print('Using threshold %d' % thrs)
            if thrs == 0:
                print('First step')
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                title = self.tgen.next('canny-%d' % channel)
                if self.show: ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
                bin = cv2.dilate(bin, None)
                title = self.tgen.next('canny-dilate-%d' % channel)
                if self.show: ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
                title = self.tgen.next('channel-%d-threshold-%d' % (channel, thrs))
                if self.show: ImageViewer(bin).show(window='Next threshold (n to continue)', destroy=self.destroy, info=self.info, thumbnailfn=title)
            bin, contours, hierarchy = cv2.findContours(bin, contour_retrieval_mode, cv2.CHAIN_APPROX_SIMPLE)
            title = self.tgen.next('channel-%d-threshold-%d-contours' % (channel, thrs))
            if self.show: ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
            if contour_retrieval_mode == cv2.RETR_LIST or contour_retrieval_mode == cv2.RETR_EXTERNAL:
                filteredContours = contours
            else:
                filteredContours = []
                h = hierarchy[0]
                for component in zip(contours, h):
                    currentContour = component[0]
                    currentHierarchy = component[1]
                    if currentHierarchy[3] < 0:
                        # Found the outermost parent component
                        filteredContours.append(currentContour)
                print('Contours filtered. Input %d Output %d' % (len(contours), len(filteredContours)))
                time.sleep(5)
            for cnt in filteredContours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                cnt_len = len(cnt)
                cnt_area = cv2.contourArea(cnt)
                cnt_isConvex = cv2.isContourConvex(cnt)
                if cnt_len == 4 and (cnt_area > self.area_min and cnt_area < self.area_max) and cnt_isConvex:
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in xrange(4)])
                    if max_cos < self.cos_limit:
                        sq = Square(cnt, cnt_area, cnt_isConvex, max_cos)
                        self.squares.append(sq)
                    else:
                        # print('dropped a square with max_cos %f' % max_cos)
                        pass
            found[thrs] = len(self.squares)
            print('Found %d quadrilaterals with threshold %d' % (len(self.squares), thrs))
def DataRowFromAFW(anno, root=''):  # Assume data coming from a parsed anno-v7.mat file.
    name = str(anno[0][0])
    bbox = anno[1][0][0]
    # yaw, pitch, roll = anno[2][0][0][0]
    lm = anno[3][0][0]  # 6 landmarks
    if np.isnan(lm).any():
        return None  # Fail
    d = DataRow()
    d.path = os.path.join(root, name).replace("\\", "/")
    d.name = os.path.split(d.path)[-1]
    d.image = cv2.imread(d.path)
    d.leftEye = (float(lm[0][0]), float(lm[0][1]))
    d.rightEye = (float(lm[1][0]), float(lm[1][1]))
    d.middle = (float(lm[2][0]), float(lm[2][1]))
    d.leftMouth = (float(lm[3][0]), float(lm[3][1]))
    # skip point 4, middle of mouth - we take 0 left eye, 1 right eye, 2 nose, 3 left mouth, 5 right mouth
    d.rightMouth = (float(lm[5][0]), float(lm[5][1]))
    return d
def maxImagen(img, tamanyo):
    bOri, gOri, rOri = cv2.split(img)
    filas, columnas, canales = img.shape
    # pad_size = tamanyo/2
    # padded_max = np.pad(img, (pad_size, pad_size), 'constant', constant_values=np.inf)
    max_channel = np.zeros((filas, columnas))
    for r in range(1, filas):
        for c in range(1, columnas):
            window_b = bOri[r:r + tamanyo, c:c + tamanyo]
            window_g = gOri[r:r + tamanyo, c:c + tamanyo]
            window_r = rOri[r:r + tamanyo, c:c + tamanyo]
            max_bg = np.max(window_b + window_g)
            max_r = np.max(window_r)
            max_ch = max_r - max_bg  # (max_r-max_bg)+np.absolute(np.min(max_r-max_bg))
            max_ch_array = np.array([max_ch])
            max_channel[r, c] = max_ch_array
    min_max_channel = np.min(max_channel)
    background_bOri = np.mean(bOri * min_max_channel)
    background_gOri = np.mean(gOri * min_max_channel)
    BbOri = np.absolute(background_bOri)
    BgOri = np.absolute(background_gOri)
    return BbOri, BgOri  # max_channel,
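The per-pixel Python loops above are very slow on full-size images. As a sketch of an alternative (my own rewrite, not from the original project), cv2.dilate with a rectangular kernel computes a sliding-window maximum per channel; note the dilation window is centered rather than top-left anchored, so results differ slightly near borders:

def maxImagen_fast(img, tamanyo):
    bOri, gOri, rOri = cv2.split(img)
    kernel = np.ones((tamanyo, tamanyo), np.uint8)
    # work in float32 so b+g does not wrap around the uint8 range
    bg = bOri.astype(np.float32) + gOri.astype(np.float32)
    # dilation == sliding-window maximum over the kernel footprint
    max_channel = cv2.dilate(rOri.astype(np.float32), kernel) - cv2.dilate(bg, kernel)
    min_max_channel = np.min(max_channel)
    BbOri = np.absolute(np.mean(bOri * min_max_channel))
    BgOri = np.absolute(np.mean(gOri * min_max_channel))
    return BbOri, BgOri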
def extract_color(src, h_th_low, h_th_up, s_th, v_th):
    hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    if h_th_low > h_th_up:
        ret, h_dst_1 = cv2.threshold(h, h_th_low, 255, cv2.THRESH_BINARY)
        ret, h_dst_2 = cv2.threshold(h, h_th_up, 255, cv2.THRESH_BINARY_INV)
        dst = cv2.bitwise_or(h_dst_1, h_dst_2)
    else:
        ret, dst = cv2.threshold(h, h_th_low, 255, cv2.THRESH_TOZERO)
        ret, dst = cv2.threshold(dst, h_th_up, 255, cv2.THRESH_TOZERO_INV)
        ret, dst = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY)
    ret, s_dst = cv2.threshold(s, s_th, 255, cv2.THRESH_BINARY)
    ret, v_dst = cv2.threshold(v, v_th, 255, cv2.THRESH_BINARY)
    dst = cv2.bitwise_and(dst, s_dst)
    dst = cv2.bitwise_and(dst, v_dst)
    return dst
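The h_th_low > h_th_up branch handles the hue wraparound at 180 in OpenCV's 8-bit HSV encoding, which is what makes red selectable in a single call. A usage sketch (the thresholds here are illustrative, not from the source):

img = cv2.imread('photo.jpg')                      # hypothetical input
red_mask = extract_color(img, 170, 10, 80, 80)     # hue wraps: 170..179 plus 0..10
blue_mask = extract_color(img, 100, 130, 80, 80)   # ordinary contiguous hue band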
def writeResults(DestinationFolder, resultFolder, name_array, classifier, Y_predicted):
    print(len(name_array))
    if not os.path.exists(resultFolder):
        os.mkdir(resultFolder)
    size_m = 0
    i = 0
    j = 0
    lc = 0
    while size_m < len(name_array):
        current = cv2.imread(DestinationFolder + name_array[size_m] + "_final_candidates.bmp")
        # print(DestinationFolder+name_array[size_m]+"_final_candidates.bmp.bmp")
        # print("current ka size", current.shape)
        x, current_m, z = cv2.split(current)
        # print(count_ones(current_m, 255), "now again check", name_array[size_m])
        i = 0
        while i < current_m.shape[0]:
            j = 0
            while j < current_m.shape[1]:
                if current_m[i, j] == 255:
                    current_m[i, j] = 255 * Y_predicted[lc]
                    lc = lc + 1
                j = j + 1
            i = i + 1
        cv2.imwrite(resultFolder + name_array[size_m] + classifier + "_result.bmp", current_m)
        size_m = size_m + 1
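The pixel-by-pixel scan walks each mask in row-major order and consumes one prediction per candidate pixel. A NumPy sketch of the same update (my rewrite, replacing the two nested while loops; it relies on np.flatnonzero visiting pixels in the same row-major order):

idx = np.flatnonzero(current_m == 255)            # candidate pixels, row-major order
preds = np.asarray(Y_predicted[lc:lc + len(idx)])
current_m.flat[idx] = 255 * preds                 # 0 or 255 per predicted label
lc += len(idx)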
def draw(self, frame):
    # rgbs = cv2.split(frame)
    # hsvs = cv2.split(hsv)
    # cv2.imshow("Hue", hsvs[0])
    # cv2.imshow("Frame", frame)
    # cv2.waitKey(1)
    # cv2.imshow("Red", rgbs[0])
    # cv2.imshow("Green", rgbs[1])
    # cv2.imshow("Blue", rgbs[2])
    # cv2.imshow("Saturation", hsvs[1])
    # cv2.imshow("Value", hsvs[2])
    cv2.imshow(self.params['name'], frame)
    cv2.waitKey(1)
def draw_images(self, frame, hsv, mask_ball, mask_arena, arena_center, arena_ring_radius=None):
    self.draw_history(frame, 'ball')
    self.draw_history(frame, 'arena')
    if arena_ring_radius is not None:
        cv2.circle(frame, arena_center, arena_ring_radius, (0, 128, 255), 2)
    return frame
    # debug code below is unreachable after the return
    # rgbs = cv2.split(frame)
    # hsvs = cv2.split(hsv)
    # cv2.imshow("Hue", hsvs[0])
    # cv2.imshow("Mask ball", mask_ball)
    # cv2.imshow("Mask arena", mask_arena)
    # cv2.imshow("Frame", frame)
    # cv2.waitKey(1)
    # cv2.imshow("Red", rgbs[0])
    # cv2.imshow("Green", rgbs[1])
    # cv2.imshow("Blue", rgbs[2])
    # cv2.imshow("Saturation", hsvs[1])
    # cv2.imshow("Value", hsvs[2])
    # cv2.waitKey(1)
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    return image
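Because cv2.add saturates to the uint8 range, the shifts clip at 0 and 255 rather than wrapping. A usage sketch with milder, illustrative limits (the path is hypothetical):

img = cv2.imread('train/0001.jpg')   # hypothetical training image
aug = randomHueSaturationValue(img,
                               hue_shift_limit=(-10, 10),
                               sat_shift_limit=(-30, 30),
                               val_shift_limit=(-30, 30),
                               u=1.0)  # u=1.0 forces the augmentation for the demo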
def get_rescaled(fname, metadata, directory, rescaled_directory):
    # TODO(dek): move rescaling to its own function
    rescaled_fname = fname + ".rescaled.png"
    rescaled = os.path.join(rescaled_directory, rescaled_fname)
    if not os.path.exists(rescaled):
        print "Unable to find cached rescaled image for", fname
        return None
    image = cv2.imread(rescaled, cv2.IMREAD_UNCHANGED)
    if image is None:
        print "Failed to read image from", rescaled
        return None
    b_channel, g_channel, r_channel = cv2.split(image)
    alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255
    image = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
    return image
def load(self):
    if self.path is None:
        print "Current path is empty!"
        print "Please set one!"
    else:
        try:
            # Return a 3-channel color image
            self.image = cv2.imread(self.path)
            # cv2.imshow('f', self.image)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
        except:
            raise ValueError('Loading error!')
        # convert BGR to HSV (cv2.imread returns BGR)
        self.hsv = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)
        # split the image into H, S and V channels
        self.h, self.s, self.v = cv2.split(self.hsv)
        # Apply Gaussian noise and save
def extractPlantsArea(self, arg_mode=0, arg_INV=False, b_threshold=80, a_threshold=80):
    zeros = np.zeros(self.image.shape[:2], dtype="uint8")
    imgLAB = cv2.cvtColor(self.image, self.colorSpace)
    (L, A, B) = cv2.split(imgLAB)
    cv2.imwrite('Debug/imgB.jpg', B)
    cv2.imwrite('Debug/imgA.jpg', A)
    # (T_weeds_b, thresh_weeds_b) = cv2.threshold(B, b_threshold, 255, cv2.THRESH_BINARY)
    # (T_weeds_a, thresh_weeds_a) = cv2.threshold(A, a_threshold, 255, cv2.THRESH_BINARY)
    if arg_mode == 0:
        thresh_weeds_a = imgProcess_tool.binarialization(A, 0, arg_INV, a_threshold)
        thresh_weeds_b = imgProcess_tool.binarialization(B, 0, arg_INV, b_threshold)
    elif arg_mode == 1:
        thresh_weeds_b = imgProcess_tool.binarialization(B, 1, arg_INV)
        thresh_weeds_a = imgProcess_tool.binarialization(A, 1, arg_INV)
    elif arg_mode == 2:
        thresh_weeds_b = imgProcess_tool.binarialization(B, 2, arg_INV)
        thresh_weeds_a = imgProcess_tool.binarialization(A, 2, arg_INV)
    cv2.imwrite('Debug/imgB_thr.jpg', thresh_weeds_b)
    cv2.imwrite('Debug/imgA_thr.jpg', thresh_weeds_a)
    imgRGB = cv2.merge([zeros, thresh_weeds_b, thresh_weeds_a])
    return thresh_weeds_a, thresh_weeds_b
def mean_squares(self):
    self.threshold = 0.00000000001
    b_i, g_i, r_i = cv2.split(self.image_i)
    b_j, g_j, r_j = cv2.split(self.image_j)
    # cast to float before subtracting: differences of uint8 arrays would wrap around
    error_b = b_i.astype(numpy.float64) - b_j.astype(numpy.float64)
    error_g = g_i.astype(numpy.float64) - g_j.astype(numpy.float64)
    error_r = r_i.astype(numpy.float64) - r_j.astype(numpy.float64)
    error_b = error_b.flatten()
    error_g = error_g.flatten()
    error_r = error_r.flatten()
    mse_b = float(numpy.dot(error_b, error_b)) / len(error_b)
    mse_g = float(numpy.dot(error_g, error_g)) / len(error_g)
    mse_r = float(numpy.dot(error_r, error_r)) / len(error_r)
    self.measure = (mse_b + mse_g + mse_r) / 3
    self.assertLess(self.measure, self.threshold)
    print self.measure
def main():
    imgList = getImageList(input_folder='/home/jin/shenzhenyuan/head-segmentation/input/test',
                           output_file='/home/jin/shenzhenyuan/head-segmentation/input/testSet.txt')
    for img_path in imgList:
        img = cv2.imread('{}'.format(img_path))
        if img_path[:img_path.rfind('.')].endswith('png'):
            str = img_path[:img_path.rfind('.')] + '-seg.png'
        else:
            str = img_path[:img_path.rfind('.')] + '.png-seg.png'
        mask = cv2.imread('{}'.format(str))
        prob = mask[:, :, 0:2] / 255.0
        prob[:, :, 1] = 1 - prob[:, :, 0]
        res, Q = denseCRF(img, prob)
        a = 1 - res
        a = a.astype('uint8')
        # note: cv2.imread returns BGR, so despite the names these are the B, G, R planes
        r_channel, g_channel, b_channel = cv2.split(img)
        img_rgba = cv2.merge((r_channel, g_channel, b_channel, a * 255))
        cv2.imwrite('{}_crf.png'.format(img_path[:img_path.find('.')]), img_rgba)
        # a = np.dstack((a,)*3)
        # plt.imshow(a*img)
        # cv2.imwrite('{}_crf.png'.format(img_path[:img_path.find('.')]), (a>0.1)*img)
        cv2.imwrite('{}_crf_qtsu.png'.format(img_path[:img_path.find('.')]), cropHead(Q, img))
def flows_to_img(flows):
    """Pyfunc wrapper to transform flow vectors into a color coding"""
    def _flow_transform(flows):
        """TensorFlow pyfunc to transform flow into a color coding"""
        flow_imgs = []
        for flow in flows:
            img = computeColor.computeImg(flow)
            # cv2 returns bgr images
            b, g, r = cv2.split(img)
            img = cv2.merge((r, g, b))
            flow_imgs.append(img)
        return [flow_imgs]
    flow_imgs = tf.py_func(_flow_transform, [flows],
                           [tf.uint8], stateful=False, name='flow_transform')
    flow_imgs = tf.squeeze(tf.stack(flow_imgs))
    flow_imgs.set_shape([FLAGS.batchsize] + FLAGS.d_shape_img)
    return flow_imgs
def test():
    displace()
    start = time.clock()
    newCalibrate()
    exposure = WebCam.getExposure()
    print time.clock() - start, "TOTAL TIME"
    while display:
        image = WebCam.getImage()
        contours = GripRunner.run(image)
        Printing.drawContours(image, contours)
        Printing.display(image)
        cv2.waitKey(20)
    # Get average value at the end of test to recalibrate targetAverage
    # image = cv2.imread('TestImages/Cancer.jpg')
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # value = cv2.split(image)[2]
    # # value = np.array([image[:,:,2]])
    # average = cv2.mean(value)
    # print average
def bgr_to_rgb(img):
    b, g, r = cv2.split(img)
    return cv2.merge([r, g, b])
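The same channel swap is available without the split/merge round trip; either of these standard alternatives is typically faster:

rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # dedicated conversion
rgb = img[:, :, ::-1]                       # NumPy view reversing the channel axis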
def hsv_threshold(img, hue_min, hue_max, sat_min, sat_max, val_min, val_max):
    """
    Threshold an HSV image given separate min/max values for each channel.
    :param img: an HSV image
    :param hue_min:
    :param hue_max:
    :param sat_min:
    :param sat_max:
    :param val_min:
    :param val_max:
    :return: result of the threshold (each binary channel AND'ed together)
    """
    hue, sat, val = cv2.split(img)
    hue_bin = np.zeros(hue.shape, dtype=np.uint8)
    sat_bin = np.zeros(sat.shape, dtype=np.uint8)
    val_bin = np.zeros(val.shape, dtype=np.uint8)
    cv2.inRange(hue, hue_min, hue_max, hue_bin)
    cv2.inRange(sat, sat_min, sat_max, sat_bin)
    cv2.inRange(val, val_min, val_max, val_bin)
    bin = np.copy(hue_bin)
    cv2.bitwise_and(sat_bin, bin, bin)
    cv2.bitwise_and(val_bin, bin, bin)
    return bin
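When the hue band does not wrap around, the three splits and masks collapse into a single cv2.inRange call on the unsplit HSV image, which returns the same AND'ed mask:

mask = cv2.inRange(img, (hue_min, sat_min, val_min), (hue_max, sat_max, val_max))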
def extract(self, ori_wmimage, wm, key=10):
    B = ori_wmimage
    if len(ori_wmimage.shape) > 2:
        # despite the (B, G, R) names, these are the Y, U, V planes; B holds the luma channel
        (B, G, R) = cv2.split(cv2.cvtColor(ori_wmimage, cv2.COLOR_BGR2YUV))
    signature = BlindWatermark._gene_signature(wm, 256, key).flatten()
    ext_sig = self.inner_extract(B, signature)
    return BlindWatermark.calc_sim(signature, ext_sig)
def add_alpha_channel(img):
    # img = cv2.imread(path)
    b_channel, g_channel, r_channel = cv2.split(img)
    # create a dummy, fully opaque alpha channel
    alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255
    return cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
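The same result, an opaque alpha plane appended to a BGR image, is available as a single built-in conversion:

bgra = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)  # adds a fully opaque alpha channel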
def assert_dir(dir_path):
    potential_out_dir = dir_path
    idx = -1
    while os.path.isdir(potential_out_dir):
        idx += 1
        if idx == 0:
            potential_out_dir += "_0"
            continue
        potential_out_dir = "_".join(potential_out_dir.split("_")[:-1]) + "_" + str(idx)
    out_dir = potential_out_dir
    os.mkdir(out_dir)
    print "[+] Created " + out_dir + ". and will save output to that directory"
    return out_dir
def run_pathanalyzer(obj, app=None, forcereload=True, createFC=False):
    '''split path into intervals'''
    import reconstruction.pathanalyser
    if forcereload: reload(reconstruction.pathanalyser)
    if app is not None:
        FreeCAD.app = app
    try:
        obj.Proxy.pl2
    except:
        sayexc("no data - run FindPathes first")
        errorDialog("no data - run FindPathes first")
        return
    try:
        widget = obj.Proxy.analyzer
    except:
        widget = None
    hideApprox = obj.hideApproximation
    if obj.pathSelection:  # process selected path
        analyzer = reconstruction.pathanalyser.runsel(obj.N, obj.Threshold, widget, createFC, obj)
    elif obj.pathId == -1:  # process pathObject
        analyzer = reconstruction.pathanalyser.runobj(obj.pathObject, obj.N, obj.Threshold, widget, createFC, obj)
    else:  # process object by index number
        analyzer = reconstruction.pathanalyser.run(obj.Proxy.pl2, obj.pathId, obj.N, obj.Threshold, widget, createFC, obj)
    obj.Proxy.analyzer = analyzer
    return