# Imports assumed by the snippets on this page
import os
import cv2
import numpy as np
from glob import glob

def sharpen_deblur(image):
    img = cv2.imread(image)
    output = cv2.GaussianBlur(img, (0, 0), 25)
    output = cv2.addWeighted(img, 1.75, output, -0.75, 0)
    os.remove(image)
    cv2.imwrite(image, output)
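A minimal usage sketch for the unsharp-masking pattern above, with a placeholder file path. The weights 1.75 and -0.75 sum to 1.0, so overall brightness is preserved while the blurred component is subtracted:

import cv2

img = cv2.imread("photo.jpg")  # placeholder path, not from the original source
blurred = cv2.GaussianBlur(img, (0, 0), 25)  # kernel size derived from sigma=25
sharpened = cv2.addWeighted(img, 1.75, blurred, -0.75, 0)
cv2.imwrite("photo_sharpened.jpg", sharpened)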
Python cv2.addWeighted() example source code
def predict_image(flag):
    t_start = cv2.getTickCount()
    config = tf.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))
    with open(os.path.join(flag.ckpt_dir, flag.ckpt_name, 'model.json'), 'r') as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    weight_list = sorted(glob(os.path.join(flag.ckpt_dir, flag.ckpt_name, "weight*")))
    model.load_weights(weight_list[-1])
    print("[*] model load : %s" % weight_list[-1])
    t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
    print("[*] model loading time: %.3f ms" % t_total)
    imgInput = cv2.imread(flag.test_image_path, 0)
    input_data = imgInput.reshape((1, 256, 256, 1))
    t_start = cv2.getTickCount()
    result = model.predict(input_data, 1)
    t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
    print("Predict time: %.3f ms" % t_total)
    imgMask = (result[0] * 255).astype(np.uint8)
    imgShow = cv2.cvtColor(imgInput, cv2.COLOR_GRAY2BGR)
    _, imgMask = cv2.threshold(imgMask, int(255 * flag.confidence_value), 255, cv2.THRESH_BINARY)
    imgMaskColor = cv2.applyColorMap(imgMask, cv2.COLORMAP_JET)
    # imgZero = np.zeros((256, 256), np.uint8)
    # imgMaskColor = cv2.merge((imgZero, imgMask, imgMask))
    imgShow = cv2.addWeighted(imgShow, 0.9, imgMaskColor, 0.3, 0.0)
    output_path = os.path.join(flag.output_dir, os.path.basename(flag.test_image_path))
    cv2.imwrite(output_path, imgShow)
    print("SAVE:[%s]" % output_path)
def train_visualization_seg(self, model, epoch):
    image_name_list = sorted(glob(os.path.join(self.flag.data_path, 'train/IMAGE/*/*.png')))
    print(image_name_list)
    image_name = image_name_list[-1]
    image_size = self.flag.image_size
    imgInput = cv2.imread(image_name, self.flag.color_mode)
    output_path = self.flag.output_dir
    input_data = imgInput.reshape((1, image_size, image_size, self.flag.color_mode * 2 + 1))
    t_start = cv2.getTickCount()
    result = model.predict(input_data, 1)
    t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
    print("[*] Predict time: %.3f ms" % t_total)
    imgMask = (result[0] * 255).astype(np.uint8)
    imgShow = cv2.cvtColor(imgInput, cv2.COLOR_GRAY2BGR)
    imgMaskColor = cv2.applyColorMap(imgMask, cv2.COLORMAP_JET)
    imgShow = cv2.addWeighted(imgShow, 0.9, imgMaskColor, 0.4, 0.0)
    output_path = os.path.join(self.flag.output_dir, '%04d_' % epoch + os.path.basename(image_name))
    cv2.imwrite(output_path, imgShow)
    # print("SAVE:[%s]" % output_path)
    # cv2.imwrite(os.path.join(output_path, 'img%04d.png' % epoch), imgShow)
    # cv2.namedWindow("show", 0)
    # cv2.resizeWindow("show", 800, 800)
    # cv2.imshow("show", imgShow)
    # cv2.waitKey(1)
def weighted_img(img, initial_img, alpha=0.8, beta=1., gamma=0.):
    # Computes initial_img*alpha + img*beta + gamma
    # (the last parameter is named gamma to match cv2.addWeighted)
    return cv2.addWeighted(initial_img, alpha, img, beta, gamma)
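A hedged usage sketch for weighted_img, a common final step in lane-detection pipelines; the frame and the drawn line here are illustrative stand-ins:

import cv2
import numpy as np

frame = np.zeros((540, 960, 3), np.uint8)  # stand-in for a camera frame
line_img = np.zeros_like(frame)
cv2.line(line_img, (100, 500), (480, 320), (255, 0, 0), 10)
annotated = weighted_img(line_img, frame)  # frame*0.8 + line_img*1.0 + 0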
def processFrame(self):
    # If we are enhancing the image
    if self.enhance:
        # Frangi vesselness to highlight tubular structures
        gray = cv2.cvtColor(self.sourceFrame, cv2.COLOR_BGR2GRAY)
        tub = tubes(gray, [5, 12])
        tubular = cv2.cvtColor(tub, cv2.COLOR_GRAY2BGR)
        # Merge with the original to enhance tubular structures
        high = 0.3
        rest = 1.0 - high
        colorized = cv2.addWeighted(self.sourceFrame, rest, tubular, high, 0.0)
        # colorized = cv2.add(self.sourceFrame, tubular)
        # Tile horizontally
        self.processedFrame = np.concatenate((self.sourceFrame,
                                              tubular,
                                              colorized),
                                             axis=1)
    else:
        self.processedFrame = self.sourceFrame
    self.workingFrame = self.processedFrame.copy()
    # If we are tracking, track and show analysis
    if self.tracking is True:
        self.trackObjects()
        self.showBehavior()
def create_checkpoint_mask(img, mask, predicted_mask):
    p_mask = predicted_mask
    assert p_mask.shape[0] < p_mask.shape[1]
    if p_mask.shape == (CARVANA_H, CARVANA_W + 2):
        p_mask = p_mask[:, 1:-1]
    else:
        p_mask = cv2.resize(p_mask, (CARVANA_W, CARVANA_H),
                            interpolation=cv2.INTER_NEAREST)
    p_mask = (p_mask > 0.5).astype(np.uint8)
    true_mask = mask_to_bgr(mask, 0, 255, 0)
    p_mask = mask_to_bgr(p_mask, 0, 0, 255)
    w = cv2.addWeighted(img, 1.0, true_mask, 0.3, 0)
    w = cv2.addWeighted(w, 1.0, p_mask, 0.5, 0)
    return w
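mask_to_bgr is not shown in this excerpt; the following is a plausible sketch of such a helper (an assumption, not the original implementation), painting a binary mask in a single BGR color so that addWeighted can tint the photo:

import numpy as np

def mask_to_bgr(mask, b, g, r):
    # Hypothetical helper: expand a binary HxW mask to HxWx3,
    # coloring mask pixels (b, g, r) and leaving the rest black.
    out = np.zeros((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)
    out[mask > 0] = (b, g, r)
    return out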
def putTextAlpha(img, text, alpha, org, fontFace, fontScale, color,
                 thickness):  # , lineType=None
    '''
    Extends cv2.putText with [alpha] argument
    '''
    x, y = cv2.getTextSize(text, fontFace,
                           fontScale, thickness)[0]
    ox, oy = org
    imgcut = img[oy - y - 3:oy, ox:ox + x]
    if img.ndim == 3:
        txtarr = np.zeros(shape=(y + 3, x, 3), dtype=np.uint8)
    else:
        txtarr = np.zeros(shape=(y + 3, x), dtype=np.uint8)
    cv2.putText(txtarr, text, (0, y), fontFace,
                fontScale, color,
                thickness=thickness
                # , lineType=lineType
                )
    cv2.addWeighted(txtarr, alpha, imgcut, 1, 0, imgcut, -1)
    return img
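A usage sketch with placeholder values; since the blend writes into imgcut, a view of img, the text is composited in place:

import cv2
import numpy as np

canvas = np.full((200, 400, 3), 64, np.uint8)  # illustrative gray background
putTextAlpha(canvas, "overlay", 0.5, (20, 100),
             cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)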
def drawOpacityCircle(self, x, y, colorR, colorG, colorB, radius, thickness):
    overlay = self.frame.copy()
    cv2.circle(overlay, (x, y), radius, (colorB, colorG, colorR), thickness)
    alpha = 0.25
    cv2.addWeighted(overlay, alpha, self.frame, 1 - alpha, 0, self.frame)
def generate(self):
    # Create black background image and fill it up with random polygons
    img = np.zeros((self.height, self.width, 3), np.uint8)
    overlay = img.copy()
    output = img.copy()
    for i in range(self.size):
        info = self.genes[i].getInfo()
        if self.type == 1:
            cv2.circle(overlay, info[0], info[1], info[2], -1)
            cv2.addWeighted(overlay, info[3], output, 1 - info[3], 0, output)
        elif self.type == 2:
            cv2.ellipse(overlay, info[0], info[1], info[2], 0, 360, info[3], -1)
            cv2.addWeighted(overlay, info[4], output, 1 - info[4], 0, output)
        elif self.type in (3, 4):  # both branches were identical in the original
            cv2.fillConvexPoly(overlay, np.asarray(info[0]), info[1])
            cv2.addWeighted(overlay, info[2], output, 1 - info[2], 0, output)
    return output
def outlining(img):
    # kernel size
    kernel_size = 3
    # -------------------------------------------------
    # bilateral filter, sharpen, thresh image
    biblur = cv2.bilateralFilter(img, 20, 175, 175)
    sharp = cv2.addWeighted(img, 1.55, biblur, -0.5, 0)
    ret1, thresh1 = cv2.threshold(sharp, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # negative and closed image
    inv = cv2.bitwise_not(thresh1)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
    closed = cv2.morphologyEx(inv, cv2.MORPH_CLOSE, kernel)
    return closed
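A hedged call for outlining; the path is a placeholder, and the image is loaded as grayscale because Otsu thresholding expects a single-channel 8-bit input:

import cv2

gray = cv2.imread("page.png", cv2.IMREAD_GRAYSCALE)  # placeholder input
contour_mask = outlining(gray)
cv2.imwrite("page_outlined.png", contour_mask)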
def getRGBS(img, PLOT=False):
    image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # grab the image channels, initialize the tuple of colors,
    # the figure and the flattened feature vector
    features = []
    featuresSobel = []
    Grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    histG = cv2.calcHist([Grayscale], [0], None, [16], [0, 256])
    histG = histG / histG.sum()
    features.extend(histG[:, 0].tolist())
    grad_x = np.abs(cv2.Sobel(Grayscale, cv2.CV_16S, 1, 0, ksize=3, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT))
    grad_y = np.abs(cv2.Sobel(Grayscale, cv2.CV_16S, 0, 1, ksize=3, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT))
    abs_grad_x = cv2.convertScaleAbs(grad_x)
    abs_grad_y = cv2.convertScaleAbs(grad_y)
    dst = cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
    histSobel = cv2.calcHist([dst], [0], None, [16], [0, 256])
    histSobel = histSobel / histSobel.sum()
    features.extend(histSobel[:, 0].tolist())
    Fnames = []
    # 16 names per histogram to match the 16 bins computed above
    Fnames.extend(["Color-Gray" + str(i) for i in range(16)])
    Fnames.extend(["Color-GraySobel" + str(i) for i in range(16)])
    return features, Fnames
def get_gradient(im):
    # Calculate the x and y gradients using the Sobel operator
    grad_x = cv2.Sobel(im, cv2.CV_32F, 1, 0, ksize=3)
    grad_y = cv2.Sobel(im, cv2.CV_32F, 0, 1, ksize=3)
    # Combine the two gradients
    grad = cv2.addWeighted(np.absolute(grad_x), 0.5, np.absolute(grad_y), 0.5, 0)
    # print(grad.dtype)
    # print(grad.shape)
    return grad
# Based on: http://www.learnopencv.com/image-alignment-ecc-in-opencv-c-python/
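Per the linked article, get_gradient is typically fed to ECC image alignment so that registration is driven by edges rather than raw intensities. A minimal sketch, with placeholder file names; the trailing inputMask/gaussFiltSize arguments assume OpenCV 4.1+:

import cv2
import numpy as np

im1 = cv2.imread("reference.png", cv2.IMREAD_GRAYSCALE).astype(np.float32)
im2 = cv2.imread("moving.png", cv2.IMREAD_GRAYSCALE).astype(np.float32)
warp_matrix = np.eye(2, 3, dtype=np.float32)
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 50, 1e-6)
cc, warp_matrix = cv2.findTransformECC(get_gradient(im1), get_gradient(im2),
                                       warp_matrix, cv2.MOTION_TRANSLATION,
                                       criteria, None, 5)
aligned = cv2.warpAffine(im2, warp_matrix, (im1.shape[1], im1.shape[0]),
                         flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)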
def ShowSolution(images, puzzle, solution, frame, box):
    cell_size = np.array([box.w / 4, box.h / 4])
    for piece_type, piece, i, j in solution:
        top_left_loc = np.array([box.x, box.y]) + (np.array([j, i]) -
                                                   np.array([1, 1])) * cell_size
        color = pieces.Colors[piece_type]
        piece_img = np.zeros_like(frame)
        for square in itertools.product(range(2), range(2)):
            if piece[square] == board.SquareType.AIR:
                continue
            loc = top_left_loc + np.array(square[::-1]) * cell_size
            piece_img = cv2.rectangle(piece_img, tuple(loc), tuple(loc + cell_size),
                                      color, -2)
            if piece[square] in images:
                image = cv2.resize(images[piece[square]], tuple(cell_size))
                blend = np.zeros_like(piece_img)
                blend[loc[1]:loc[1] + cell_size[1], loc[0]:loc[0] + cell_size[0]] = image
                piece_img = cv2.addWeighted(piece_img, 1.0, blend, 1.0, 0)
        piece_gray = cv2.cvtColor(piece_img, cv2.COLOR_RGB2GRAY)
        _, piece_gray = cv2.threshold(piece_gray, 10, 255, cv2.THRESH_BINARY)
        _, contours, _ = cv2.findContours(piece_gray, cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_SIMPLE)
        piece_img = cv2.drawContours(piece_img, contours, -1, (255, 255, 255), 3)
        frame = cv2.addWeighted(frame, 1.0, piece_img, 0.7, 0)
    cv2.imshow("Planes", frame)
def paintGL(self, sun_x, sun_y, sun_z, moon_x, moon_y, moon_z):
    # Draw the sun
    self.fbo.bind()
    self.draw_sun(sun_x, sun_y, sun_z)
    glFlush()
    self.fbo.release()
    image = self.fbo.toImage()
    # Produce a blurred image of the sun
    npimage = qimage_to_numpy(image)
    h, w, b = npimage.shape
    blur = cv2.GaussianBlur(npimage, (75, 75), 0, 0)
    cv2.convertScaleAbs(blur, blur, 2, 1)
    # Combine the blurred image with the sun
    combo = cv2.addWeighted(blur, 0.5, npimage, 0.5, -1)
    h, w, b = combo.shape
    qimage = QtGui.QImage(combo.data, w, h, QtGui.QImage.Format_ARGB32).rgbSwapped()
    self.fbo.bind()
    device = QtGui.QOpenGLPaintDevice(RES_X, RES_Y)
    painter = QtGui.QPainter()
    painter.begin(device)
    rect = QtCore.QRect(0, 0, RES_X, RES_Y)
    # Draw the blurred-sun/sun combo image on the screen
    painter.drawImage(rect, qimage, rect)
    painter.end()
    self.fbo.release()
    # Draw the moon
    self.fbo.bind()
    self.draw_moon(moon_x, moon_y, moon_z)
    glFlush()
    self.fbo.release()
def global_gradient(self):
    gradient_values_x = cv2.Sobel(self.img, cv2.CV_64F, 1, 0, ksize=5)
    gradient_values_y = cv2.Sobel(self.img, cv2.CV_64F, 0, 1, ksize=5)
    gradient_magnitude = cv2.addWeighted(gradient_values_x, 0.5, gradient_values_y, 0.5, 0)
    gradient_angle = cv2.phase(gradient_values_x, gradient_values_y, angleInDegrees=True)
    return gradient_magnitude, gradient_angle
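Averaging the signed CV_64F derivatives with addWeighted only approximates the gradient magnitude, since opposite-sign components can cancel. As an alternative (not the original author's code), cv2.magnitude gives the exact Euclidean magnitude:

import cv2

def global_gradient_exact(img):
    # Sketch: true L2 gradient magnitude instead of a 0.5/0.5 weighted sum
    gx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
    gy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
    return cv2.magnitude(gx, gy), cv2.phase(gx, gy, angleInDegrees=True)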
def watershed(self):
    m = self.markers.copy()
    cv2.watershed(self.img, m)
    self.returnVar = m.copy()
    overlay = self.colors[np.maximum(m, 0)]
    vis = cv2.addWeighted(self.img, 0.5, overlay, 0.5, 0.0, dtype=cv2.CV_8UC3)
    cv2.namedWindow('watershed', cv2.WINDOW_NORMAL)
    cv2.moveWindow('watershed', 780, 200)
    cv2.imshow('watershed', vis)
def _get_gradient_magnitude(im):
    """Get magnitude of gradient for given image"""
    ddepth = cv2.CV_32F
    dx = cv2.Sobel(im, ddepth, 1, 0)
    dy = cv2.Sobel(im, ddepth, 0, 1)
    dxabs = cv2.convertScaleAbs(dx)
    dyabs = cv2.convertScaleAbs(dy)
    mag = cv2.addWeighted(dxabs, 0.5, dyabs, 0.5, 0)
    return np.average(mag)
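Because the function reduces the gradient image to a single average, it can double as a crude sharpness score; a hedged comparison sketch with placeholder file names:

import cv2

a = cv2.imread("frame_a.png", cv2.IMREAD_GRAYSCALE)  # placeholder frames
b = cv2.imread("frame_b.png", cv2.IMREAD_GRAYSCALE)
sharper = "frame_a" if _get_gradient_magnitude(a) > _get_gradient_magnitude(b) else "frame_b"
print("sharper:", sharper)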
def highlight_regions(image, region_rects):
    # Darken the whole image by blending with a black mask
    composite_image = cv2.addWeighted(image, 0.50, np.zeros(image.shape, dtype="uint8"), 0.50, 0)
    # Restore the original pixels inside each region of interest
    for rect in region_rects:
        (x1, x2, y1, y2) = (rect["x1"], rect["x2"], rect["y1"], rect["y2"])
        composite_image[y1:y2, x1:x2] = image[y1:y2, x1:x2]
    return composite_image
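A usage sketch with one illustrative rectangle; the input path and coordinates are placeholders:

import cv2

img = cv2.imread("scene.jpg")  # placeholder image
rois = [{"x1": 50, "x2": 250, "y1": 80, "y2": 220}]  # illustrative region
cv2.imwrite("scene_highlighted.jpg", highlight_regions(img, rois))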
def process_an_image(img):
    roi_vtx = np.array([[(0, img.shape[0]), (460, 325), (520, 325), (img.shape[1], img.shape[0])]])
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    blur_gray = cv2.GaussianBlur(gray, (blur_ksize, blur_ksize), 0, 0)
    edges = cv2.Canny(blur_gray, canny_lthreshold, canny_hthreshold)
    roi_edges = roi_mask(edges, roi_vtx)
    line_img = hough_lines(roi_edges, rho, theta, threshold, min_line_length, max_line_gap)
    res_img = cv2.addWeighted(img, 0.8, line_img, 1, 0)
    # plt.figure()
    # plt.imshow(gray, cmap='gray')
    # plt.savefig('../resources/gray.png', bbox_inches='tight')
    # plt.figure()
    # plt.imshow(blur_gray, cmap='gray')
    # plt.savefig('../resources/blur_gray.png', bbox_inches='tight')
    # plt.figure()
    # plt.imshow(edges, cmap='gray')
    # plt.savefig('../resources/edges.png', bbox_inches='tight')
    # plt.figure()
    # plt.imshow(roi_edges, cmap='gray')
    # plt.savefig('../resources/roi_edges.png', bbox_inches='tight')
    # plt.figure()
    # plt.imshow(line_img, cmap='gray')
    # plt.savefig('../resources/line_img.png', bbox_inches='tight')
    # plt.figure()
    # plt.imshow(res_img)
    # plt.savefig('../resources/res_img.png', bbox_inches='tight')
    # plt.show()
    return res_img
# img = mplimg.imread("../resources/lane.jpg")
# process_an_image(img)
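process_an_image depends on module-level constants (blur_ksize, the Canny thresholds, and the Hough parameters) and on roi_mask/hough_lines helpers that are not part of this excerpt. A plausible parameter block, assumed values only:

import numpy as np

blur_ksize = 5           # Gaussian blur kernel size (assumed)
canny_lthreshold = 50    # Canny low threshold (assumed)
canny_hthreshold = 150   # Canny high threshold (assumed)
rho = 1                  # Hough distance resolution, pixels
theta = np.pi / 180      # Hough angle resolution, radians
threshold = 15           # minimum accumulator votes (assumed)
min_line_length = 40     # shortest accepted segment, pixels (assumed)
max_line_gap = 20        # largest gap to bridge, pixels (assumed)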
def houghTransformAndRegionSelect(image, edges):
    rho = 1
    theta = np.pi / 180
    threshold = 1
    min_line_length = 5
    max_line_gap = 3
    # Next we'll create a masked edges image using cv2.fillPoly()
    mask = np.zeros_like(edges)
    ignore_mask_color = 255
    # This time we are defining a four sided polygon to mask
    imshape = image.shape
    vertices = np.array([[(0, imshape[0]), (450, 290), (490, 290), (imshape[1], imshape[0])]], dtype=np.int32)
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    masked_edges = cv2.bitwise_and(edges, mask)
    line_image = np.copy(image) * 0
    # Run Hough on the edge-detected image
    # Output "lines" is an array containing endpoints of detected line segments
    lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
    # Iterate over the output "lines" and draw lines on a blank image
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)
    # Create a "color" binary image to combine with the line image
    color_edges = np.dstack((edges, edges, edges))
    # Draw the lines on the edge image
    lines_edges = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)
    return lines_edges
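A hedged end-to-end call for the function above, pairing it with Canny edges; the input path is a placeholder:

import cv2

image = cv2.imread("lane.jpg")  # placeholder road image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(cv2.GaussianBlur(gray, (5, 5), 0), 50, 150)
cv2.imwrite("lane_lines.png", houghTransformAndRegionSelect(image, edges))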