def get_match_confidence(img1, img2, mask=None):
    """Return a 0..1 template-match confidence between two same-sized images."""
    if img1.shape != img2.shape:
        return False  # shapes incomparable; caller treats falsy as "no match"
    # earlier attempt counted the fraction of pixels with absdiff < 20
    # (num > total*0.90); replaced by normalized template matching below
    if mask is not None:
        img1 = img1.copy()
        img1[mask != 0] = 0
        img2 = img2.copy()
        img2[mask != 0] = 0
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    return confidence
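A quick sanity check for the function above; a sketch that assumes two same-sized screenshots on disk (the file names are placeholders) and an arbitrary 0.95 cut-off.

import cv2

img_a = cv2.imread('expected.png')  # hypothetical file names
img_b = cv2.imread('actual.png')
confidence = get_match_confidence(img_a, img_b)
print('match' if confidence and confidence > 0.95 else 'no match', confidence)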
Python cv2.absdiff() examples (source code)
def diff_rect(img1, img2, pos=None):
    """Find a bounding rect of the differences between img1 & img2 (cv2 images).
    If pos is given, return the rect of a difference contour containing pos."""
    diff = cv2.absdiff(img1, img2)
    diff = cv2.GaussianBlur(diff, (3, 3), 0)
    edges = cv2.Canny(diff, 100, 200)
    _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_BINARY)
    # OpenCV 2.x/4.x signature; OpenCV 3.x returns (image, contours, hierarchy)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None
    contours = sorted(contours, key=len)  # sort by point count, largest last
    # no pos provided: return the rect of the largest difference contour
    if pos is None:
        cnt = contours[-1]
        x0, y0, w, h = cv2.boundingRect(cnt)
        return (x0, y0, x0 + w, y0 + h)
    # otherwise the rect must contain pos
    x, y = pos
    for cnt in reversed(contours):
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0 + w, y0 + h
        if x0 <= x <= x1 and y0 <= y <= y1:
            return (x0, y0, x1, y1)
    return None
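A minimal driver for diff_rect; a sketch that assumes two same-sized BGR screenshots on disk (the file names are placeholders).

import cv2

before = cv2.imread('before.png')
after = cv2.imread('after.png')
rect = diff_rect(before, after)
if rect is not None:
    x0, y0, x1, y1 = rect
    cv2.rectangle(after, (x0, y0), (x1, y1), (0, 0, 255), 2)
    cv2.imwrite('diff_box.png', after)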
def features(image, channel, levels=9, start_size=(1983, 1088)):
    """
    Extracts features by down-scaling the image `levels` times,
    transforms the image by applying the function `channel` to
    each scaled version, and computes the difference between
    the scaled, transformed versions.
    image : the image
    channel : a function which transforms the image into
              another image of the same size
    levels : number of scaling levels
    start_size : tuple. The size of the biggest image in
                 the scaling pyramid. The image is first
                 scaled to that size and then scaled by half
                 `levels` times. Therefore, both entries in
                 start_size should be divisible by 2^levels
                 (note: the default (1983, 1088) does not satisfy this).
    """
    image = channel(image)
    # note: numpy shape is (h, w) while cv2's dsize is (w, h); this comparison
    # mixes the two conventions and only guards the initial resize
    if image.shape != start_size:
        image = cv2.resize(image, dsize=start_size)
    scales = [image]
    for level in range(levels - 1):
        logger.debug("scaling at level %d", level)
        scales.append(cv2.pyrDown(scales[-1]))
    feats = []
    for i in range(1, levels - 5):
        big = scales[i]
        for j in (3, 4):
            logger.debug("computing features for levels %d and %d", i, i + j)
            small = scales[i + j]
            srcsize = small.shape[1], small.shape[0]
            dstsize = big.shape[1], big.shape[0]
            logger.debug("Shape source: %s, Shape target: %s", srcsize, dstsize)
            scaled = cv2.resize(src=small, dsize=dstsize)
            feats.append(((i + 1, j + 1), cv2.absdiff(big, scaled)))
    return feats
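Example call; a sketch that assumes a BGR input image and supplies the channel function and module-level logger the function expects (neither is shown in this excerpt, so both are assumptions).

import logging
import cv2
import numpy as np

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def intensity(img):
    # assumed channel: per-pixel mean of the three color planes
    return np.mean(img.astype(np.float32), axis=2)

img = cv2.imread('scene.png')  # hypothetical input
for (i, j), fmap in features(img, channel=intensity, levels=9):
    print(i, j, fmap.shape)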
def composite(img1, img2, mask0):
    """Keep img2 where mask0 is white; everything outside the mask goes black."""
    # img1 is only used for its shape in the original; kept for API compatibility
    if mask0.ndim == 3 and mask0.shape[2] == 3:
        mask2 = cv2.cvtColor(mask0, cv2.COLOR_BGR2GRAY)
    else:
        mask2 = mask0.copy()  # note: mask0[:] in the original was a view, not a copy
    mask1 = cv2.merge([mask2, mask2, mask2])
    # the original built a zero image, took absdiff against the mask, then
    # inverted it; that sequence reduces to a plain bitwise_not of the mask
    invmask = cv2.bitwise_not(mask1)
    output = cv2.subtract(img2, invmask)
    return output
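Masking sketch for composite; the file names and the hand-built rectangular mask are assumptions.

import cv2
import numpy as np

img1 = cv2.imread('foreground.png')  # hypothetical inputs
img2 = cv2.imread('background.png')
mask = np.zeros(img2.shape[:2], np.uint8)
mask[100:200, 100:200] = 255  # keep only this square of img2
cv2.imwrite('composited.png', composite(img1, img2, mask))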
def test_similar():
    import os
    from itertools import combinations
    from collections import defaultdict
    from heapq import heappush
    def sim1(img1, img2):
        h, w, d = img1.shape
        total = h * w * d
        diff = cv2.absdiff(img1, img2)
        num = (diff < 10).sum()
        return num * 1.0 / total
    names = [os.path.join('scene', c) for c in os.listdir('scene')]
    imgs = dict(zip(names, map(cv2.imread, names)))
    diffs = defaultdict(list)
    for name1, name2 in combinations(names, 2):
        img1, img2 = imgs[name1], imgs[name2]
        similarity = sim1(img1, img2)
        # negate similarity so the heap keeps the most similar image first
        heappush(diffs[name1], (-similarity, name2))
        heappush(diffs[name2], (-similarity, name1))
    for k, v in diffs.items():
        print(k, v[0][1], -v[0][0])
def get_mask(img1, img2, thresh=20):
    if img1.shape != img2.shape:
        return None
    diff = cv2.absdiff(img1, img2)
    diff = np.mean(diff, axis=2)
    diff[diff <= thresh] = 0
    diff[diff > thresh] = 255
    mask = np.dstack([diff] * 3)  # note: float-valued; cast with .astype(np.uint8) if needed
    return mask
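The two helpers combine naturally: mask out the changed regions, then compare what is left. A sketch; the file names are placeholders.

import cv2
import numpy as np

img1 = cv2.imread('frame1.png')  # hypothetical frames
img2 = cv2.imread('frame2.png')
mask = get_mask(img1, img2, thresh=20)
if mask is not None:
    confidence = get_match_confidence(img1, img2, mask=mask.astype(np.uint8))
    print('confidence outside changed areas:', confidence)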
def convert_to_linedrawing(self, luminous_image_data):
    # dilate with a 5x5 all-ones kernel (a 24-neighborhood), then take the
    # difference against the original; inverting it turns edges into dark lines
    neighborhood24 = numpy.ones((5, 5), numpy.uint8)
    dilated = cv2.dilate(luminous_image_data, neighborhood24, iterations=1)
    diff = cv2.absdiff(dilated, luminous_image_data)
    linedrawing = cv2.bitwise_not(diff)
    return linedrawing
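A sketch of calling the method on a grayscale image; the owning class is not shown in this excerpt, so the class name below is hypothetical and the method is assumed to accept any single-channel uint8 array.

import cv2

gray = cv2.imread('photo.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
converter = LinedrawingConverter()                    # hypothetical owning class
cv2.imwrite('linedrawing.png', converter.convert_to_linedrawing(gray))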
def rgConspicuity(image):
    """
    Creates the conspicuity map for the sub channel `red-green conspicuity'
    of the color channel.
    """
    def rg(image):
        r, g, _, __ = cv2.split(image)
        return cv2.absdiff(r, g)
    fs = features(image=image, channel=rg)
    return sumNormalizedFeatures(fs)
def byConspicuity(image):
    """
    Creates the conspicuity map for the sub channel `blue-yellow conspicuity'
    of the color channel.
    """
    def by(image):
        _, __, b, y = cv2.split(image)
        return cv2.absdiff(b, y)
    fs = features(image=image, channel=by)
    return sumNormalizedFeatures(fs)
def makeNormalizedColorChannels(image, thresholdRatio=10.):
    """
    Creates a version of the (3-channel color) input image in which each of
    the (4) channels is normalized. Implements color opponencies as per
    Itti et al. (1998).
    Arguments:
        image : input image (3 color channels)
        thresholdRatio : the intensity maximum is divided by this ratio to
            obtain the threshold below which all color values are set to zero.
    Returns:
        an output image with four normalized color channels for red, green,
        blue and yellow.
    """
    intens = intensity(image)
    threshold = intens.max() / thresholdRatio
    logger.debug("Threshold: %s", threshold)
    # note: OpenCV stores channels as B,G,R; if `image` comes straight from
    # cv2.imread, this split actually yields (b, g, r)
    r, g, b = cv2.split(image)
    cv2.threshold(src=r, dst=r, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=g, dst=g, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=b, dst=b, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
    R = r - (g + b) / 2
    G = g - (r + b) / 2
    B = b - (g + r) / 2
    Y = (r + g) / 2 - cv2.absdiff(r, g) / 2 - b
    # Negative values are set to zero.
    cv2.threshold(src=R, dst=R, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=G, dst=G, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=B, dst=B, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=Y, dst=Y, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    return cv2.merge((R, G, B, Y))
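A sketch of running the normalization end-to-end, reusing the hypothetical intensity helper and logger from the sketch after features above; the float conversion and file name are also assumptions.

import cv2
import numpy as np

bgr = cv2.imread('scene.png').astype(np.float32)  # hypothetical input
rgby = makeNormalizedColorChannels(bgr)
R, G, B, Y = cv2.split(rgby)
print(R.shape, float(Y.min()), float(Y.max()))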
def diffImg(t0, t1, t2):
    # three-frame differencing: a pixel counts as motion only if it changed
    # in both consecutive frame pairs
    d1 = cv2.absdiff(t2, t1)
    d2 = cv2.absdiff(t1, t0)
    return cv2.bitwise_and(d1, d2)
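A typical driver loop for three-frame differencing; a sketch in which camera index 0 and grayscale capture are assumptions.

import cv2

cap = cv2.VideoCapture(0)  # default webcam, an assumption
grab = lambda: cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY)
t0, t1, t2 = grab(), grab(), grab()
while True:
    cv2.imshow('motion', diffImg(t0, t1, t2))
    t0, t1, t2 = t1, t2, grab()
    if cv2.waitKey(10) == 27:  # Esc quits
        break
cap.release()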
def are_similar(self, first, second):
    res = cv2.absdiff(first, second)
    hist = cv2.calcHist([res], [0], None, [256], [0, 256])
    # fraction of pixels whose absolute difference is below 15
    return 1 - np.sum(hist[15:]) / np.sum(hist)
frame_diff.py, source file from the Artificial-Intelligence-with-Python project by PacktPublishing
def frame_diff(prev_frame, cur_frame, next_frame):
# Difference between the current frame and the next frame
diff_frames_1 = cv2.absdiff(next_frame, cur_frame)
# Difference between the current frame and the previous frame
diff_frames_2 = cv2.absdiff(cur_frame, prev_frame)
return cv2.bitwise_and(diff_frames_1, diff_frames_2)
# Define a function to get the current frame from the webcam
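The frame grabber announced by the comment above is missing from this excerpt; here is a minimal sketch consistent with how frame_diff is typically driven (the scaling factor and downscaling step are assumptions).

import cv2

def get_frame(cap, scaling_factor=0.5):
    # grab one frame and shrink it to speed up differencing
    ret, frame = cap.read()
    frame = cv2.resize(frame, None, fx=scaling_factor, fy=scaling_factor,
                       interpolation=cv2.INTER_AREA)
    return frame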
def have_motion(frame1, frame2):
    if frame1 is None or frame2 is None:
        return False
    delta = cv2.absdiff(frame1, frame2)
    # any pixel that changed by more than 25 counts as motion
    thresh = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
    return numpy.sum(thresh) > 0
def bgSubtract(rgb):
    # difference against a module-level background frame `bg`, then binarize
    diff = cv2.absdiff(rgb, bg)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    return cv2.threshold(gray, 32, 1, cv2.THRESH_BINARY)[1]
def __call__(self, image: numpy.ndarray, test):
    from skimage.color import rgb2lab  # assumed import; not shown in the original excerpt

    def dilate_diff(image, ksize, iterations=1):
        # difference between the image and its dilation emphasizes dark lines
        dil = cv2.dilate(image, numpy.ones((ksize, ksize), numpy.float32), iterations=iterations)
        return cv2.absdiff(image, dil)

    dtype = image.dtype
    rgb = (image.transpose(1, 2, 0) + 1) / 2  # CHW in [-1, 1] -> HWC in [0, 1]
    lab = rgb2lab(rgb) / 100
    image = lab[:, :, 0]  # keep only the L (lightness) channel
    image = dilate_diff(image, 3).astype(numpy.float32)
    # two jittered Gaussian blurs; the jitter is disabled at test time
    rand = 0.2 + (float(numpy.random.randn()) / 20 if not test else 0)
    rand = 0.000001 if rand <= 0 else rand
    image = cv2.GaussianBlur(image, (5, 5), rand)
    rand = 0.4 + (float(numpy.random.randn()) / 20 if not test else 0)
    rand = 0.000001 if rand <= 0 else rand
    image = cv2.GaussianBlur(image, (5, 5), rand)
    rand = float(numpy.random.randn()) / 40 if not test else 0
    image = numpy.power(image, 0.8 + rand)
    return image.astype(dtype)[numpy.newaxis]
def delta_images(t0, t1, t2):
    # only the first and last frames are used; t1 is accepted but ignored
    d1 = cv2.absdiff(t2, t0)
    return d1
def subtract_back(self, frm):
    # difference the stored background against the (blurred) foreground,
    # binarize with Otsu, then build watershed markers from erosion/dilation
    self.__foreground__ = cv2.blur(self.__foreground__, (3, 3))
    dst = cv2.absdiff(self.__back__, self.__foreground__)
    val, dst = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    fg = cv2.erode(dst, None, iterations=1)   # sure foreground
    bg = cv2.dilate(dst, None, iterations=4)  # sure background
    _, bg = cv2.threshold(bg, 1, 128, cv2.THRESH_BINARY_INV)
    mark = cv2.add(fg, bg)
    mark32 = np.int32(mark)
    cv2.watershed(frm, mark32)
    self.final_mask = cv2.convertScaleAbs(mark32)
def __threshold_moving(input, last_image):
    """Thresholds off parts of the image that have moved or changed between
    the previous and next image.
    Args:
        input: A numpy.ndarray.
        last_image: The previous value of the numpy.ndarray.
    Returns:
        A numpy.ndarray with the parts that are the same in black.
    """
    if last_image.shape == input.shape:
        output = cv2.absdiff(input, last_image)
    else:
        # shapes differ (e.g. first frame): return an empty buffer of the
        # right shape rather than uninitialized memory
        output = numpy.zeros(shape=input.shape, dtype=input.dtype)
    return input, output
def trackPoint(grayimage1, grayimage2):
    moveData = []  # initialize list of movement center points
    biggestArea = MIN_AREA
    # Get differences between the two grayed images
    differenceImage = cv2.absdiff(grayimage1, grayimage2)
    # Blur difference image to enhance motion vectors
    differenceImage = cv2.blur(differenceImage, (BLUR_SIZE, BLUR_SIZE))
    # Get threshold of blurred difference image based on THRESHOLD_SENSITIVITY variable
    retval, thresholdImage = cv2.threshold(differenceImage, THRESHOLD_SENSITIVITY, 255, cv2.THRESH_BINARY)
    try:
        # OpenCV 3.x returns (image, contours, hierarchy)
        thresholdImage, contours, hierarchy = cv2.findContours(thresholdImage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    except ValueError:
        # OpenCV 2.x/4.x returns (contours, hierarchy)
        contours, hierarchy = cv2.findContours(thresholdImage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        for c in contours:
            cArea = cv2.contourArea(c)
            if cArea > biggestArea:
                biggestArea = cArea
                (x, y, w, h) = cv2.boundingRect(c)
                cx = int(x + w/2)  # x center point of contour
                cy = int(y + h/2)  # y center point of contour
                moveData = [cx, cy, w, h]
    return moveData
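Usage sketch for trackPoint; MIN_AREA, BLUR_SIZE, and THRESHOLD_SENSITIVITY are module-level constants in the original project, so the values and file names below are placeholders.

import cv2

MIN_AREA = 200  # placeholder tuning constants
BLUR_SIZE = 10
THRESHOLD_SENSITIVITY = 25

prev = cv2.imread('frame1.png', cv2.IMREAD_GRAYSCALE)  # hypothetical frames
curr = cv2.imread('frame2.png', cv2.IMREAD_GRAYSCALE)
move = trackPoint(prev, curr)
if move:
    cx, cy, w, h = move
    print('largest motion at', (cx, cy), 'size', (w, h))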
#-----------------------------------------------------------------------------------------------
def detect(self, image):
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray_image = cv2.equalizeHist(gray_image)
    blurred = cv2.GaussianBlur(gray_image, self.kernel, self.sigma)
    if self.prevImage is None:
        self.prevImage = blurred
    diff = cv2.absdiff(self.prevImage, blurred)
    _, binary = cv2.threshold(diff, 21, 255, cv2.THRESH_BINARY)
    # findContours returns 3 values on OpenCV 3.x, 2 values otherwise;
    # int() replaces the original eval() for the version check
    if int(cv2.__version__.split('.')[0]) == 3:
        _, cnts, hier = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    else:
        cnts, hier = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    if len(cnts) < 1:
        is_detected = False
        contour = None
    else:
        largest_contour = cnts[0]
        if cv2.contourArea(largest_contour) < self.min_detection_area:
            is_detected = False
            contour = None
        else:
            is_detected = True
            contour = largest_contour
    self.prevImage = blurred
    return is_detected, contour
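Call pattern for detect; the owning class is not shown in this excerpt, so the class name below is hypothetical and is assumed to hold kernel, sigma, min_detection_area, and prevImage state.

import cv2

detector = MotionDetector()  # hypothetical owning class
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
found, contour = detector.detect(frame)
if found:
    x, y, w, h = cv2.boundingRect(contour)
    print('motion at', (x, y, w, h))
cap.release()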