Example source code for Python's add() method
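Every snippet on this page calls OpenCV's cv2.add (sometimes together with related functions such as cv2.multiply). Unlike NumPy's + operator, cv2.add is a saturating operation: results are clipped to the range of the output type instead of wrapping around. A minimal illustrative sketch of that difference (not taken from any of the projects listed below):

import cv2
import numpy as np

a = np.uint8([[250]])
b = np.uint8([[10]])

print(cv2.add(a, b))   # [[255]] -- cv2.add saturates at the uint8 maximum
print(a + b)           # [[4]]   -- plain NumPy addition wraps around (modulo 256)

The dtype argument seen in several of the video.py snippets (e.g. dtype=cv2.CV_8UC3) selects the output array type, which is how signed int8 noise can be added to a uint8 frame.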

video.py (project: emojivis, author: JustinShenk)
def read(self, dst=None):
        w, h = self.frame_size

        if self.bg is None:
            buf = np.zeros((h, w, 3), np.uint8)
        else:
            buf = self.bg.copy()

        self.render(buf)

        if self.noise > 0.0:
            noise = np.zeros((h, w, 3), np.int8)
            cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
            buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3)
        return True, buf
alignment.py (project: car-detection, author: mmetcalfe)
def saveAverageImage(kitti_base, pos_labels, shape, fname, avg_num=None):
    num_images = float(len(pos_labels))
    if avg_num is None:
        avg_num = num_images
    else:
        avg_num = min(avg_num, num_images)

    # avg_img = np.zeros((shape[0],shape[1],3), np.float32)
    avg_img = np.zeros(shape, np.float32)
    progressbar = ProgressBar('Averaging ' + fname, max=len(pos_labels))
    num = 0
    for label in pos_labels:
        if num >= avg_num:
            break
        num += 1
        progressbar.next()
        sample = getCroppedSampleFromLabel(kitti_base, label)
        # sample = np.float32(sample)

        resized = resizeSample(sample, shape, label)

        resized = auto_canny(resized)
        resized = np.float32(resized)

        avg_img = cv2.add(avg_img, resized / float(avg_num))
    progressbar.finish()

    cv2.imwrite(fname, avg_img)
images.py (project: car-detection, author: mmetcalfe)
def crop_rectangle(img, pixel_rect):
    # Note: Need to add 1 to end coordinates because pixel rectangle corners are
    # inclusive.
    cropped = img[pixel_rect.y1:pixel_rect.y2+1, pixel_rect.x1:pixel_rect.x2+1, :]
    # cropped = img[pixel_rect.y1:pixel_rect.y2, pixel_rect.x1:pixel_rect.x2, :]
    return cropped

# save_opencv_bounding_box_info :: String -> Map String gm.PixelRectangle
images.py (project: car-detection, author: mmetcalfe)
def average_image(pos_region_generator, shape, avg_num=None):
    pos_regions = list(pos_region_generator)

    num_images = float(len(pos_regions))
    if avg_num is None:
        avg_num = num_images
    else:
        avg_num = min(avg_num, num_images)

    window_dims = (shape[1], shape[0])

    # avg_img = np.zeros((shape[0],shape[1],3), np.float32)
    avg_img = np.zeros(shape, np.float32)
    progressbar = ProgressBar('Averaging ', max=avg_num)
    num = 0
    for reg in pos_regions:
        if num >= avg_num:
            break
        num += 1
        progressbar.next()

        resized = reg.load_cropped_resized_sample(window_dims)

        resized = auto_canny(resized)
        resized = np.float32(resized)

        avg_img = cv2.add(avg_img, resized / float(avg_num))
    progressbar.finish()

    return avg_img
img_utils.py (project: cv-utils, author: gmichaeljaison)
def brightness(img, alpha):
    return cv.add(img, alpha)
video.py (project: OpenCV-Snapchat-DogFilter, author: sguduguntla)
def read(self, dst=None):
        w, h = self.frame_size

        if self.bg is None:
            buf = np.zeros((h, w, 3), np.uint8)
        else:
            buf = self.bg.copy()

        self.render(buf)

        if self.noise > 0.0:
            noise = np.zeros((h, w, 3), np.int8)
            cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
            buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3)
        return True, buf
video.py (project: OpenCV-Snapchat-DogFilter, author: sguduguntla)
def read(self, dst=None):
        noise = np.zeros(self.render.sceneBg.shape, np.int8)
        cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)

        return True, cv2.add(self.render.getNextFrame(), noise, dtype=cv2.CV_8UC3)
video.py (project: OpenCV-Snapchat-DogFilter, author: sguduguntla)
def read(self, dst=None):
        noise = np.zeros(self.render.sceneBg.shape, np.int8)
        cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)

        return True, cv2.add(self.render.getNextFrame(), noise, dtype=cv2.CV_8UC3)
Tablet.py (project: CameraTablet, author: dmvlasenk)
def setCorners(self):
        name_window = 'Set Corners'
        cv2.namedWindow(name_window)
        cv2.setMouseCallback(name_window, saveTabletCorners)
        cap = cv2.VideoCapture(0)
        ret, frame_from = cap.read()
        TTablet.m_CornersX = []
        TTablet.m_CornersY = []
        TTablet.m_AddFrame = np.zeros(frame_from.shape, np.uint8) 
        #print ("start setCorners")

        while(cap.isOpened()):
            ret, frame_from = cap.read()
            frame_from  = cv2.flip(frame_from, -1)
            frame = cv2.add(TTablet.m_AddFrame, frame_from)
            if ret==True:
                cv2.imshow(name_window,frame)
                #print ("fasdfasdf")
                if cv2.waitKey(1) & (len(TTablet.m_CornersX) > 3):
                    break
            else:
                break
        # Release everything if job is finished
        cap.release()
        #out.release()
        cv2.destroyAllWindows()
nn_calc.py (project: tf_ViZDoom, author: bounty030)
def store_img(img, add):
    name  = 'image_' + str(add) + '.png'
    cv2.imwrite(name, img)
nn_calc.py (project: tf_ViZDoom, author: bounty030)
def store_img(img, add, path):
    name  = 'image_' + str(add) + '.png'
    cv2.imwrite(os.path.join(path, name), img)
Tshirt.py (project: virtual-dressing-room, author: akash0x53)
def detect_shirt2(self):
        self.hsv=cv2.cvtColor(self.norm_rgb,cv.CV_BGR2HSV)
        self.hue,s,_=cv2.split(self.hsv)

        _,self.dst=cv2.threshold(self.hue,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        self.fg=cv2.erode(self.dst,None,iterations=3)
        self.bg=cv2.dilate(self.dst,None,iterations=1)
        _,self.bg=cv2.threshold(self.bg,1,128,1)
        mark=cv2.add(self.fg,self.bg)
        mark32=np.int32(mark)
        cv2.watershed(self.norm_rgb,mark32)

        m=cv2.convertScaleAbs(mark32)
        _,m=cv2.threshold(m,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

        cntr,h=cv2.findContours(m,cv.CV_RETR_EXTERNAL,cv.CV_CHAIN_APPROX_SIMPLE)
        print len(cntr)
        #print cntr[0].shape
        #cntr[1].dtype=np.float32
        #ret=cv2.contourArea(np.array(cntr[1]))
        #print ret
        #cntr[0].dtype=np.uint8
        cv2.drawContours(m,cntr,-1,(255,255,255),3)
        cv2.imshow("mask_fg",self.fg)
        cv2.imshow("mask_bg",self.bg)
        cv2.imshow("mark",m)
Back_sub.py (project: virtual-dressing-room, author: akash0x53)
def subtract_back(self,frm):
        #dst=self.__back__-self.__foreground__
        temp=np.zeros((600,800),np.uint8)

        self.__foreground__=cv2.blur(self.__foreground__,(3,3))
        dst=cv2.absdiff(self.__back__,self.__foreground__)

        #dst=cv2.adaptiveThreshold(dst,255,cv.CV_THRESH_BINARY,cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,5,10)
        val,dst=cv2.threshold(dst,0,255,cv.CV_THRESH_BINARY+cv.CV_THRESH_OTSU)

        fg=cv2.erode(dst,None,iterations=1)
        bg=cv2.dilate(dst,None,iterations=4)

        _,bg=cv2.threshold(bg,1,128,1)

        mark=cv2.add(fg,bg)
        mark32=np.int32(mark)
        #dst.copy(temp)

        #seq=cv.FindContours(cv.fromarray(dst),self.mem,cv.CV_RETR_EXTERNAL,cv.CV_CHAIN_APPROX_SIMPLE)
        #cntr,h=cv2.findContours(dst,cv.CV_RETR_EXTERNAL,cv.CV_CHAIN_APPROX_SIMPLE)
        #print cntr,h
        #cv.DrawContours(cv.fromarray(temp),seq,(255,255,255),(255,255,255),1,cv.CV_FILLED)
        cv2.watershed(frm, mark32)
        self.final_mask=cv2.convertScaleAbs(mark32)
        #print temp

        #--outputs---
        #cv2.imshow("subtraction",fg)
        #cv2.imshow("thres",dst)
        #cv2.imshow("thres1",bg)
        #cv2.imshow("mark",mark)
        #cv2.imshow("final",self.final_mask)
faceframes.py (project: The-Machine, author: Jo-Dan)
def overlayimg(back, fore, x, y, w, h):
    # Load two images
    img1 = np.array(back)
    img2 = np.array(fore)

    # create new dimensions
    r = float((h)) / img2.shape[1]
    dim = ((w), int(img2.shape[1] * r))

    # Now create a mask of box and create its inverse mask also
    img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    # resize box and masks
    resized_img2 = cv2.resize(img2, dim, interpolation=cv2.INTER_AREA)
    resized_mask = cv2.resize(mask, dim, interpolation=cv2.INTER_AREA)
    resized_mask_inv = cv2.resize(mask_inv, dim, interpolation=cv2.INTER_AREA)

    # I want to put box in co-ordinates, So I create a ROI
    rows, cols, channels = resized_img2.shape
    roi = img1[y:y+rows, x:x+cols]

    # Now black-out the area of box in ROI
    img1_bg = cv2.bitwise_and(roi, roi, mask=resized_mask_inv)

    # Take only region of box from box image.
    img2_fg = cv2.bitwise_and(resized_img2, resized_img2, mask=resized_mask)

    # Put box in ROI and modify the main image
    dst = cv2.add(img1_bg, img2_fg)
    img1[y:y+rows, x:x+cols] = dst
    return img1
watermark.py (project: python_wavelet_digital_watermarking, author: NewRegin)
def channel_embedding(origin_image_chan, watermark_img_chan):
    coeffs1_1, coeffs1_2, coeffs1_3, coeffs2_3 = dwt2(origin_image_chan, watermark_img_chan)
    embedding_image = cv2.add(cv2.multiply(ORIGIN_RATE, coeffs1_3[0]), cv2.multiply(WATERMARK_RATE, coeffs2_3[0]))
    embedding_image = idwt2(embedding_image, coeffs1_1[1], coeffs1_2[1], coeffs1_3[1])
    np.clip(embedding_image, 0, 255, out=embedding_image)
    embedding_image = embedding_image.astype('uint8')
    return embedding_image
blob.py (project: WPAL-network, author: kyu-sz)
def prep_img_for_blob(img, pixel_means, target_size, max_area, min_size):
    """Mean subtract and scale an image for use in a blob."""
    img = img.astype(np.float32, copy=False)
    img -= pixel_means
    img_shape = img.shape
    img_size_min = np.min(img_shape[0:2])
    img_size_max = np.max(img_shape[0:2])
    img_scale = float(target_size) / float(img_size_max)

    # Prevent the shorter sides from being less than MIN_SIZE
    if np.round(img_scale * img_size_min) < min_size:
        img_scale = np.round(min_size / img_size_min) + 1

    # Prevent the scaled area from being more than MAX_AREA
    if np.round(img_scale * img_size_min * img_scale * img_size_max) > max_area:
        img_scale = math.sqrt(float(max_area) / float(img_size_min * img_size_max))

    # Resize the sample.
    img = cv2.resize(img, None, None, fx=img_scale, fy=img_scale, interpolation=cv2.INTER_LINEAR)

    # Randomly rotate the sample.
    img = cv2.warpAffine(img,
                         cv2.getRotationMatrix2D((img.shape[1] / 2, img.shape[0] / 2),
                                                 np.random.randint(-15, 15), 1),
                         (img.shape[1], img.shape[0]))

    # Perform RGB Jittering
    h, w, c = img.shape
    zitter = np.zeros_like(img)
    for i in xrange(c):
        zitter[:, :, i] = np.random.randint(0, cfg.TRAIN.RGB_JIT, (h, w)) - cfg.TRAIN.RGB_JIT / 2
    img = cv2.add(img, zitter)

    return img, img_scale
video.py (project: memegenerator, author: Huxwell)
def read(self, dst=None):
        w, h = self.frame_size

        if self.bg is None:
            buf = np.zeros((h, w, 3), np.uint8)
        else:
            buf = self.bg.copy()

        self.render(buf)

        if self.noise > 0.0:
            noise = np.zeros((h, w, 3), np.int8)
            cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
            buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3)
        return True, buf
video.py (project: Image-Processing-and-Feature-Detection, author: amita-kapoor)
def read(self, dst=None):
        w, h = self.frame_size

        if self.bg is None:
            buf = np.zeros((h, w, 3), np.uint8)
        else:
            buf = self.bg.copy()

        self.render(buf)

        if self.noise > 0.0:
            noise = np.zeros((h, w, 3), np.int8)
            cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
            buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3)
        return True, buf
video.py (project: Image-Processing-and-Feature-Detection, author: amita-kapoor)
def read(self, dst=None):
        noise = np.zeros(self.render.sceneBg.shape, np.int8)
        cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)

        return True, cv2.add(self.render.getNextFrame(), noise, dtype=cv2.CV_8UC3)
video.py (project: Image-Processing-and-Feature-Detection, author: amita-kapoor)
def read(self, dst=None):
        noise = np.zeros(self.render.sceneBg.shape, np.int8)
        cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)

        return True, cv2.add(self.render.getNextFrame(), noise, dtype=cv2.CV_8UC3)

