Python examples using cv2.COLOR_RGB2BGR

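Every snippet on this page deals with the same mismatch: PIL, scikit-image, pyautogui, and matplotlib hand you pixels in RGB order, while OpenCV expects BGR, so arrays must be converted whenever they cross that boundary. A minimal round trip, as an illustrative sketch (the file names are placeholders):

import cv2
import numpy as np
from PIL import Image

pil_img = Image.open("photo.jpg")                          # PIL decodes to RGB
bgr = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)   # OpenCV's BGR convention
cv2.imwrite("photo_bgr.jpg", bgr)                          # imwrite expects BGR input

rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)                 # back to RGB for PIL/matplotlib
Image.fromarray(rgb).save("photo_rgb.jpg")
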
avatar.py (project: PixivAvatarBot, author: kophy)
def generate_avatar(dir, filename):
    """
    Crop an avatar from the image at dir/filename and save it as dir/avatar_filename.
    :return: whether an avatar was successfully generated (bool)
    """
    pil_image = numpy.array(Image.open(os.path.join(dir, filename)))
    try:
        image = cv2.cvtColor(pil_image, cv2.COLOR_RGB2BGR)
    except cv2.error:
        # not a 3-channel RGB array (e.g. grayscale); use it as-is
        image = pil_image
    avatar = crop_avatar(image)
    if avatar is None:
        return False
    else:
        cv2.imwrite(os.path.join(dir, "avatar_" + filename), avatar)
        return True
plotting.py (project: face_detection, author: chintak)
def plot_face_bb(p, bb, scale=True, path=True, plot=True):
    if path:
        im = cv2.imread(p)
    else:
        im = cv2.cvtColor(p, cv2.COLOR_RGB2BGR)
    if scale:
        h, w, _ = im.shape
        cv2.rectangle(im, (int(bb[0] * h), int(bb[1] * w)),
                      (int(bb[2] * h), int(bb[3] * w)),
                      (255, 255, 0), thickness=4)
        # print bb * np.asarray([h, w, h, w])
    else:
        cv2.rectangle(im, (int(bb[0]), int(bb[1])), (int(bb[2]), int(bb[3])),
                      (255, 255, 0), thickness=4)
        print "no"
    if plot:
        plt.figure()
        plt.imshow(im[:, :, ::-1])
    else:
        return im[:, :, ::-1]
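
The im[:, :, ::-1] slice used above for matplotlib display is the zero-copy equivalent of cv2.cvtColor for 3-channel images, since COLOR_RGB2BGR and COLOR_BGR2RGB only reverse the channel axis. A quick self-contained check:

import cv2
import numpy as np

rgb = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
assert np.array_equal(cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR), rgb[:, :, ::-1])
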
forest.py (project: checkmymeat, author: kendricktan)
def predict(url):
    global model      
    # Read image
    image = io.imread(url)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, (500, 500), interpolation=cv2.INTER_CUBIC)    

    # Use otsu to mask
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    mask = cv2.medianBlur(mask, 5)

    features = describe(image, mask)

    state = le.inverse_transform(model.predict([features]))[0]
    return {'type': state}
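
The Otsu step above picks the binarization threshold automatically from the grayscale histogram, and THRESH_BINARY_INV inverts the result so the foreground ends up white in the mask. The masking stage in isolation, as a sketch (the input file name is a placeholder):

import cv2

image = cv2.imread("meat.jpg")                 # BGR image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# the threshold value 0 is ignored: Otsu computes its own from the histogram
thresh, mask = cv2.threshold(gray, 0, 255,
                             cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
mask = cv2.medianBlur(mask, 5)                 # suppress speckle noise
print("Otsu threshold: %.0f" % thresh)
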
argumentation_utils.py (project: bot2017Fin, author: AllanYiin)
def equal_color(img: Image, color):
    arr_img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
    arr_img = cv2.resize(arr_img, (img.size[0] * 10, img.size[1] * 10))
    boundaries = []
    boundaries.append(([max(color[2] - 15, 0), max(color[1] - 15, 0), max(color[0] - 15, 0)],
                       [min(color[2] + 15, 255), min(color[1] + 15, 255), min(color[0] + 15, 255)]))
    for (lower, upper) in boundaries:
        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")

        # find the colors within the specified boundaries and apply
        # the mask
        mask = cv2.inRange(arr_img, lower, upper)
        res = cv2.bitwise_and(arr_img, arr_img, mask=mask)
        res = cv2.resize(res, (img.size[0], img.size[1]))
        cv2_im = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
        output_img = Image.fromarray(cv2_im)

        return output_img
image.py (project: tensorlight, author: bsautermeister)
def write(filepath, image):
    """Saves an image or a frame to the specified path.
    Parameters
    ----------
    filepath: str
        The path to the file.
    image: ndarray(float/int)
        The image data. Float images are assumed to be in [0, 1]
        and are scaled by 255 before writing.
    """
    dirpath = os.path.dirname(filepath)
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)

    if image.ndim == 3 and image.shape[2] == 3:  # guard against 2-D grayscale input
        image = cast(image)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    factor = 1
    if is_float_image(image):
        factor = 255

    cv2.imwrite(filepath, image * factor)
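
The factor logic assumes float images live in [0, 1] and scales them to 0..255 before writing; integer images pass through unchanged. The same rule as a self-contained sketch (the project's cast and is_float_image helpers are approximated here by a dtype check):

import cv2
import numpy as np

def write_simple(filepath, image):
    # floats are assumed to be in [0, 1]; scale to the 0..255 byte range
    if np.issubdtype(image.dtype, np.floating):
        image = (image * 255).astype(np.uint8)
    # 3-channel input is treated as RGB and flipped to OpenCV's BGR
    if image.ndim == 3 and image.shape[2] == 3:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    cv2.imwrite(filepath, image)
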
torch_neural_net.lutorpy.py (project: EyesInTheSky, author: SherineSameh)
def forward(self, rgbImg):
        """
        Perform a forward network pass of an RGB image.

        :param rgbImg: RGB image to process. Shape: (imgDim, imgDim, 3)
        :type rgbImg: numpy.ndarray
        :return: Vector of features extracted from the neural network.
        :rtype: numpy.ndarray
        """
        assert rgbImg is not None

        t = '/tmp/openface-torchwrap-{}.png'.format(
            binascii.b2a_hex(os.urandom(8)))
        bgrImg = cv2.cvtColor(rgbImg, cv2.COLOR_RGB2BGR)
        cv2.imwrite(t, bgrImg)
        rep = self.forwardPath(t)
        os.remove(t)
        return rep
torch_neural_net.py (project: EyesInTheSky, author: SherineSameh)
def forward(self, rgbImg):
        """
        Perform a forward network pass of an RGB image.

        :param rgbImg: RGB image to process. Shape: (imgDim, imgDim, 3)
        :type rgbImg: numpy.ndarray
        :return: Vector of features extracted from the neural network.
        :rtype: numpy.ndarray
        """
        assert rgbImg is not None

        t = '/tmp/openface-torchwrap-{}.png'.format(
            binascii.b2a_hex(os.urandom(8)))
        bgrImg = cv2.cvtColor(rgbImg, cv2.COLOR_RGB2BGR)
        cv2.imwrite(t, bgrImg)
        rep = self.forwardPath(t)
        os.remove(t)
        return rep
trainer.py (project: ssd_tensorflow, author: seann999)
def draw_outputs(img, boxes, confidences, wait=1):
    I = img * 255.0

    #nms = non_max_suppression_fast(np.asarray(filtered_boxes), 1.00)
    picks = postprocess_boxes(boxes, confidences)

    for box, conf, top_label in picks:#[filtered[i] for i in picks]:
        if top_label != classes:
            #print("%f: %s %s" % (conf, coco.i2name[top_label], box))

            c = colorsys.hsv_to_rgb(((top_label * 17) % 255) / 255.0, 1.0, 1.0)
            c = tuple([255*c[i] for i in range(3)])

            draw_ann(I, box, i2name[top_label], color=c, confidence=conf)

    I = cv2.cvtColor(I.astype(np.uint8), cv2.COLOR_RGB2BGR)
    cv2.imshow("outputs", I)
    cv2.waitKey(wait)
trainer.py (project: textboxes, author: shinjayne)
def draw_outputs(img, boxes, confidences, wait=1):
    I = img * 255.0

    #nms = non_max_suppression_fast(np.asarray(filtered_boxes), 1.00)
    picks = postprocess_boxes(boxes, confidences)

    for box, conf, top_label in picks:#[filtered[i] for i in picks]:
        if top_label != classes:
            #print("%f: %s %s" % (conf, coco.i2name[top_label], box))

            c = colorsys.hsv_to_rgb(((top_label * 17) % 255) / 255.0, 1.0, 1.0)
            c = tuple([255*c[i] for i in range(3)])

            draw_ann(I, box, i2name[top_label], color=c, confidence=conf)
    I = cv2.cvtColor(I.astype(np.uint8), cv2.COLOR_RGB2BGR)
    cv2.imshow("outputs", I)
    cv2.waitKey(wait)
Screen.py (project: fatego-auto, author: lishunan246)
def __init__(self):
        t = ImageGrab.grab().convert("RGB")
        self.screen = cv2.cvtColor(numpy.array(t), cv2.COLOR_RGB2BGR)

        self.ultLoader = ImageLoader('image/ult/')

        if self.have('topleft'):
            tl = self._imageLoader.get('topleft')
            res = cv2.matchTemplate(self.screen, tl, cv2.TM_CCOEFF_NORMED)

            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            x1, y1 = max_loc
            rd = self._imageLoader.get('rightdown')
            res = cv2.matchTemplate(self.screen, rd, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            x2, y2 = max_loc
            # default 989
            GameStatus().y = y2 - y1
            GameStatus().use_Droid4X = True
server.py (project: img_classifier_prepare, author: zonekey)
def post(self):
        '''  curl --request POST --data-binary "@fname.jpg" --header "Content-Type: image/jpg"  http://localhost:8899/pic
        '''
        global cf,lock

        body = self.request.body
        try:
            img = Image.open(StringIO.StringIO(body))
            img = cv.cvtColor(np.array(img), cv.COLOR_RGB2BGR)
            lock.acquire()
            pred = cf.predicate(img)
            lock.release()
            rx = { "result": [] }
            for i in range(0, 3):
                r = { 'title': pred[i][1], 'score': float(pred[i][2]) }
                rx['result'].append(r)
            self.finish(rx)
        except Exception as e:
            print(e)
            self.finish(str(e))



Screenshot.py (project: osrmacro, author: jjvilm)
def shoot(x1,y1,x2,y2, *args, **kwargs):
    """Takes screenshot at given coordinates as PIL image format, the converts to cv2 grayscale image format and returns it"""
    # creates widht & height for screenshot region
    w = x2 - x1
    h = y2 - y1
    # PIL format as RGB
    img = pyautogui.screenshot(region=(x1,y1,w,h)) #X1,Y1,X2,Y2
    #im.save('screenshot.png')

    # Converts to an array used for OpenCV
    img = np.array(img)

    try:
        for arg in args:
            if arg == 'hsv':
                # Converts to BGR format for OpenCV
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                return hsv_img

            if arg == 'rgb':
                # note: COLOR_RGB2BGR flips the channels, so this actually returns BGR
                bgr_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                return bgr_img
    except:
        pass

    cv_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # img is still in RGB order here
    return cv_gray
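
A side note on the 'hsv' branch: converting RGB to BGR and then BGR to HSV gives exactly the same result as a single cv2.COLOR_RGB2HSV conversion, because the intermediate step only relabels the channel order:

import cv2
import numpy as np

rgb = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
via_bgr = cv2.cvtColor(cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR), cv2.COLOR_BGR2HSV)
direct = cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV)
assert np.array_equal(via_bgr, direct)
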
log_utils.py (project: pybot, author: spillai)
def decode_rgb(self, data): 
        w, h = data.image.width, data.image.height
        if data.image.image_data_format == self.image_msg_t_.VIDEO_RGB_JPEG: 
            img = cv2.imdecode(np.asarray(bytearray(data.image.image_data), dtype=np.uint8), -1)
            bgr = img.reshape((h,w,3))[::self.skip, ::self.skip, :]             
        else: 
            img = np.frombuffer(data.image.image_data, dtype=np.uint8)
            rgb = img.reshape((h,w,3))[::self.skip, ::self.skip, :] 
            bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        if not self.bgr: 
            return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        else: 
            return bgr
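
decode_rgb mixes two paths: cv2.imdecode for JPEG-compressed payloads and a raw reinterpretation of the byte buffer for uncompressed RGB frames. The raw path in isolation might look like this hypothetical helper (np.frombuffer is the non-deprecated spelling of np.fromstring):

import cv2
import numpy as np

def raw_rgb_bytes_to_bgr(raw, w, h):
    # reinterpret the buffer as an h x w x 3 RGB image, then flip to BGR
    rgb = np.frombuffer(raw, dtype=np.uint8).reshape((h, w, 3))
    return cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
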
nyu_rgbd.py (project: pybot, author: spillai)
def _process_items(self, index, rgb_im, depth_im, instance, label, bbox, pose): 
        # print 'Processing pose', pose, bbox


        # def _process_bbox(bbox): 
        #     return dict(category=bbox['category'], target=UWRGBDDataset.target_hash[str(bbox['category'])], 
        #                 left=bbox.coords['left'], right=bbox['right'], top=bbox['top'], bottom=bbox['bottom'])

        # # Compute bbox from pose and map (v2 support)
        # if self.version == 'v1': 
        #     if bbox is not None: 
        #         bbox = [_process_bbox(bb) for bb in bbox]
        #         bbox = filter(lambda bb: bb['target'] in UWRGBDDataset.train_ids_set, bbox)

        # if self.version == 'v2': 
        #     if bbox is None and hasattr(self, 'map_info'): 
        #         bbox = self.get_bboxes(pose)

        # print 'Processing pose', pose, bbox

        rgb_im = np.swapaxes(rgb_im, 0, 2)
        rgb_im = cv2.cvtColor(rgb_im, cv2.COLOR_RGB2BGR)

        depth_im = np.swapaxes(depth_im, 0, 1) * 1000
        instance = np.swapaxes(instance, 0, 1)
        label = np.swapaxes(label, 0, 1)

        return AttrDict(index=index, img=rgb_im, depth=depth_im, instance=instance, 
                        label=label, bbox=bbox if bbox is not None else [], pose=pose)
image_utils.py (project: pybot, author: spillai)
def to_color(im, flip_rb=False): 
    if im.ndim == 2: 
        return cv2.cvtColor(im, cv2.COLOR_GRAY2RGB if flip_rb else cv2.COLOR_GRAY2BGR)
    else: 
        return cv2.cvtColor(im, cv2.COLOR_RGB2BGR) if flip_rb else im.copy()
helpers.py (project: spikefuel, author: duguyue100)
def read_video(v_name):
    """A workaround function for reading video.

    Apparently precompiled OpenCV couldn't read AVI videos on Mac OS X
    and Linux,
    therefore I use PyAV, a ffmpeg binding to extract video frames

    Parameters
    ----------
    v_name : string
        absolute path to video

    Returns
    -------
    frames : list
        An ordered list for storing frames
    num_frames : int
        number of frames in the video
    """
    container = av.open(v_name)
    video = next(s for s in container.streams if s.type == b'video')

    frames = []
    for packet in container.demux(video):
        for frame in packet.decode():
            frame_t = np.array(frame.to_image())
            frames.append(cv2.cvtColor(frame_t, cv2.COLOR_RGB2BGR))

    return frames, len(frames)
blur_image.py (project: DeblurGAN, author: KupynOrest)
def blur_image(self, save=False, show=False):
        if self.part is None:
            psf = self.PSFs
        else:
            psf = [self.PSFs[self.part]]
        yN, xN, channel = self.shape
        key, kex = self.PSFs[0].shape
        delta = yN - key
        assert delta >= 0, 'resolution of image should be higher than kernel'
        result = []
        if len(psf) > 1:
            for p in psf:
                tmp = np.pad(p, delta // 2, 'constant')
                cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
                # blured = np.zeros(self.shape)
                blured = cv2.normalize(self.original, self.original, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,
                                       dtype=cv2.CV_32F)
                blured[:, :, 0] = np.array(signal.fftconvolve(blured[:, :, 0], tmp, 'same'))
                blured[:, :, 1] = np.array(signal.fftconvolve(blured[:, :, 1], tmp, 'same'))
                blured[:, :, 2] = np.array(signal.fftconvolve(blured[:, :, 2], tmp, 'same'))
                blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
                blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)
                result.append(np.abs(blured))
        else:
            psf = psf[0]
            tmp = np.pad(psf, delta // 2, 'constant')
            cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
            blured = cv2.normalize(self.original, self.original, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,
                                   dtype=cv2.CV_32F)
            blured[:, :, 0] = np.array(signal.fftconvolve(blured[:, :, 0], tmp, 'same'))
            blured[:, :, 1] = np.array(signal.fftconvolve(blured[:, :, 1], tmp, 'same'))
            blured[:, :, 2] = np.array(signal.fftconvolve(blured[:, :, 2], tmp, 'same'))
            blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
            blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)
            result.append(np.abs(blured))
        self.result = result
        if show or save:
            self.__plot_canvas(show, save)
utils.py (project: kaggle-dstl-satellite-imagery-feature-detection, author: u1234x1234)
def get_rgb_image(img_id, h=None, w=None):
    image = get_rgb_data(img_id)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    for c in range(3):
        min_val, max_val = np.percentile(image[:, :, c], [2, 98])
        image[:, :, c] = 255*(image[:, :, c] - min_val) / (max_val - min_val)
        image[:, :, c] = np.clip(image[:, :, c], 0, 255)
    image = (image).astype(np.uint8)
    if h and w:
        image = cv2.resize(image, (w, h), interpolation=cv2.INTER_LANCZOS4)
    return image
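
The per-channel loop is a 2-98 percentile contrast stretch: it maps the central mass of each band onto the full 0..255 range, which is what makes raw satellite channels viewable. The same idea as a standalone sketch:

import numpy as np

def stretch_percentiles(channel, lo=2, hi=98):
    # linearly map the [lo, hi] percentile range onto 0..255 and clip the tails
    min_val, max_val = np.percentile(channel, [lo, hi])
    out = 255.0 * (channel - min_val) / (max_val - min_val)
    return np.clip(out, 0, 255).astype(np.uint8)
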
augment_batch_iterator.py (project: face_detection, author: chintak)
def get_scaled_translated_img_bb(self, name, bb):
        im = imread(name)
        img = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
        h0, w0, _ = self.img_size
        wc, hc = (bb[0] + bb[2]) / 2, (bb[1] + bb[3]) / 2
        face_width = (bb[3] - bb[1]) / 2
        # Old approach: scale and then translate
        res, new_face_width, shc, swc = self.compute_scale_factor(
            img, face_width, hc, wc)
        thc, twc = self.compute_translation(new_face_width, shc, swc)
        # New approach: translate and then scale
        # thc, twc = self.compute_translation(face_width, hc, wc,
        #                                     min_pad=self.MIN_FACE_SIZE + 10)
        # high_scale = np.min([thc - 5, h0 - thc - 5, twc - 5, w0 - twc - 5])
        # res, new_face_width, shc, swc = self.compute_scale_factor(
        #     img, face_width, hc, wc,
        #     high_scale=high_scale, low_scale=None)
        out_bgr, new_bb = self.copy_source_to_target(res, new_face_width,
                                                     shc, swc, thc, twc)

        log = "%.1f,%.1f,%.0f\n" % (
            (new_bb[1] + new_bb[3]) / 2, (new_bb[0] + new_bb[2]) / 2, new_face_width * 2)
        with open('aug.csv', mode='a', buffering=0) as f:
            f.write(log)
        # cv2.rectangle(out_bgr, (int(new_bb[0]), int(new_bb[1])), (int(new_bb[2]), int(new_bb[3])),
        #               (255, 255, 0), thickness=4)
        # cv2.imwrite("%d.jpg" % os.getpid(), out_bgr)
        # sys.exit(0)
        out = cv2.cvtColor(out_bgr, cv2.COLOR_BGR2RGB)
        return out, new_bb
example.py (project: tensorflow-action-conditional-video-prediction, author: williamd4112)
def main(args):
    from tfacvp.model import ActionConditionalVideoPredictionModel
    from tfacvp.util import post_process_rgb

    with tf.Graph().as_default() as graph:    
        logging.info('Create model [num_act = %d] for testing' % (args.num_act))
        model = ActionConditionalVideoPredictionModel(num_act=args.num_act, is_train=False)

        config = get_config(args)
        s = np.load(args.data)
        mean = np.load(args.mean)
        scale = 255.0

        with tf.Session(config=config) as sess:
            logging.info('Loading weights from %s' % (args.load))
            model.restore(sess, args.load)

            for i in range(args.num_act):
                logging.info('Predict next frame condition on action %d' % (i))
                a = np.identity(args.num_act)[i]
                x_t_1_pred_batch = model.predict(sess, s[np.newaxis, :], a[np.newaxis, :])[0]

                img = x_t_1_pred_batch[0]
                img = post_process_rgb(img, mean, scale)
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                cv2.imwrite('pred-%02d.png' % i, img)
example.py (project: rl-attack-detection, author: yenchenlin)
def main(args):
    from tfacvp.model import ActionConditionalVideoPredictionModel
    from tfacvp.util import post_process_rgb

    with tf.Graph().as_default() as graph:    
        logging.info('Create model [num_act = %d] for testing' % (args.num_act))
        model = ActionConditionalVideoPredictionModel(num_act=args.num_act, is_train=False)

        config = get_config(args)
        s = np.load(args.data)
        mean = np.load(args.mean)
        scale = 255.0

        with tf.Session(config=config) as sess:
            logging.info('Loading weights from %s' % (args.load))
            model.restore(sess, args.load)

            for i in range(args.num_act):
                logging.info('Predict next frame condition on action %d' % (i))
                a = np.identity(args.num_act)[i]
                x_t_1_pred_batch = model.predict(sess, s[np.newaxis, :], a[np.newaxis, :])[0]

                img = x_t_1_pred_batch[0]
                img = post_process_rgb(img, mean, scale)
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                cv2.imwrite('pred-%02d.png' % i, img)
demo.py (project: pycaffe-yolo, author: Zehaos)
def main(argv):
    model_filename = ''
    weight_filename = ''
    img_filename = ''
    try:
        opts, args = getopt.getopt(argv, "hm:w:i:")
        print(opts)
    except getopt.GetoptError:
        print('yolo_main.py -m <model_file> -w <weight_file> -i <img_file>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('yolo_main.py -m <model_file> -w <weight_file> -i <img_file>')
            sys.exit()
        elif opt == "-m":
            model_filename = arg
        elif opt == "-w":
            weight_filename = arg
        elif opt == "-i":
            img_filename = arg
    print('model file is "%s"' % model_filename)
    print('weight file is "%s"' % weight_filename)
    print('image file is "%s"' % img_filename)

    caffe.set_device(0)
    caffe.set_mode_gpu()
    net = caffe.Net(model_filename, weight_filename, caffe.TEST)
    img = caffe.io.load_image(img_filename)  # load the image using caffe io
    img_ = scipy.misc.imresize(img, (448, 448))
    transformer = SimpleTransformer([104.00699, 116.66877, 122.67892])
    input = transformer.preprocess(img_)
    out = net.forward_all(data=input)
    print(out.items())
    img_cv = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    results = interpret_output(out['result'][0], img.shape[1], img.shape[0])  # fc27 instead of fc12 for yolo_small
    show_results(img_cv, results, img.shape[1], img.shape[0])
    cv2.waitKey(0)
constrained_opt.py (project: iGAN, author: junyanz)
def save_constraints(self):
        [im_c, mask_c, im_e, mask_e] = self.combine_constraints(self.constraints)
        # write image
        # im_c2 = cv2.cvtColor(im_c, cv2.COLOR_RGB2BGR)
        # cv2.imwrite('input_color_image.png', im_c2)
        # cv2.imwrite('input_color_mask.png', mask_c)
        # cv2.imwrite('input_edge_map.png', im_e)
        self.prev_im_c = im_c.copy()
        self.prev_mask_c = mask_c.copy()
        self.prev_im_e = im_e.copy()
        self.prev_mask_e =mask_e.copy()
train_predict_z.py (project: iGAN, author: junyanz)
def rec_test(test_data, n_epochs=0, batch_size=128, output_dir=None):

    print('computing reconstruction loss on test images')
    rec_imgs = []
    imgs = []
    costs = []
    ntest = len(test_data)

    for n in tqdm(range(ntest // batch_size)):
        imb = test_data[n*batch_size:(n+1)*batch_size, ...]
        # imb = train_dcgan_utils.transform(xmb, nc=3)
        [cost, gx] = _train_p_cost(imb)
        costs.append(cost)
        ntest = ntest + 1
        if n == 0:
            utils.print_numpy(imb)
            utils.print_numpy(gx)
            imgs.append(train_dcgan_utils.inverse_transform(imb, npx=npx, nc=nc))
            rec_imgs.append(train_dcgan_utils.inverse_transform(gx, npx=npx, nc=nc))

    if output_dir is not None:
        # st()
        save_samples = np.hstack(np.concatenate(imgs, axis=0))
        save_recs = np.hstack(np.concatenate(rec_imgs, axis=0))
        save_comp = np.vstack([save_samples, save_recs])
        mean_cost = np.mean(costs)

        txt = 'epoch = %3.3d, cost = %3.3f' % (n_epochs, mean_cost)

        width = save_comp.shape[1]
        save_f = (save_comp*255).astype(np.uint8)
        html.save_image([save_f], [''], header=txt, width=width, cvt=True)
        html.save()
        save_cvt = cv2.cvtColor(save_f, cv2.COLOR_RGB2BGR)
        cv2.imwrite(os.path.join(rec_dir, 'rec_epoch_%5.5d.png'%n_epochs), save_cvt)

    return mean_cost
forest.py (project: checkmymeat, author: kendricktan)
def predict(url):
    global model, COOKED_PHRASES, RAW_PHRASES   
    # Read image
    image = io.imread(url)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, (500, 500), interpolation=cv2.INTER_CUBIC)    

    # Use otsu to mask
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    mask = cv2.medianBlur(mask, 5)

    # Get features
    features = describe(image, mask)

    # Predict it
    result = model.predict([features])
    probability = model.predict_proba([features])[0][result][0]        
    state = le.inverse_transform(result)[0]

    phrase = ''

    if 'cook' in state:
        phrase = COOKED_PHRASES[int(random.random()*len(COOKED_PHRASES))]
    elif 'raw' in state:
        phrase = RAW_PHRASES[int(random.random()*len(RAW_PHRASES))]

    return {'type': state, 'confidence': probability, 'phrase': phrase}
tester.py (project: focal-loss, author: unsky)
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = list(map(int, bbox))
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
trainer_matches.py (project: Yugioh-bot, author: will7200)
def __init__(self, query, x=0, y=0):
        self.query = query
        self.xThreshold = x
        self.yThreshold = y
        if type(query) is Pillow.Image.Image:
            self.query = cv2.cvtColor(np.array(self.query), cv2.COLOR_RGB2BGR)
        elif type(query) is np.ndarray:
            self.query = query
        else:
            self.query = cv2.imread(query, 0)
        self.goodMatches = []
        self.images = []
        self.circlePoints = []
        self.kmeans = None
        self.white_query = None
convert_video.py (project: neural_style_synthesizer, author: dwango)
def convert_video(self, video_path, output_directory, skip=0, resize=400):
        video = cv2.VideoCapture(video_path)
        video_output = None
        i = 0
        img_init = None
        while video.get(cv2.cv.CV_CAP_PROP_POS_AVI_RATIO) < 1.0:
            i += 1
            for _ in range(skip+1):
                status, bgr_img = video.read()
            img = PIL.Image.fromarray(cv2.cvtColor(
                bgr_img,
                cv2.COLOR_BGR2RGB
            ))
            img = neural_art.utility.resize_img(img, resize)
            if video_output is None:
                video_output = cv2.VideoWriter(
                    "{}/out.avi".format(output_directory),
                    fourcc=0, #raw
                    fps=video.get(cv2.cv.CV_CAP_PROP_FPS) / (skip + 1),
                    frameSize=img.size,
                    isColor=True
                )
                if(not video_output.isOpened()):
                    raise(Exception("Cannot Open VideoWriter"))
            if img_init is None:
                img_init = img
            converted_img = self.frame_converter.convert(img, init_img=img_init, iteration=self.iteration)
            converted_img.save("{}/converted_{:05d}.png".format(output_directory, i))
            img_init = converted_img
            video_output.write(cv2.cvtColor(
                numpy.asarray(converted_img),
                cv2.COLOR_RGB2BGR
            ))
        video_output.release()
main.py (project: DeepLearning, author: Wanwannodao)
def main(_):
    loader = Loader(FLAGS.data_dir, FLAGS.data, FLAGS.batch_size)
    print("# of data: {}".format(loader.data_num))
    with tf.Session() as sess:                                
        lsgan = LSGAN([FLAGS.batch_size, 112, 112, 3])
        sess.run(tf.global_variables_initializer())

        for epoch in range(10000):
            loader.reset()

            for step in range(int(loader.batch_num/FLAGS.d)):
                if (step == 0 and epoch % 1 == 100):  # note: always False, so periodic visualization is effectively disabled
                    utils.visualize(sess.run(lsgan.gen_img), epoch)

                for _ in range(FLAGS.d):
                    batch = np.asarray(loader.next_batch(), dtype=np.float32)
                    batch = (batch-127.5) / 127.5
                    #print("{}".format(batch.shape))
                    feed={lsgan.X: batch}
                    _ = sess.run(lsgan.d_train_op, feed_dict=feed)
                        #utils.visualize(batch, (epoch+1)*100)

                #cv2.namedWindow("window")
                #cv2.imshow("window", cv2.cvtColor(batch[0], cv2.COLOR_RGB2BGR))
                #cv2.waitKey(0)
                #cv2.destroyAllWindows()

                _ = sess.run(lsgan.g_train_op)
tester.py (project: Deformable-ConvNets, author: msracver)
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = list(map(int, bbox))
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im

