Python imsave() usage examples
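
The snippets below are collected from open-source projects and show typical uses of imsave() from scipy.misc and skimage.io. As a minimal reference (a sketch, not taken from any of the projects below), the basic call writes a NumPy array to disk. Note that scipy.misc.imsave has been removed from recent SciPy releases, so skimage.io.imsave or imageio.imwrite are the usual substitutes today.

# Minimal sketch of a basic imsave() call (illustrative only, not project code).
# A uint8 array with values in [0, 255] is the safest input.
import numpy as np
import skimage.io

img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)  # dummy RGB image
skimage.io.imsave('example.png', img)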

image_processing.py (project: TAC-GAN, author: dashayushman)
def load_image_array(image_file, image_size,
                     image_id, data_dir='Data/datasets/mscoco/train2014',
                     mode='train'):
    img = None
    if os.path.exists(image_file):
        #print('found' + image_file)
        img = skimage.io.imread(image_file)
    else:
        print('not found: ' + image_file)
        img = skimage.io.imread('http://mscoco.org/images/%d' % (image_id))
        img_path = os.path.join(data_dir, 'COCO_%s2014_%.12d.jpg' % ( mode,
                                                                      image_id))
        skimage.io.imsave(img_path, img)

    # GRAYSCALE
    if len(img.shape) == 2:
        img_new = np.ndarray( (img.shape[0], img.shape[1], 3), dtype = 'uint8')
        img_new[:,:,0] = img
        img_new[:,:,1] = img
        img_new[:,:,2] = img
        img = img_new

    img_resized = skimage.transform.resize(img, (image_size, image_size))

    # FLIP HORIZONTALLY WITH PROBABILITY 0.5
    if random.random() > 0.5:
        img_resized = np.fliplr(img_resized)

    return img_resized.astype('float32')
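
A hypothetical call to the function above (the path and image_id are placeholders, assuming the standard MS-COCO train2014 layout):

# Illustrative usage only; the file path and image_id are placeholders.
img = load_image_array('Data/datasets/mscoco/train2014/COCO_train2014_000000000009.jpg',
                       image_size=64, image_id=9)
print(img.shape, img.dtype)  # expected: (64, 64, 3) float32
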
utils.py (project: neural-fonts, author: periannath)
def save_concat_images(imgs, img_path):
    concated = np.concatenate(imgs, axis=1)
    misc.imsave(img_path, concated)
test.py (project: GeneGAN, author: Prinsphield)
def swap_attribute(src_img, att_img, model_dir, model, gpu):
    '''
    Input
        src_img: the source image whose attribute you want to change
        att_img: the attribute image that carries the desired attribute
        model_dir: the directory that contains the checkpoint (ckpt.*) files
        model: the GeneGAN network defined in train.py
        gpu: for example, '0,1'. Use '' for CPU mode
    Output
        out1: src_img with attributes
        out2: att_img without attributes
    '''
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu 
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        ckpt = tf.train.get_checkpoint_state(model_dir)
        # print(ckpt)
        # print(ckpt.model_checkpoint_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        out2, out1 = sess.run([model.Ae, model.Bx], feed_dict={model.Ax: att_img, model.Be: src_img})
        misc.imsave('out1.jpg', out1[0])
        misc.imsave('out2.jpg', out2[0])
test.py (project: GeneGAN, author: Prinsphield)
def interpolation(src_img, att_img, inter_num, model_dir, model, gpu):
    '''
    Input
        src_img: the source image whose attribute you want to change
        att_img: the attribute image that carries the desired attribute
        inter_num: number of interpolation points
        model_dir: the directory that contains the checkpoint (ckpt.*) files
        model: the GeneGAN network defined in train.py
        gpu: for example, '0,1'. Use '' for CPU mode
    Output
        out: [src_img, inter1, inter2, ..., inter_{inter_num}]
    '''
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        ckpt = tf.train.get_checkpoint_state(model_dir)
        # print(ckpt)
        # print(ckpt.model_checkpoint_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        out = src_img[0]
        for i in range(1, inter_num + 1):
            lambda_i = i / float(inter_num)
            model.out_i = model.joiner('G_joiner', model.B, model.x * lambda_i) 
            out_i = sess.run(model.out_i, feed_dict={model.Ax: att_img, model.Be: src_img})
            out = np.concatenate((out, out_i[0]), axis=1)
        # print(out.shape)
        misc.imsave('interpolation.jpg', out)
test.py (project: GeneGAN, author: Prinsphield)
def interpolation2(src_img, att_img, inter_num, model_dir, model, gpu):
    '''
    Input
        src_img: the source image whose attribute you want to change
        att_img: the attribute image that carries the desired attribute
        inter_num: number of interpolation points
        model_dir: the directory that contains the checkpoint (ckpt.*) files
        model: the GeneGAN network defined in train.py
        gpu: for example, '0,1'. Use '' for CPU mode
    Output
        out: [src_img, inter1, inter2, ..., inter_{inter_num}]
    '''
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        ckpt = tf.train.get_checkpoint_state(model_dir)
        # print(ckpt)
        # print(ckpt.model_checkpoint_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        B, src_feat = sess.run([model.B, model.e], feed_dict={model.Be: src_img})
        att_feat = sess.run(model.x, feed_dict={model.Ax: att_img})

        out = src_img[0]
        for i in range(1, inter_num + 1):
            lambda_i = i / float(inter_num)
            out_i = sess.run(model.joiner('G_joiner', B, src_feat + (att_feat - src_feat) * lambda_i) )
            out = np.concatenate((out, out_i[0]), axis=1)
        # print(out.shape)
        misc.imsave('interpolation2.jpg', out)
Train.py (project: BirdProject, author: ZlodeiBaal)
def AddNoize(i):
    R = random.randint(0, 1)
    if (R==1):
        i=np.fliplr(i)#random mirroring
    R = random.randint(0, 1)
    if (R==1):
        R = random.randint(-10, 10)
        i= ndimage.interpolation.rotate(i,R)#random rotation
    R = random.randint(0, 1)
    if (R==1):
        crop_left=random.randint(0,15)
        crop_right = random.randint(1, 15)
        crop_top = random.randint(0, 15)
        crop_bot = random.randint(1, 15)
        i=i[crop_left:-crop_right,crop_top:-crop_bot,:] #Random crop
    #The following code is VERY SLOW because it changes brightness in pure Python.
    #It needs optimization; a vectorized sketch follows this function.
    R = random.randint(0, 2)
    if (R==2): #Random brightness in R channel
        d = random.random()+1
        i[:, :, 0] = adjust_gamma(i[:,:,0],d)
    R = random.randint(0, 2)
    if (R==2): #Random brightness in G channel
        d = random.random()+1
        i[:, :, 1] = adjust_gamma(i[:, :, 1], d)
    R = random.randint(0, 2)
    if (R==2): #Random brightness in B channel
        d = random.random()+1
        i[:, :, 2] = adjust_gamma(i[:, :, 2], d)
    #misc.imsave("test.jpg",i)
    return i
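
The author notes that the per-channel brightness code above is slow. One possible speedup is a lookup-table based gamma correction in NumPy; this is a sketch that assumes a uint8 channel with values in [0, 255], not a drop-in replacement for whatever adjust_gamma the project imports:

# Sketch: vectorized gamma correction for one uint8 channel via a lookup table.
# gamma > 1 darkens, gamma < 1 brightens (values are normalized to [0, 1] first).
import numpy as np

def fast_adjust_gamma(channel, gamma):
    lut = ((np.arange(256) / 255.0) ** gamma * 255.0).astype(np.uint8)  # 256-entry LUT
    return lut[channel]  # apply the LUT to every pixel at once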


#Prepare data for learning
utils.py (project: WassersteinGAN.tensorflow, author: shekkizh)
def save_image(image, image_size, save_dir, name=""):
    """
    Save the image after un-processing it, assuming inputs in [-1, 1] (mean 127.5)
    :param image:
    :param save_dir:
    :param name:
    :return:
    """
    image += 1
    image *= 127.5
    image = np.clip(image, 0, 255).astype(np.uint8)
    image = np.reshape(image, (image_size, image_size, -1))
    misc.imsave(os.path.join(save_dir, name + "pred_image.png"), image)
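
The function above assumes generator outputs in [-1, 1] (hence the +1 and *127.5). A hypothetical call with a dummy array standing in for a generated sample:

# Illustrative usage only; a random array replaces a real generator output.
fake = np.random.uniform(-1.0, 1.0, size=(64 * 64 * 3,)).astype(np.float32)
save_image(fake, image_size=64, save_dir='/tmp', name='sample_')
# writes /tmp/sample_pred_image.png
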
process_cities.py (project: learning-to-see-by-moving, author: pulkitag)
def resize_raw_images(prms):
    rawNames,_,_ = get_imnames(prms, isRaw=True)
    tgNames,_,_    = get_imnames(prms)
    for rn,tn in zip(rawNames, tgNames):
        im = scm.imread(rn)
        im = scm.imresize(im, [320, 480])
        dName = os.path.dirname(tn)
        if not os.path.exists(dName):
            os.makedirs(dName)
        scm.imsave(tn, im)

##
# Write pairs file
process_cities.py (project: learning-to-see-by-moving, author: pulkitag)
def vis_pairs(prms, isSave=False, svIdx=None, svPath=None):
    imName1, imName2, euls, trans = read_pairs(prms)
    N = len(imName1)
    seed      = 3
    randState = np.random.RandomState(seed)
    perm = randState.permutation(N)
    fig = plt.figure()
    plt.ion()
    imName1 = [imName1[i] for i in perm]
    imName2 = [imName2[i] for i in perm]
    euls    = [euls[i] for i in perm]
    trans   = [trans[i] for i in perm]
    titleStr = 'Trans: ' + '%.3f ' * 3 + 'Rot: ' + '%.3f ' * 3
    count   = 0
    numSave = 0
    for (im1,im2,eu,tr) in zip(imName1, imName2, euls, trans):
        titleName = titleStr % (tuple(tr) + eu)
        im1 = scm.imread(im1)
        im2 = scm.imread(im2)
        print count
        if isSave:
            if count in svIdx:
                imN1 = svPath % (count,1)
                imN2 = svPath % (count,2)
                scm.imsave(imN1,im1)
                scm.imsave(imN2,im2)
                numSave += 1
                if numSave==len(svIdx):
                    return 
        else:
            vu.plot_pairs(im1, im2, fig, titleStr=titleName)
            cmd = raw_input()   
            if cmd == 'exit':
                return
        count += 1
##
dataset_builder.py (project: images-web-crawler, author: amineHorseman)
def reshape_images(cls, source_folder, target_folder, height=128, width=128,
                       extensions=('.jpg', '.jpeg', '.png')):
        """ copy images and reshape them"""

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and reshape:
        print("Resizing '", source_folder, "' images...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.reshape_images(source_folder + '/' + filename,
                                   target_folder + '/' + filename,
                                   height, width, extensions=extensions)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename)
                    image = ndimage.imread(target_folder + "/" + filename, mode="RGB")
                    image_resized = misc.imresize(image, (height, width))
                    misc.imsave(target_folder + "/" + filename, image_resized)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + filename)
                            image = ndimage.imread(target_folder + "/" + filename, mode="RGB")
                            image_resized = misc.imresize(image, (height, width))
                            misc.imsave(target_folder + "/" + filename, image_resized)
dataset_builder.py (project: images-web-crawler, author: amineHorseman)
def crop_images(cls, source_folder, target_folder, height=128, width=128,
                       extensions=('.jpg', '.jpeg', '.png')):
        """ copy images and center crop them"""

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and crop:
        print("Cropping '", source_folder, "' images...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.crop_images(source_folder + '/' + filename,
                                   target_folder + '/' + filename,
                                   height, width, extensions=extensions)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename)
                    image = ndimage.imread(target_folder + "/" + filename, mode="RGB")
                    [width_original, height_original, _] = image.shape
                    offset_w = (width_original - width) // 2
                    offset_h = (height_original - height) // 2
                    image_cropped = image[offset_w : width + offset_w, offset_h : height + offset_h, :]
                    misc.imsave(target_folder + "/" + filename, image_cropped)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + filename)
                            image = ndimage.imread(target_folder + "/" + filename, mode="RGB")
                            [width_original, height_original, _] = image.shape
                            offset_w = (width_original - width) // 2
                            offset_h = (height_original - height) // 2
                            image_cropped = image[offset_w : width + offset_w, offset_h : height + offset_h, :]
                            misc.imsave(target_folder + "/" + filename, image_cropped)
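
The center-crop offset along each axis is (original_size - target_size) // 2. A standalone sketch of the same idea, assuming the input image is at least as large as the target crop:

# Sketch: center-crop a NumPy image array to (height, width).
# Assumes image.shape[0] >= height and image.shape[1] >= width.
def center_crop(image, height, width):
    off_h = (image.shape[0] - height) // 2  # integer offsets so slicing works
    off_w = (image.shape[1] - width) // 2
    return image[off_h:off_h + height, off_w:off_w + width, ...]
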
dataset_builder.py (project: images-web-crawler, author: amineHorseman)
def convert_to_grayscale(cls, source_folder, target_folder,
                             extensions=('.jpg', '.jpeg', '.png')):
        """ convert images from RGB to Grayscale"""

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and reshape:
        print("Convert '", source_folder, "' images to grayscale...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.convert_to_grayscale(source_folder + '/' + filename,
                                         target_folder + '/' + filename,
                                         extensions=extensions)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename)
                    image = ndimage.imread(target_folder + "/" + filename, flatten=True)
                    misc.imsave(target_folder + "/" + filename, image)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + filename)
                            image = ndimage.imread(target_folder + "/" + filename, flatten=True)
                            misc.imsave(target_folder + "/" + filename, image)
dataset_builder.py (project: images-web-crawler, author: amineHorseman)
def convert_format(cls, source_folder, target_folder,
                       extensions=('.jpg', '.jpeg', '.png'), new_extension='.jpg'):
        """ change images from one format to another (eg. change png files to jpeg) """

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and reshape:
        print("Change format of '", source_folder, "' files...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.convert_format(source_folder + '/' + filename,
                                   target_folder + '/' + filename,
                                   extensions=extensions, new_extension=new_extension)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename + new_extension)
                    image = ndimage.imread(target_folder + "/" + filename + new_extension)
                    misc.imsave(target_folder + "/" + filename + new_extension, image)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            new_filename = os.path.splitext(filename)[0] + new_extension
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + new_filename)
                            image = ndimage.imread(target_folder + "/" + new_filename)
                            misc.imsave(target_folder + "/" + new_filename, image)
TensorflowUtils.py (project: AutoPortraitMatting, author: PetroWu)
def save_image(image, save_dir, name, mean=None):
    """
    Save the image, un-processing it first if a mean is given; otherwise save it as-is
    :param mean:
    :param image:
    :param save_dir:
    :param name:
    :return:
    """
    if mean:
        image = unprocess_image(image, mean)
    misc.imsave(os.path.join(save_dir, name + ".png"), image)
FCN.py (project: AutoPortraitMatting, author: PetroWu)
def save_alpha_img(org, mat, name):
    w, h, _ = mat.shape
    #print(mat[200:210, 200:210])
    rmat = np.reshape(mat, (w, h))
    amat = np.zeros((w, h, 4), dtype=np.int)
    amat[:, :, 3] = rmat * 1000
    amat[:, :, 0:3] = org
    print(amat[200:205, 200:205])
    #im = Image.fromarray(np.uint8(amat))
    #im.save(name + '.png')
    misc.imsave(name + '.png', amat)
TensorflowUtils_plus.py (project: AutoPortraitMatting, author: PetroWu)
def save_image(image, save_dir, name, mean=None):
    """
    Save the image, un-processing it first if a mean is given; otherwise save it as-is
    :param mean:
    :param image:
    :param save_dir:
    :param name:
    :return:
    """
    if mean:
        image = unprocess_image(image, mean)
    misc.imsave(os.path.join(save_dir, name + ".png"), image)

# as described in Sec. 4.2
FCN_plus.py (project: AutoPortraitMatting, author: PetroWu)
def save_alpha_img(org, mat, name):
    w, h = mat.shape[0], mat.shape[1]
    #print(mat[200:210, 200:210])
    rmat = np.reshape(mat, (w, h))
    amat = np.zeros((w, h, 4), dtype=np.int)
    amat[:, :, 3] = np.round(rmat * 1000)
    amat[:, :, 0:3] = org
    #print(amat[200:205, 200:205])
    #im = Image.fromarray(np.uint8(amat))
    #im.save(name + '.png')
    misc.imsave(name + '.png', amat)
utils.py (project: tf_cnnvis, author: InFoCusp)
def _write_deepdream(images, layer, path_outdir, path_logdir):
    is_success = True

    images = _im_normlize([images])
    layer, units, k = layer
    # write into disk
    path_out = os.path.join(path_outdir, "deepdream_" + layer.lower().replace("/", "_"))
    is_success = make_dir(path_out)

    for i in range(len(images)):
        for j in range(images[i].shape[0]):
            img_save = images[i][j]
            if img_save.shape[2] == 1:
                img_save = np.squeeze(img_save, axis=2)
            imsave(os.path.join(path_out, "image_%d.png" % (units[(i * images[i].shape[0]) + j + k])), img_save, format = "png")

    # write into logfile
    path_log = os.path.join(path_logdir, layer.lower().replace("/", "_"))
    is_success = make_dir(path_log)

    with tf.Graph().as_default() as g:
        image = tf.placeholder(tf.float32, shape = [None, None, None, None])

        image_summary_t = tf.summary.image(name = "One_By_One_DeepDream", tensor = image, max_outputs = config["MAX_FEATUREMAP"])

        with tf.Session() as sess:
            summary = sess.run(image_summary_t, feed_dict = {image : np.concatenate(images, axis = 0)})
        try:
            file_writer = tf.summary.FileWriter(path_log, g) # create file writer
            # compute and write the summary
            file_writer.add_summary(summary)
        except:
            is_success = False
            print("Error occured in writting results into log file.")
        finally:
            file_writer.close() # close file writer
    return is_success

