Example source code for Python's zoom()

dream.py (project: deepdream-neural-style-transfer, author: rdcolema)
def deepdream(net, base_img, iter_n=70, octave_n=7, octave_scale=1.4, end='inception_5a/pool_proj', clip=True,
              **step_params):
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))

    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)

        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)

            # visualization
            vis = deprocess(net, src.data[0])
            if not clip:  # adjust image contrast if clipping is disabled
                vis = vis * (255.0 / np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)

        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])
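The nd.zoom calls in deepdream() do two jobs: shrink the base image into an octave pyramid, then re-enlarge the accumulated detail to each octave's resolution. Below is a minimal, self-contained sketch of just that resizing logic, using a random stand-in image and no Caffe network (the array shape and octave settings are made up for illustration):

import numpy as np
import scipy.ndimage as nd

# Hypothetical image in Caffe's (channels, height, width) layout.
img = np.random.rand(3, 256, 256).astype(np.float32)

octave_n, octave_scale = 4, 1.4
octaves = [img]
for _ in range(octave_n - 1):
    # Shrink height and width only; the channel axis keeps factor 1.
    octaves.append(nd.zoom(octaves[-1],
                           (1, 1.0 / octave_scale, 1.0 / octave_scale),
                           order=1))

detail = np.zeros_like(octaves[-1])
for octave_base in octaves[::-1]:          # smallest octave first
    h, w = octave_base.shape[-2:]
    h1, w1 = detail.shape[-2:]
    # Upscale the accumulated detail to the current octave's size.
    detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)
    assert octave_base.shape == detail.shape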
pypher.py (project: pypher, author: aboucaud)
def imresample(image, source_pscale, target_pscale, interp_order=1):
    """
    Resample data array from one pixel scale to another

    The resampling ensures the parity of the image is conserved
    to preserve the centering.

    Parameters
    ----------
    image : `numpy.ndarray`
        Input data array
    source_pscale : float
        Pixel scale of ``image`` in arcseconds
    target_pscale : float
        Pixel scale of output array in arcseconds
    interp_order : int, optional
        Spline interpolation order [0, 5] (default 1: linear)

    Returns
    -------
    output : `numpy.ndarray`
        Resampled data array

    """
    old_size = image.shape[0]
    new_size_raw = old_size * source_pscale / target_pscale
    new_size = int(np.ceil(new_size_raw))

    if new_size > 10000:
        raise MemoryError("The resampling will yield a too large image. "
                          "Please resize the input PSF image.")

    # Check for parity
    if (old_size - new_size) % 2 == 1:
        new_size += 1

    ratio = new_size / old_size

    return zoom(image, ratio, order=interp_order) / ratio**2
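A quick way to see why imresample() divides by ratio**2: interpolation preserves the per-pixel surface brightness, so the summed flux scales with the number of pixels. Below is a small stand-alone check, with a hypothetical Gaussian PSF and arbitrarily chosen pixel scales:

import numpy as np
from scipy.ndimage import zoom

# Hypothetical PSF sampled at 0.10 arcsec/pixel, resampled to 0.05 arcsec/pixel.
y, x = np.mgrid[-32:33, -32:33]
psf = np.exp(-(x**2 + y**2) / (2.0 * 3.0**2))
psf /= psf.sum()

ratio = 0.10 / 0.05                       # source_pscale / target_pscale
resampled = zoom(psf, ratio, order=1) / ratio**2

print(psf.sum(), resampled.sum())         # both close to 1: flux is conserved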
NLLGrid.py (project: backtrackbb, author: BackTrackBB)
def resample(self, dx, dy, dz):
        zoom_x = self.dx / dx
        zoom_y = self.dy / dy
        zoom_z = self.dz / dz
        self.array = zoom(self.array, (zoom_x, zoom_y, zoom_z))
        self.nx, self.ny, self.nz = self.array.shape
        if self.type == 'SLOW_LEN':
            self.array *= dx / self.dx
        self.dx = dx
        self.dy = dy
        self.dz = dz
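The zoom factors here are simply the old node spacing divided by the requested spacing, one per axis. A stand-alone sketch on a plain 3-D array (grid size and spacings are made up) shows the resulting shape change:

import numpy as np
from scipy.ndimage import zoom

grid = np.random.rand(20, 20, 10)         # hypothetical 3-D grid
dx_old, dy_old, dz_old = 1.0, 1.0, 2.0    # current spacings
dx_new, dy_new, dz_new = 0.5, 0.5, 1.0    # requested spacings

factors = (dx_old / dx_new, dy_old / dy_new, dz_old / dz_new)
resampled = zoom(grid, factors)
print(grid.shape, '->', resampled.shape)  # (20, 20, 10) -> (40, 40, 20)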
dataIO.py (project: tf-3dgan, author: meetshah1995)
def getVolumeFromOFF(path, sideLen=32):
    mesh = trimesh.load(path)
    volume = trimesh.voxel.Voxel(mesh, 0.5).raw
    (x, y, z) = map(float, volume.shape)
    volume = nd.zoom(volume.astype(float), 
                     (sideLen/x, sideLen/y, sideLen/z),
                     order=1, 
                     mode='nearest')
    volume[np.nonzero(volume)] = 1.0
    return volume.astype(np.bool)
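Aside from the trimesh voxelization, the nd.zoom call just forces an arbitrary occupancy grid to a fixed side length and re-binarizes the interpolated values. A minimal sketch with a synthetic volume instead of a mesh:

import numpy as np
import scipy.ndimage as nd

side_len = 32
volume = np.zeros((50, 40, 60), dtype=float)    # hypothetical occupancy grid
volume[10:30, 5:35, 20:50] = 1.0

x, y, z = map(float, volume.shape)
resized = nd.zoom(volume, (side_len / x, side_len / y, side_len / z),
                  order=1, mode='nearest')
resized[np.nonzero(resized)] = 1.0              # re-binarize after interpolation
print(resized.astype(bool).shape)               # (32, 32, 32)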
dataIO.py (project: tf-3dgan, author: meetshah1995)
def getVoxelFromMat(path, cube_len=64):
    voxels = io.loadmat(path)['instance']
    voxels = np.pad(voxels,(1,1),'constant',constant_values=(0,0))
    if cube_len == 64:  # only the 64-cube case needs nearest-neighbour upscaling
        voxels = nd.zoom(voxels, (2,2,2), mode='constant', order=0)
    return voxels
DetectEmotion.py (project: webCamEmocognizer, author: DeepInEvil)
def extract_face_features(gray, detected_face, offset_coefficients):
        (x, y, w, h) = detected_face
        #print x , y, w ,h
        horizontal_offset = np.int(np.floor(offset_coefficients[0] * w))
        vertical_offset = np.int(np.floor(offset_coefficients[1] * h))


        extracted_face = gray[y+vertical_offset:y+h, 
                          x+horizontal_offset:x-horizontal_offset+w]
        #print extracted_face.shape
        new_extracted_face = zoom(extracted_face, (48. / extracted_face.shape[0], 
                                               48. / extracted_face.shape[1]))
        new_extracted_face = new_extracted_face.astype(np.float32)
        new_extracted_face /= float(new_extracted_face.max())
        return new_extracted_face
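The essential zoom() usage above is rescaling a crop of arbitrary size to a fixed 48x48 patch, with per-axis factors computed from the crop's own shape. A stand-alone version on a synthetic grayscale crop:

import numpy as np
from scipy.ndimage import zoom

crop = np.random.rand(123, 87)             # hypothetical face crop (any size)
patch = zoom(crop, (48. / crop.shape[0], 48. / crop.shape[1]))
patch = patch.astype(np.float32)
patch /= float(patch.max())                # scale to [0, 1]
print(patch.shape)                         # (48, 48)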
show_images.py (project: CNNbasedMedicalSegmentation, author: BRML)
def re_rescale(im):
    d_im = zoom(im, (1, 0.5, 0.8), order=3)
    d_im = zoom(d_im, (1, 2, (1/0.8)), order=3)

    return d_im
show_images.py (project: CNNbasedMedicalSegmentation, author: BRML)
def show_downsize():
    for im in gen_images(n=-1, crop=True):
        t_im = im['T1c']
        gt = im['gt']

        t_im = np.asarray(t_im, dtype='float32')
        gt = np.asarray(gt, dtype='float32')

        d_im = zoom(t_im, 0.5, order=3)
        d_gt = zoom(gt, 0.5, order=0)
        print 'New shape: ', d_im.shape

        slices1 = np.arange(0, d_im.shape[0], d_im.shape[0]/20)
        slices2 = np.arange(0, t_im.shape[0], t_im.shape[0]/20)

        for s1, s2 in zip(slices1, slices2):
            d_im_slice = d_im[s1]
            d_gt_slice = d_gt[s1]

            im_slice = t_im[s2]
            gt_slice = gt[s2]

            title0= 'Original'
            title1= 'Downsized'
            vis_ims(im0=im_slice, gt0=gt_slice, im1=d_im_slice, 
                gt1=d_gt_slice, title0=title0, title1=title1)
read_images.py (project: CNNbasedMedicalSegmentation, author: BRML)
def get_im_as_ndarray(image, downsize=False):
    ims = [image['Flair'], image['T1'], image['T1c'], image['T2']]
    if downsize:
        ims = [zoom(x, 0.5, order=1) for x in ims]
    im = np.array(ims, dtype='int16')

    return im
read_images.py (project: CNNbasedMedicalSegmentation, author: BRML)
def get_gt(gt, n_classes, downsize=False):
    if not downsize:
        return gt
    original_shape = gt.shape
    gt_onehot = np.reshape(gt, (-1,))
    gt_onehot = np.reshape(one_hot(gt_onehot, n_classes), original_shape + (n_classes,))
    gt_onehot = np.transpose(gt_onehot, (3, 0, 1, 2))

    zoom_gt = np.array([zoom(class_map, 0.5, order=1) for class_map in gt_onehot])
    zoom_gt = zoom_gt.argmax(axis=0)
    zoom_gt = np.asarray(zoom_gt, dtype='int8')

    return zoom_gt
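Downsampling the one-hot class maps with order=1 and then taking an argmax avoids the label-mixing artifacts that interpolating integer labels directly would introduce. Below is a self-contained sketch of the same idea, with a synthetic 3-class label volume and its own one-hot step (the original one_hot helper is not shown in this excerpt):

import numpy as np
from scipy.ndimage import zoom

n_classes = 3
gt = np.random.randint(0, n_classes, size=(8, 8, 8))   # hypothetical label volume

# One-hot encode to shape (n_classes, D, H, W).
onehot = np.stack([(gt == c).astype(float) for c in range(n_classes)])

# Downsample each class map smoothly, then pick the dominant class per voxel.
zoomed = np.array([zoom(cmap, 0.5, order=1) for cmap in onehot])
gt_small = zoomed.argmax(axis=0).astype(np.int8)
print(gt.shape, '->', gt_small.shape)                   # (8, 8, 8) -> (4, 4, 4)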
read_images.py (project: CNNbasedMedicalSegmentation, author: BRML)
def process_gt(gt, n_classes, downsize=False):
    if downsize:
        gt = zoom(gt, 0.5, order=0)
        gt = np.asarray(gt, dtype='int8')
    gt = np.transpose(gt, (1, 2, 0))
    l = np.reshape(gt, (-1,))
    l = np.reshape(one_hot(l, n_classes), (-1, n_classes))
    return l
image_read_write.py (project: kaggle_dsb, author: syagev)
def load_itk_image_rescaled(filename, slice_mm):
    im, origin, spacing = load_itk_image(filename)

    new_im = zoom(im, [spacing[0]/slice_mm,1.0,1.0])
    return new_im
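Only the slice axis is rescaled here, so the zoom factor list has a non-unit first entry and 1.0 for the two in-plane axes. A stand-alone sketch with made-up spacing values (load_itk_image itself is not shown in this excerpt):

import numpy as np
from scipy.ndimage import zoom

vol = np.random.rand(40, 64, 64)           # hypothetical CT volume (z, y, x)
z_spacing_mm = 2.5                          # original slice thickness
slice_mm = 1.0                              # target slice thickness

rescaled = zoom(vol, [z_spacing_mm / slice_mm, 1.0, 1.0])
print(vol.shape, '->', rescaled.shape)      # (40, 64, 64) -> (100, 64, 64)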
deepdream.py (project: DeepArt, author: jiriroz)
def dream(self, base_img, iter_n=10, octave_n=4, octave_scale=1.4, 
                end='inception_4c/output', clip=True, guide_features=None, name="dream", **step_params):
        # prepare base images for all octaves
        octaves = [self.preprocess(base_img)]
        for i in xrange(octave_n-1):
            octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))

        src = self.net.blobs['data']
        detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
        for octave, octave_base in enumerate(octaves[::-1]):
            h, w = octave_base.shape[-2:]
            if octave > 0:
                # upscale details from the previous octave
                h1, w1 = detail.shape[-2:]
                detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)

            src.reshape(1,3,h,w) # resize the network's input image size
            src.data[0] = octave_base+detail
            for i in xrange(iter_n):
                self.make_step(end=end, clip=clip, guide_features=guide_features, **step_params)

                # visualization
                vis = self.deprocess(src.data[0])
                # adjust image contrast if clipping is disabled
                if not clip:
                    vis = vis*(255.0/np.percentile(vis, 99.98))
                print octave, i, end, vis.shape
                clear_output(wait=True)

            # extract details produced on the current octave
            detail = src.data[0]-octave_base
        self.showarray(vis, name)
        # returning the resulting image
        return self.deprocess(src.data[0])
ADMMutils.py (project: sparsecnn, author: fkiaee)
def read_image(path):
    img = imread(path,mode="RGB")
    h, w, c = np.shape(img)
    scale_size = 256
    crop_size = 224
    assert c == 3
    img = zoom(img, (scale_size/h, scale_size/w,1))
    img = img.astype(np.float32)
    img -= np.array([104., 117., 124.])
    h, w, c = img.shape
    ho, wo = ((h - crop_size) // 2, (w - crop_size) // 2)  # integer offsets for slicing
    img = img[ho:ho + crop_size, wo:wo + crop_size, :]
    #print(np.shape(img))
    img = img[None, ...]
    return img
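Here zoom() serves as a plain image resize to 256x256 before the 224x224 center crop and the Caffe-style per-channel mean subtraction. A minimal stand-alone version on a synthetic RGB array (input size chosen arbitrarily):

import numpy as np
from scipy.ndimage import zoom

img = np.random.randint(0, 256, size=(375, 500, 3)).astype(np.float32)
scale_size, crop_size = 256, 224

h, w, _ = img.shape
img = zoom(img, (1.0 * scale_size / h, 1.0 * scale_size / w, 1))  # resize, keep channels
img -= np.array([104., 117., 124.])                               # per-channel mean
h, w, _ = img.shape
ho, wo = (h - crop_size) // 2, (w - crop_size) // 2
img = img[ho:ho + crop_size, wo:wo + crop_size, :][None, ...]
print(img.shape)                                                   # (1, 224, 224, 3)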
NLLGrid.py (project: nllgrid, author: claudiodsf)
def resample(self, dx, dy, dz):
        if self.type in ['ANGLE', 'ANGLE2D']:
            raise NotImplementedError(
                'Resample not implemented for ANGLE grid.')
        zoom_x = self.dx / dx
        zoom_y = self.dy / dy
        zoom_z = self.dz / dz
        self.array = zoom(self.array, (zoom_x, zoom_y, zoom_z))
        self.nx, self.ny, self.nz = self.array.shape
        if self.type == 'SLOW_LEN':
            self.array *= dx / self.dx
        self.dx = dx
        self.dy = dy
        self.dz = dz
DeepDream.py (project: QScode, author: PierreHao)
def Deepdream(self, base_img, iter_n=10, octave_n=4, octave_scale=1.4, clip=True):
        # prepare base images for all octaves
        octaves = [self.Preprocess(base_img)]
        for i in xrange(octave_n-1):
            # progressively shrink the image; octaves[0] keeps the original size, so the function returns an image of that size
            octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))

        src = self.net.blobs['data']
        detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
        for octave, octave_base in enumerate(octaves[::-1]):# from end to 0
            h, w = octave_base.shape[-2:]
            if octave > 0:
                # upscale details from the previous octave
                h1, w1 = detail.shape[-2:]
                detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)
            src.reshape(1,3,h,w) # resize the network's input image size
            src.data[0] = octave_base+detail
            for i in xrange(iter_n):
                self.Make_step()
                # visualization
                '''
                vis = self.deprocess(net, src.data[0])
                if not clip: # adjust image contrast if clipping is disabled
                    vis = vis*(255.0/np.percentile(vis, 99.98))
                showarray(vis)
                print octave, i, end, vis.shape
                clear_output(wait=True)
                '''
            # extract details produced on the current octave
            #print octave, self.end, src.data[0].shape
            detail = src.data[0]-octave_base
        # returning the resulting image
        return self.Deprocess(src.data[0])
TwoDMaps.py (project: pytorch-maddpg, author: xuehy)
def resize(scale, old_mats):
    new_mats = []
    for mat in old_mats:
        new_mats.append(zoom(mat, scale, order=0))
    return np.array(new_mats)
zogy.py (project: ZOGY, author: pmvreeswijk)
def ds9_arrays(**kwargs):

    cmd = ['ds9', '-zscale', '-zoom', '4', '-cmap', 'heat']
    for name, array in kwargs.items():
        # write array to fits
        fitsfile = 'ds9_'+name+'.fits'
        fits.writeto(fitsfile, np.array(array).astype(np.float32), clobber=True)            
        # append to command
        cmd.append(fitsfile)

    #print 'cmd', cmd
    result = subprocess.call(cmd)

################################################################################
metrics.py (project: xdesign, author: tomography)
def _compute_msssim(imQual, nlevels=5, sigma=1.2, L=1, K=(0.01, 0.03)):
    '''
    An implementation of the Multi-Scale Structural SIMilarity index (MS-SSIM).

    References
    -------------
    Multi-scale Structural Similarity Index (MS-SSIM)
    Z. Wang, E. P. Simoncelli and A. C. Bovik, "Multi-scale structural
    similarity for image quality assessment," Invited Paper, IEEE Asilomar
    Conference on Signals, Systems and Computers, Nov. 2003

    Parameters
    -------------
    imQual : ImageQuality
    nlevels : int
        The max number of levels to analyze
    sigma : float
        Sets the standard deviation of the gaussian filter. This setting
        determines the minimum scale at which quality is assessed.
    L : scalar
        The dynamic range of the data. This value is 1 for float
        representations and 2^bitdepth for integer representations.
    K : 2-tuple
        A list of two constants which help prevent division by zero.

    Returns
    -------
    imQual : ImageQuality
        A struct used to organize image quality information. NOTE: the valid
        range for SSIM is [-1, 1].
    '''
    _full_reference_input_check(imQual, sigma, nlevels, L)

    img1 = imQual.orig
    img2 = imQual.recon

    # The relative importance of each level as determined by human experiment
    # weight = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]

    for level in range(0, nlevels):
        imQual += _compute_ssim(ImageQuality(img1, img2), sigma=sigma, L=L,
                                K=K, scale=sigma * 2**level)
        if level == nlevels - 1:
            break

        # Downsample (using ndimage.zoom to prevent sampling bias)
        img1 = scipy.ndimage.zoom(img1, 1/2)
        img2 = scipy.ndimage.zoom(img2, 1/2)

    return imQual
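The only zoom-specific step in the loop above is the factor-of-two downsampling between levels, done through interpolation rather than strided slicing to avoid sampling bias. A toy version of that pyramid loop with synthetic images (the per-level SSIM computation is omitted):

import numpy as np
import scipy.ndimage

img1 = np.random.rand(128, 128)
img2 = np.random.rand(128, 128)

nlevels = 5
for level in range(nlevels):
    # ... the per-level SSIM of img1 vs img2 would be computed here ...
    print(level, img1.shape)
    if level == nlevels - 1:
        break
    # Halve both images with interpolation before the next level.
    img1 = scipy.ndimage.zoom(img1, 0.5)
    img2 = scipy.ndimage.zoom(img2, 0.5)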

