Python warp() usage examples (source code)

transforms.py (project: KagglePlanetPytorch, author: Mctigger)
def augment(
        rotation_fn=lambda: np.random.random_integers(0, 360),
        translation_fn=lambda: (np.random.random_integers(-20, 20), np.random.random_integers(-20, 20)),
        scale_factor_fn=random_zoom_range(),
        shear_fn=lambda: np.random.random_integers(-10, 10)
):
    def call(x):
        rotation = rotation_fn()
        translation = translation_fn()
        scale = scale_factor_fn()
        shear = shear_fn()

        tf_augment = AffineTransform(scale=scale, rotation=np.deg2rad(rotation), translation=translation, shear=np.deg2rad(shear))
        tf = tf_center + tf_augment + tf_uncenter

        x = warp(x, tf, order=1, preserve_range=True, mode='symmetric')

        return x

    return call
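The call above composes tf_center and tf_uncenter, which are not defined in this excerpt, so that rotation, scaling and shear are applied about the image centre rather than the top-left corner. A minimal sketch of how such centering transforms are typically built (assuming a square input of side size; the value 256 is only an illustration):

# Hypothetical reconstruction of the centering transforms not shown in this excerpt
from skimage.transform import AffineTransform
size = 256  # assumed input width/height in pixels
tf_center = AffineTransform(translation=(-size / 2, -size / 2))   # move the image centre to the origin
tf_uncenter = AffineTransform(translation=(size / 2, size / 2))   # move it back after augmenting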
tfi.py (project: tabea_video_project, author: neilslater)
def affine_zoom( img, zoom, spin = 0 ):
    '''Returns new image derived from img, after a central-origin affine transform has been applied'''
    img_copy = img.copy()

    # Shift transforms allow Affine to be applied with centre of image as 0,0
    shift_y, shift_x, _ = (np.array(img_copy.shape)-1) / 2.
    shift_fwd = transform.SimilarityTransform(translation=[-shift_x, -shift_y])
    shift_back = transform.SimilarityTransform(translation=[shift_x, shift_y])
    affine = transform.AffineTransform( scale=(zoom, zoom), rotation=(spin * math.pi/180) )

    img_copy = transform.warp( img_copy,
                     ( shift_fwd + ( affine + shift_back )).inverse,
                     order=3,
                     clip=False, preserve_range=True,
                     mode='reflect').astype(np.float32)
    return img_copy
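Because of the unpacking shift_y, shift_x, _ the function expects a colour image with a channel axis, and spin is given in degrees. A hypothetical call might look like:

zoomed = affine_zoom(img, zoom=1.3, spin=15)  # enlarge by 30% and rotate 15 degrees about the centre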
transforms.py (project: KagglePlanetPytorch, author: Mctigger)
def augment_deterministic(
        rotation=0,
        translation=0,
        scale_factor=1,
        shear=0
):
    def call(x):
        scale = scale_factor, scale_factor
        rotation_tmp = rotation

        tf_augment = AffineTransform(
            scale=scale,
            rotation=np.deg2rad(rotation_tmp),
            translation=translation,
            shear=np.deg2rad(shear)
        )
        tf = tf_center + tf_augment + tf_uncenter

        x = warp(x, tf, order=1, preserve_range=True, mode='symmetric')

        return x

    return call
expt_utils.py (project: pyxem, author: pyxem)
def affine_transformation(z, order, **kwargs):
    """Apply an affine transformation to a 2-dimensional array.

    Parameters
    ----------
    z : np.array
        2-dimensional array to be transformed.
    order : int
        Interpolation order.
    **kwargs
        Keyword arguments passed to skimage.transform.AffineTransform, e.g.
        matrix, a 3x3 array specifying the affine transformation to be applied.

    Returns
    -------
    trans : array
        Affine transformed diffraction pattern.
    """
    shift_y, shift_x = np.array(z.shape[:2]) / 2.
    tf_shift = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
    tf_shift_inv = tf.SimilarityTransform(translation=[shift_x, shift_y])

    transformation = tf.AffineTransform(**kwargs)
    trans = tf.warp(z, (tf_shift + (transformation + tf_shift_inv)).inverse,
                    order=order)

    return trans
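The matrix mentioned in the docstring reaches AffineTransform through **kwargs. A hypothetical call on a 2-D pattern z could therefore look like:

import numpy as np
D = np.array([[1.02, 0.00, 0.],
              [0.00, 0.98, 0.],
              [0.00, 0.00, 1.]])   # assumed 3x3 affine matrix
corrected = affine_transformation(z, order=1, matrix=D)  # z is an assumed 2-D diffraction pattern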
scalable_reference_pattern.py (project: pyxem, author: pyxem)
def function(self, x, y):

        signal2D = self.signal.data
        order = self.order
        d11 = self.d11.value
        d12 = self.d12.value
        d21 = self.d21.value
        d22 = self.d22.value
        t1 = self.t1.value
        t2 = self.t2.value

        D = np.array([[d11, d12, t1],
                      [d21, d22, t2],
                      [0., 0., 1.]])

        shifty, shiftx = np.array(signal2D.shape[:2]) / 2

        shift = tf.SimilarityTransform(translation=[-shiftx, -shifty])
        tform = tf.AffineTransform(matrix=D)
        shift_inv = tf.SimilarityTransform(translation=[shiftx, shifty])

        transformed = tf.warp(signal2D, (shift + (tform + shift_inv)).inverse,
                              order=order)

        return transformed
imageutil.py (project: nuts-ml, author: maet3608)
def translate(image, dx, dy, **kwargs):
    """
    Shift image horizontally and vertically

    >>> image = np.eye(3, dtype='uint8') * 255
    >>> translate(image, 2, 1)
    array([[  0,   0,   0],
           [  0,   0, 255],
           [  0,   0,   0]], dtype=uint8)

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param dx: horizontal translation in pixels
    :param dy: vertical translation in pixels
    :param kwargs kwargs: Keyword arguments for the underlying scikit-image
       warp function, e.g. order=1 for linear interpolation.
    :return: translated image
    :rtype:  numpy array with range [0,255] and dtype 'uint8'
    """
    set_default_order(kwargs)
    transmat = skt.AffineTransform(translation=(-dx, -dy))
    return skt.warp(image, transmat, preserve_range=True,
                    **kwargs).astype('uint8')
imageutil.py (project: nuts-ml, author: maet3608)
def shear(image, shear_factor, **kwargs):
    """
    Shear image.

    For details see:
    http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.AffineTransform

    >>> image = np.eye(3, dtype='uint8')
    >>> sheared = shear(image, 0.2)

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param float shear_factor: Shear factor [0, 1]
    :param kwargs kwargs: Keyword arguments for the underlying scikit-image
       warp function, e.g. order=1 for linear interpolation.
    :return: Sheared image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    set_default_order(kwargs)
    transform = skt.AffineTransform(shear=shear_factor)
    return skt.warp(image, transform, preserve_range=True,
                    **kwargs).astype('uint8')
preprocessor_eval.py (project: HandwritingRecognition, author: eng-tsmith)
def slant(img):
    """
    Applies a shear ("slant") transform to the image for data augmentation.

    :param img: image
    :return: slanted image
    """
    # Create random slant for data augmentation
    slant_factor = 0 #random.uniform(-0.2, 0.2) #TODO dataug

    # Create Afine transform
    afine_tf = tf.AffineTransform(shear=slant_factor)

    # Apply transform to image data
    img_slanted = tf.warp(img, afine_tf, order=0)
    return img_slanted
show_images.py (project: CNNbasedMedicalSegmentation, author: BRML)
def sinus(image, strength):
    rows, cols = image.shape[0], image.shape[1]

    src_cols = np.linspace(0, cols, 5)
    src_rows = np.linspace(0, rows, 2)
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]

    # add sinusoidal oscillation to row coordinates
    dst_rows = src[:, 1] - np.sin(np.linspace(0, 2*np.pi, src.shape[0])) * strength 
    dst_cols = src[:, 0]
    dst_rows *= 1.
    dst_rows -= 1.5 * strength
    dst = np.vstack([dst_cols, dst_rows]).T


    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)

    out_rows = image.shape[0] #- 1.5 * 5
    out_cols = cols
    out = warp(image, tform, output_shape=(out_rows, out_cols))
    return np.array(out, dtype='float32')
perspective_transform.py (project: ml-traffic, author: Zepheus)
def process(self, im):
        # if side is right flip so it becomes right
        if self.side != 'left':
            im = np.fliplr(im)

        # slope of the perspective
        slope = tan(radians(self.degrees))
        (h, w, _) = im.shape

        matrix_trans = np.array([[1, 0, 0],
                                [-slope/2, 1, slope * h / 2],
                                [-slope/w, 0, 1 + slope]])

        trans = ProjectiveTransform(matrix_trans)
        img_trans = warp(im, trans)
        if self.side != 'left':
            img_trans = np.fliplr(img_trans)
        return img_trans
DataWrapperFinal.py (project: EquationRecognition, author: xyjiang94)
def image_deformation(self,image):
        random_shear_angl = np.random.random() * np.pi/6 - np.pi/12
        random_rot_angl = np.random.random() * np.pi/6 - np.pi/12 - random_shear_angl
        random_x_scale = np.random.random() * .4 + .8
        random_y_scale = np.random.random() * .4 + .8
        random_x_trans = np.random.random() * image.shape[0] / 4 - image.shape[0] / 8
        random_y_trans = np.random.random() * image.shape[1] / 4 - image.shape[1] / 8
        dx = image.shape[0]/2. \
                - random_x_scale * image.shape[0]/2 * np.cos(random_rot_angl)\
                + random_y_scale * image.shape[1]/2 * np.sin(random_rot_angl + random_shear_angl)
        dy = image.shape[1]/2. \
                - random_x_scale * image.shape[0]/2 * np.sin(random_rot_angl)\
                - random_y_scale * image.shape[1]/2 * np.cos(random_rot_angl + random_shear_angl)
        trans_mat = AffineTransform(rotation=random_rot_angl,
                                    translation=(dx + random_x_trans,
                                                 dy + random_y_trans),
                                    shear = random_shear_angl,
                                    scale = (random_x_scale,random_y_scale))
        return warp(image,trans_mat.inverse,output_shape=image.shape)
image_processing_common.py (project: tensorflow-litterbox, author: rwightman)
def distort_affine_skimage(image, rotation=10.0, shear=5.0, random_state=None):
    if random_state is None:
        random_state = np.random.RandomState(None)

    rot = np.deg2rad(np.random.uniform(-rotation, rotation))
    sheer = np.deg2rad(np.random.uniform(-shear, shear))

    shape = image.shape
    shape_size = shape[:2]
    center = np.float32(shape_size) / 2. - 0.5

    pre = transform.SimilarityTransform(translation=-center)
    affine = transform.AffineTransform(rotation=rot, shear=sheer, translation=center)
    tform = pre + affine

    distorted_image = transform.warp(image, tform.params, mode='reflect')

    return distorted_image.astype(np.float32)
preprocessing.py (project: face-identification-tpe, author: meownoid)
def align_face(self,
                   image,
                   face_rect, *,
                   dim=96,
                   border=0,
                   mask=FaceAlignMask.INNER_EYES_AND_BOTTOM_LIP):
        mask = np.array(mask.value)

        landmarks = self.get_landmarks(image, face_rect)
        proper_landmarks = border + dim * self.face_template[mask]
        A = np.hstack([landmarks[mask], np.ones((3, 1))]).astype(np.float64)
        B = np.hstack([proper_landmarks, np.ones((3, 1))]).astype(np.float64)
        T = np.linalg.solve(A, B).T

        wrapped = tr.warp(image,
                          tr.AffineTransform(T).inverse,
                          output_shape=(dim + 2 * border, dim + 2 * border),
                          order=3,
                          mode='constant',
                          cval=0,
                          clip=True,
                          preserve_range=True)

        return wrapped
utils.py (project: brats17, author: xf4j)
def read_label(path, is_training=True):
    seg = nib.load(glob.glob(os.path.join(path, '*_seg.nii.gz'))[0]).get_data().astype(np.float32)
    # Crop to 128*128*64
    crop_size = (128, 128, 64)
    crop = [int((seg.shape[0] - crop_size[0]) / 2), int((seg.shape[1] - crop_size[1]) / 2),
            int((seg.shape[2] - crop_size[2]) / 2)]
    seg = seg[crop[0] : crop[0] + crop_size[0], crop[1] : crop[1] + crop_size[1], crop[2] : crop[2] + crop_size[2]]
    label = np.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3), dtype=np.float32)
    label[seg == 1, 0] = 1
    label[seg == 2, 1] = 1
    label[seg == 4, 2] = 1

    final_label = np.empty((16, 16, 16, 3), dtype=np.float32)
    for z in range(label.shape[3]):
        final_label[..., z] = resize(label[..., z], (16, 16, 16), mode='constant')

    # Augmentation
    if is_training:
        im_size = final_label.shape[:-1]
        translation = [np.random.uniform(-2, 2), np.random.uniform(-2, 2), np.random.uniform(-2, 2)]
        rotation = euler2mat(0, 0, np.random.uniform(-5, 5) / 180.0 * np.pi, 'sxyz')
        scale = [1, 1, 1]
        warp_mat = compose(translation, rotation, scale)
        tform_coords = get_tform_coords(im_size)
        w = np.dot(warp_mat, tform_coords)
        w[0] = w[0] + im_size[0] / 2
        w[1] = w[1] + im_size[1] / 2
        w[2] = w[2] + im_size[2] / 2
        warp_coords = w[0:3].reshape(3, im_size[0], im_size[1], im_size[2])
        for z in range(label.shape[3]):
            final_label[..., z] = warp(final_label[..., z], warp_coords)

    return final_label
cv.py (project: Physical-Image-Manipulation-Program, author: philipptrenz)
def warp(img, corners):
    """
    Warps an image while keeping its size, distorting the pixel data
    so that it maps onto the four given corners.
    """

    width = len(img[0])
    height = len(img)

    src = numpy.array((
        corners['upper_left'],
        corners['lower_left'],
        corners['lower_right'],
        corners['upper_right']
    ))

    dst = numpy.array((
        (0, 0),
        (0, height),
        (width, height),
        (width, 0)
    ))

    tform = transform.ProjectiveTransform()
    tform.estimate(src, dst)

    return transform.warp(img, tform, output_shape=(height,width))
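A usage sketch with the corner dictionary the function expects; the (x, y) pixel positions below are purely illustrative:

corners = {
    'upper_left':  (30, 20),
    'lower_left':  (25, 460),
    'lower_right': (610, 450),
    'upper_right': (600, 15),
}
flattened = warp(img, corners)  # output has the same size as img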
cv.py (project: Physical-Image-Manipulation-Program, author: philipptrenz)
def scale_to_fit(img, size):
    """
    Scales an image to a given size by warping, with no regard to the aspect ratio.
    Returns: warped image as ndarray
    """

    width = len(img[0])
    height = len(img)

    src = numpy.array((
        (0, 0),
        (0, size[1]),
        (size[0], size[1]),
        (size[0], 0)
    ))


    dst = numpy.array((
        (0, 0),
        (0, height),
        (width, height),
        (width, 0)
    ))

    tform = transform.ProjectiveTransform()
    tform.estimate(src, dst)

    return transform.warp(img, tform, output_shape=(size[1],size[0]))





#########################################################################################################
#########################################################################################################
preprocess.py (project: AutoPortraitMatting, author: PetroWu)
def gen_data(name):
    reftracker = scio.loadmat('data/images_tracker.00047.mat')['tracker']
    desttracker = scio.loadmat('data/images_tracker/'+name+'.mat')['tracker']
    refpos = np.floor(np.mean(reftracker, 0))
    xxc, yyc = np.meshgrid(np.arange(1, 1801, dtype=np.int), np.arange(1, 2001, dtype=np.int))
    #normalize x and y channels
    xxc = (xxc - 600 - refpos[0]) * 1.0 / 600
    yyc = (yyc - 600 - refpos[1]) * 1.0 / 600
    maskimg = Image.open('data/meanmask.png')
    maskc = np.array(maskimg, dtype=np.float)
    maskc = np.pad(maskc, (600, 600), 'minimum')
    # warp is an inverse transform, and so src and dst must be reversed here
    tform = transform.estimate_transform('affine', desttracker + 600, reftracker + 600)

    img_data = skio.imread('data/images_data/'+name+'.jpg')
    # save org mat
    warpedxx = transform.warp(xxc, tform, output_shape=xxc.shape)
    warpedyy = transform.warp(yyc, tform, output_shape=xxc.shape)
    warpedmask = transform.warp(maskc, tform, output_shape=xxc.shape)
    warpedxx = warpedxx[600:1400, 600:1200]
    warpedyy = warpedyy[600:1400, 600:1200]
    warpedmask = warpedmask[600:1400, 600:1200]
    img_h, img_w, _ = img_data.shape
    mat = np.zeros((img_h, img_w, 3), dtype=np.float)
    mat[:, :, 0] = (img_data[:, :, 2] * 1.0 - 104.008) / 255
    mat[:, :, 1] = (img_data[:, :, 1] * 1.0 - 116.669) / 255
    mat[:, :, 2] = (img_data[:, :, 0] * 1.0 - 122.675) / 255
    scio.savemat('portraitFCN_data/' + name + '.mat', {'img':mat})
    mat_plus = np.zeros((img_h, img_w, 6), dtype=np.float)
    mat_plus[:, :, 0:3] = mat
    mat_plus[:, :, 3] = warpedxx
    mat_plus[:, :, 4] = warpedyy
    mat_plus[:, :, 5] = warpedmask
points_crop.py (project: kaggle-right-whale, author: felixlaumon)
def get_head_crop(img, pt1, pt2):
    im = img.copy()
    minh = 10
    minw = 20

    x = pt1[0] - pt2[0]
    y = pt1[1] - pt2[1]
    dist = math.hypot(x, y)
    croph = int((im.shape[0] - 1.0 * dist) // 2)
    cropw = int((im.shape[1] - 2.0 * dist) // 2)
    newh = im.shape[0] - 2 * croph
    neww = im.shape[1] - 2 * cropw

    if croph <= 0 or cropw <= 0 or newh < minh or neww < minw:
        return im
    else:
        angle = math.atan2(y, x) * 180 / math.pi
        centery = 0.4 * pt1[1] + 0.6 * pt2[1]
        centerx = 0.4 * pt1[0] + 0.6 * pt2[0]
        center = (centerx, centery)
        im = rotate(im, angle, resize=False, center=center)
        imcenter = (im.shape[1] / 2, im.shape[0] / 2)
        trans = (center[0] - imcenter[0], center[1] - imcenter[1])
        tform = SimilarityTransform(translation=trans)
        im = warp(im, tform)
        im = im[croph:-croph, cropw:-cropw]
        return im
geometry.py (project: sudokuextract, author: hbldh)
def warp_image_by_corner_points_projection(corner_points, image):
    """Given corner points of a Sudoku, warps original selection to a square image.

    :param corner_points:
    :type: corner_points: list
    :param image:
    :type image:
    :return:
    :rtype:

    """
    # Clarify by storing in named variables.
    top_left, top_right, bottom_left, bottom_right = np.array(corner_points)

    top_edge = np.linalg.norm(top_right - top_left)
    bottom_edge = np.linalg.norm(bottom_right - bottom_left)
    left_edge = np.linalg.norm(top_left - bottom_left)
    right_edge = np.linalg.norm(top_right - bottom_right)

    L = int(np.ceil(max([top_edge, bottom_edge, left_edge, right_edge])))
    src = np.array([top_left, top_right, bottom_left, bottom_right])
    dst = np.array([[0, 0], [L - 1, 0], [0, L - 1], [L - 1, L - 1]])

    tr = ProjectiveTransform()
    tr.estimate(dst, src)
    warped_image = warp(image, tr, output_shape=(L, L))
    out = resize(warped_image, (500, 500))

    return out
geometry.py (project: sudokuextract, author: hbldh)
def warp_image_by_interp_borders(edges, image):
    left_edge, top_edge, right_edge, bottom_edge = edges

    left_edge = left_edge[::-1, :]
    bottom_edge = bottom_edge[::-1, :]

    def _mapping_fcn(points):
        map_x = (points[:, 0] / float(points[-1, 0]))
        map_y = (points[:, 1] / float(points[-1, 1]))

        top_mapping = np.array(np.round(map_x * (len(top_edge) - 1)), 'int')
        bottom_mapping = np.array(np.round(map_x * (len(bottom_edge) - 1)), 'int')
        left_mapping = np.array(np.round(map_y * (len(left_edge) - 1)), 'int')
        right_mapping = np.array(np.round(map_y * (len(right_edge) - 1)), 'int')

        map_x = np.array([map_x, map_x]).T
        map_y = np.array([map_y, map_y]).T

        p1s = (left_edge[left_mapping, :] * (1 - map_x)) + (right_edge[right_mapping, :] * map_x)
        p2s = (top_edge[top_mapping, :] * (1 - map_y)) + (bottom_edge[bottom_mapping, :] * map_y)

        return (p1s + p2s) / 2

    d_top_edge = np.linalg.norm(top_edge[0, :] - top_edge[-1, :])
    d_bottom_edge = np.linalg.norm(bottom_edge[0, :] - bottom_edge[-1, :])
    d_left_edge = np.linalg.norm(left_edge[0, :] - left_edge[-1, :])
    d_right_edge = np.linalg.norm(right_edge[0, :] - right_edge[-1, :])

    d = int(np.ceil(max([d_top_edge, d_bottom_edge, d_left_edge, d_right_edge])))
    return warp(image, _mapping_fcn, output_shape=(600, 600))
simulate.py (project: xcor, author: tomography)
def add_jitter(prj, low=0, high=1):
    """Simulates jitter in projection images. The jitter
    is simulated by drawing random samples from a uniform
    distribution over the half-open interval [low, high).

    Parameters
    ----------
    prj : ndarray
        3D stack of projection images. The first dimension
        is projection axis, second and third dimensions are
        the x- and y-axes of the projection image, respectively.
    low : float, optional
        Lower boundary of the output interval. All values
        generated will be greater than or equal to low. The
        default value is 0.
    high : float
        Upper boundary of the output interval. All values
        generated will be less than high. The default value
        is 1.0.

    Returns
    -------
    ndarray
        3D stack of projection images with jitter.
    """
    from xcor.utils import scale
    from skimage import transform as tf

    # Needs scaling for skimage float operations.
    prj, scl = scale(prj)

    # Random jitter parameters are drawn from uniform distribution.
    ind = np.random.uniform(low, high, size=(prj.shape[0], 2))

    for m in range(prj.shape[0]):
        tform = tf.SimilarityTransform(translation=ind[m])
        prj[m] = tf.warp(prj[m], tform, order=0)

    # Re-scale back to original values.
    prj *= scl
    return prj
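A quick sketch of how it might be called on a small random stack; the array sizes and jitter bounds are assumptions for illustration only:

import numpy as np
prj = np.random.rand(8, 64, 64).astype(np.float32)  # 8 projections of 64x64 pixels
prj_jittered = add_jitter(prj, low=-2, high=2)      # per-projection shifts drawn from [-2, 2)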
augmentation.py (project: UVA, author: chiachun)
def augmentation(photoname,label):
    img = cv2.imread(photoname)
    labels = []
    images = []
    zoom1s = [0.8,1.0,1.2]
    zoom2s = [0.8,1.0,1.2]
    rotations = [0,4,8,12]
    shears = [3,6,9,12]
    flips = [False, True]
    for zoom1 in zoom1s:
        for zoom2 in zoom2s:
            for rotation in rotations:
                for shear in shears:
                    for flip in flips:
                        tform_augment = AffineTransform(scale=(1/zoom1, 1/zoom2), 
                                                        rotation=np.deg2rad(rotation), 
                                                        shear=np.deg2rad(shear))

                        img2 = warp(img, tform_augment)
                        if flip == True:
                            images.append(cv2.flip(img2,1))
                            labels.append(label)
                        else:
                            images.append(img2)
                            labels.append(label)
    return images,labels
rotate.py (project: py-data-augmentation, author: taoyizhi68)
def rotate(images, x_max_rotation, y_max_rotation, z_max_rotation, img_rows, img_cols):
    assert(x_max_rotation >= 0)
    assert(y_max_rotation >= 0)
    assert(z_max_rotation >= 0)
    for i in xrange(images.shape[0]):
        x_rotation = np.random.uniform(-x_max_rotation, x_max_rotation) * np.pi / 180
        y_rotation = np.random.uniform(-y_max_rotation, y_max_rotation) * np.pi / 180
        z_rotation = np.random.uniform(-z_max_rotation, z_max_rotation) * np.pi / 180

        center_matrix1 = np.array([[1, 0, -img_cols/2.], 
                                  [0, 1, -img_rows/2.],
                                  [0, 0, 1]])

        R = np.dot(np.dot(z_matirx(z_rotation), y_matrix(y_rotation)), x_matrix(x_rotation))
        rotate_matrix = np.array([[R[0][0], R[0][1], 0], 
                                  [R[1][0], R[1][1], 0],
                                  [0,       0,       1]])
        #print rotate_matrix
        center_matrix2 = np.array([[1, 0, img_cols/2.], 
                                  [0, 1, img_rows/2.],
                                  [0, 0, 1]]) 

        center_trans1 = transform.AffineTransform(center_matrix1)
        rotate_trans = transform.AffineTransform(rotate_matrix)
        center_trans2 = transform.AffineTransform(center_matrix2)

        affine_trans = center_trans1 + rotate_trans + center_trans2
        images[i] = transform.warp(images[i], affine_trans, mode='edge')
preprocessor.py (project: HandwritingRecognition, author: eng-tsmith)
def slant(img):
    """
    Creates random slant on images for data augmentation
    :param img: image
    :return: slanted image
    """
    # Create random slant for data augmentation
    slant_factor = random.uniform(-0.1, 0.1)

    # Create Afine transform
    afine_tf = tf.AffineTransform(shear=slant_factor)

    # Apply transform to image data
    img_slanted = tf.warp(img, afine_tf, order=0)
    return img_slanted
alignment.py (project: face-preprocess-tools, author: joyhuang9473)
def alignment(filePath, points, ref_points):
    '''
    @brief: align the face image so that its landmark points map onto the reference points
    '''
    assert(len(points) == len(ref_points))    
    num_point = len(ref_points) / 2
    # build the destination (reference) point array
    dst = np.empty((num_point, 2), dtype = np.int)
    k = 0
    for i in range(num_point):
        for j in range(2):
            dst[i][j] = ref_points[k]
            k = k+1
    # build the source point array from the detected landmarks
    src = np.empty((num_point, 2), dtype = np.int)
    k = 0
    for i in range(num_point):
        for j in range(2):
            src[i][j] = points[k]
            k = k+1
    # estimate the affine transform from the reference points to the source points (used as the inverse map by tf.warp below)
    tfrom = tf.estimate_transform('affine', dst,src)
    # OpenCV version (commented out below): compute the affine matrix M from three point pairs
#    pts1 = np.float32([[src[0][0],src[0][1]],[src[1][0],src[1][1]],[src[2][0],src[2][1]]])
#    pts2 = np.float32([[dst[0][0],dst[0][1]],[dst[1][0],dst[1][1]],[dst[2][0],dst[2][1]]])
#    M = cv2.getAffineTransform(pts2,pts1)
    # compute the affine transform N from all five point pairs
    pts3 = np.float32([[src[0][0],src[0][1]],[src[1][0],src[1][1]],[src[2][0],src[2][1]],[src[3][0],src[3][1]],[src[4][0],src[4][1]]])
    pts4 = np.float32([[dst[0][0],dst[0][1]],[dst[1][0],dst[1][1]],[dst[2][0],dst[2][1]],[dst[3][0],dst[3][1]],[dst[4][0],dst[4][1]]])
    N = compute_affine_transform(pts4, pts3)
    #
    im = skimage.io.imread(filePath)

    if im.ndim == 3:
        rows, cols, ch = im.shape
    else:
        rows, cols = im.shape
    warpimage_cv2 = cv2.warpAffine(im, N, (cols, rows))
    warpimage = tf.warp(im, inverse_map = tfrom)

    return warpimage, warpimage_cv2
astroalign.py (project: astroalign, author: toros-astro)
def apply_transform(transform, source, target):
    """Applies the transformation ``transform`` to ``source``.

    The output image will have the same shape as ``target``.

    Args:
        transform: A scikit-image ``SimilarityTransform`` object.
        source (numpy array): A 2D numpy array of the source image to be
            transformed.
        target (numpy array): A 2D numpy array of the target image. Only used
            to set the output image shape.

    Return:
        A numpy 2D array of the transformed source. If source is a masked array
        the returned image will also be a masked array with outside pixels set
        to True.
    """

    from skimage.transform import warp
    aligned_image = warp(source, inverse_map=transform.inverse,
                         output_shape=target.shape, order=3, mode='constant',
                         cval=_np.median(source), clip=False,
                         preserve_range=False)

    if isinstance(source, _np.ma.MaskedArray):
        # it could be that source's mask is just set to False
        if isinstance(source.mask, _np.ndarray):
            aligned_image_mask = warp(source.mask.astype('float32'),
                                      inverse_map=transform.inverse,
                                      output_shape=target.shape,
                                      cval=1.0)
            aligned_image_mask = aligned_image_mask > 0.4
            aligned_image = _np.ma.array(aligned_image,
                                         mask=aligned_image_mask)
        else:
            # If source is masked array with mask set to false, we
            # return the same
            aligned_image = _np.ma.array(aligned_image)
    return aligned_image
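A usage sketch, assuming the transform has already been estimated (astroalign would normally provide it; here a hand-built SimilarityTransform stands in):

import numpy as np
from skimage.transform import SimilarityTransform

source_img = np.random.rand(100, 100)
target_img = np.random.rand(120, 120)
t = SimilarityTransform(rotation=0.01, translation=(3.0, -2.0))
registered = apply_transform(t, source_img, target_img)  # output shape matches target_img: (120, 120)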
warpFace.py (project: FaceAnalysis, author: ElliotSalisbury)
def warpFace(im, oldLandmarks, newLandmarks, justFace=False, output_shape=None):
    print("warping face")
    if not justFace:
        cornerPts = np.array([(0, 0), (im.shape[1], 0), (im.shape[1], im.shape[0]), (0, im.shape[0])])

        oldLandmarks = np.append(oldLandmarks, cornerPts, axis=0)
        newLandmarks = np.append(newLandmarks, cornerPts, axis=0)

    tform = PiecewiseAffineTransform()
    tform.estimate(newLandmarks,oldLandmarks)

    warped = warp(im, tform, output_shape=output_shape)
    warped = skimage.img_as_ubyte(warped)
    return warped
poc.py (project: kaggle-satellite-imagery-feature-detection, author: toshi-k)
def test(test_func):

    lena = imread('lena512.png')

    n = 100

    error_all = np.zeros([n])
    pbar = progressbar.ProgressBar(max_value=n)

    for i in range(n):

        pbar.update(i+1)

        x_true = np.random.random()*6-5
        y_true = np.random.random()*6-5

        # ex) left:5, up:30 => translation=(5, 30)
        t_form = tf.SimilarityTransform(translation=(x_true, y_true))
        lena_shift = tf.warp(lena, t_form)

        a1 = np.random.randint(10, 50)
        a2 = np.random.randint(10, 50)
        a3 = np.random.randint(10, 50)
        a4 = np.random.randint(10, 50)

        img1 = lena[a1:-a2, a3:-a4]
        img2 = lena_shift[a1:-a2, a3:-a4]

        x_est, y_est = test_func(img1, img2)

        # print("x: {0:.3f}, x: {0:.3f}".format(x_true, y_true))
        # print("x: {0:.3f}, y: {0:.3f}".format(x_est, y_est))

        value = math.sqrt((x_true - x_est)**2 + (y_true - y_est)**2)
        error_all[i] = value

    ave = np.average(error_all)
    std = np.std(error_all)

    print("\terror: {0:.3f} +- {1:.3f}".format(ave, std))

#------------------------------
# main
#------------------------------
input_sixteen.py (project: kaggle-satellite-imagery-feature-detection, author: toshi-k)
def _align_two_rasters(img1, img2):

    p1 = normalize(img1[10:-10, 10:-10, 0].astype(np.float32))
    p2 = normalize(img2[10:-10, 10:-10, 7].astype(np.float32))

    x, y = poc(p2, p1)
    print('x: {0:.5f} y: {1:.5f}'.format(x, y))

    t_form = tf.SimilarityTransform(translation=(x, y))
    img3 = tf.warp(img2, t_form)

    return img3
tform.py (project: rasl, author: welch)
def imtransform(self, image, order=3, cval=0, *args, **kwargs):
        """tranform an image, with output image having same shape as input.

        This is implemented as an equivalent to MATLAB's
        imtransform(image, fliptform(tform)), to agree with the
        matrices generated by toolbox.parameters_to_projective_matrix.

        Note 1: It is *backwards* from the usual sense of tf.warp in skimage.

        Note 2: cval is not used during warping. boundaries are filled
        with NaN, and the transformed image has NaNs replaced with
        cval. This avoids made-up data at the expense of eroding
        boundaries.

        """
        # call warp with explicit matrix so we get the optimized behavior
        if not np.all(image == image):
            raise ValueError("NAN given to imtransform"+str(image))
        timage = tf.warp(image, self.params, order=order, mode='constant',
                         cval=np.nan, preserve_range=True,
                         output_shape=self.output_shape, *args, **kwargs)
        if cval == 0:
            timage = np.nan_to_num(timage)
        elif np.isfinite(cval):
            timage = np.where(np.isfinite(timage), timage, cval)
        return timage

