def _atlas_single_channel(self, channel, dims):
scale = (float(dims.tile_width) / float(self.aics_image.size_x), float(dims.tile_height) / float(self.aics_image.size_y))
channel_data = self.aics_image.get_image_data("XYZ", C=channel)
# renormalize
channel_data = channel_data.astype(np.float32)
channel_data *= 255.0/channel_data.max()
atlas = np.zeros((dims.atlas_width, dims.atlas_height))
i = 0
for row in range(dims.rows):
top_bound, bottom_bound = (dims.tile_height * row), (dims.tile_height * (row + 1))
for col in range(dims.cols):
if i < self.aics_image.size_z:
left_bound, right_bound = (dims.tile_width * col), (dims.tile_width * (col + 1))
tile = zoom(channel_data[:,:,i], scale)
atlas[left_bound:right_bound, top_bound:bottom_bound] = tile.astype(np.uint8)
i += 1
else:
break
# transpose to YX for input into CYX arrays
return atlas.transpose((1, 0))
def resize(orig, factor, method="nearest"):
"""
Scales a numpy array to a new size using a specified scaling method
:param orig: n-dimen numpy array to resize
:param factor: integer, double, or n-tuple to scale orig by
:param method: string, interpolation method to use when resizing. Options are "nearest",
"bilinear", and "cubic". Default is "nearest"
:return: n-dimen numpy array
"""
method_dict = {'nearest': 0, 'bilinear': 1, 'cubic': 2}
if method.lower() not in method_dict:
raise ValueError("Invalid interpolation method. Options are: " + ", ".join(method_dict.keys()))
try:
return zoom(orig, factor, order=method_dict[method.lower()])
except RuntimeError:
# raised by zoom when factor length does not match orig.shape length
raise ValueError("Factor sequence length does not match input length")
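# A minimal usage sketch for the resize() wrapper above, assuming numpy is
# imported as np and `zoom` is scipy.ndimage.zoom, as the snippet implies.
import numpy as np
from scipy.ndimage import zoom

img = np.arange(16, dtype=np.float32).reshape(4, 4)
up = resize(img, 2, method="bilinear")            # scalar factor: 4x4 -> 8x8
down = resize(img, (0.5, 0.5), method="nearest")  # per-axis factors: 4x4 -> 2x2
print(up.shape, down.shape)                       # (8, 8) (2, 2)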
def estimate_local_whitelevel(image, zoom=0.5, perc=80, range=20, debug=0):
'''flatten it by estimating the local whitelevel
zoom for page background estimation, smaller=faster, default: %(default)s
percentage for filters, default: %(default)s
range for filters, default: %(default)s
'''
m = interpolation.zoom(image, zoom)
m = filters.percentile_filter(m, perc, size=(range, 2))
m = filters.percentile_filter(m, perc, size=(2, range))
m = interpolation.zoom(m, 1.0/zoom)
if debug > 0:
plt.clf()
plt.title("m after remove noise")
plt.imshow(m, vmin=0, vmax=1)
raw_input("PRESS ANY KEY TO CONTINUE.")
w, h = np.minimum(np.array(image.shape), np.array(m.shape))
flat = np.clip(image[:w,:h]-m[:w,:h]+1,0,1)
if debug > 0:
plt.clf()
plt.title("flat after clip")
plt.imshow(flat,vmin=0,vmax=1)
raw_input("PRESS ANY KEY TO CONTINUE.")
return flat
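# Hedged usage sketch for estimate_local_whitelevel(), assuming the snippet's
# own (ocropus-style) imports are in place: scipy.ndimage's interpolation and
# filters submodules plus matplotlib.pyplot as plt. With debug=0 the plotting
# and raw_input branches are never reached.
import numpy as np

page = np.random.rand(600, 400).astype(np.float32)  # stand-in for a scanned page in [0, 1]
flat = estimate_local_whitelevel(page, zoom=0.5, perc=80, range=20, debug=0)
print(flat.shape)  # at most the input shape; slowly varying background removed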
def draw_ellipse(shape, radius, center, FWHM, noise=0):
sigma = FWHM / 2.35482
cutoff = 2 * FWHM
# draw a circle
R = max(radius)
zoom_factor = np.array(radius) / R
size = int((R + cutoff)*2)
c = size // 2
y, x = np.meshgrid(*([np.arange(size)] * 2), indexing='ij')
h = np.sqrt((y - c)**2+(x - c)**2) - R
mask = np.abs(h) < cutoff
    im = np.zeros((size,)*2, dtype=float)
im[mask] += np.exp((h[mask] / sigma)**2/-2)/(sigma*np.sqrt(2*np.pi))
# zoom so that radii are ok
with warnings.catch_warnings():
warnings.simplefilter("ignore")
im = zoom(im, zoom_factor)
# shift and make correct shape
center_diff = center - np.array(center_of_mass(im))
    left_padding = np.round(center_diff).astype(int)
subpx_shift = center_diff - left_padding
im = shift(im, subpx_shift)
im = crop_pad(im, -left_padding, shape)
im[im < 0] = 0
assert_almost_equal(center_of_mass(im), center, decimal=2)
if noise > 0:
im += np.random.random(shape) * noise * im.max()
return (im / im.max() * 255).astype(np.uint8)
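# Hedged usage sketch for draw_ellipse(); it assumes the helpers the test module
# imports (zoom, shift, center_of_mass, crop_pad, assert_almost_equal) are in scope.
im = draw_ellipse(shape=(80, 100), radius=(15, 25), center=(40, 50), FWHM=5)
print(im.shape, im.dtype)  # (80, 100) uint8, intensities scaled to 0..255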
def draw_ellipsoid(shape, radius, center, FWHM, noise=0):
sigma = FWHM / 2.35482
cutoff = 2 * FWHM
# draw a sphere
R = max(radius)
zoom_factor = np.array(radius) / R
size = int((R + cutoff)*2)
c = size // 2
z, y, x = np.meshgrid(*([np.arange(size)] * 3), indexing='ij')
h = np.sqrt((z - c)**2+(y - c)**2+(x - c)**2) - R
mask = np.abs(h) < cutoff
    im = np.zeros((size,)*3, dtype=float)
im[mask] += np.exp((h[mask] / sigma)**2/-2)/(sigma*np.sqrt(2*np.pi))
# zoom so that radii are ok
with warnings.catch_warnings():
warnings.simplefilter("ignore")
im = zoom(im, zoom_factor)
# shift and make correct shape
center_diff = center - np.array(center_of_mass(im))
    left_padding = np.round(center_diff).astype(int)
subpx_shift = center_diff - left_padding
im = shift(im, subpx_shift)
im = crop_pad(im, -left_padding, shape)
im[im < 0] = 0
assert_almost_equal(center_of_mass(im), center, decimal=2)
if noise > 0:
im += np.random.random(shape) * noise * im.max()
return (im / im.max() * 255).astype(np.uint8)
def testtime_augmentation(image, label):
labels = []
images = []
rotations = [0]
flips = [[0,0],[1,0],[0,1],[1,1]]
shifts = [[0,0]]
zooms = [1]
for r in rotations:
for f in flips:
for s in shifts:
for z in zooms:
image2 = np.array(image)
if f[0]:
image2[:,:] = image2[::-1,:]
if f[1]:
image2 = image2.transpose(1,0)
image2[:,:] = image2[::-1,:]
image2 = image2.transpose(1,0)
#rotate(image2, r, reshape=False, output=image2)
#image3 = zoom(image2, [z,z])
#image3 = crop_or_pad(image3, P.INPUT_SIZE, 0)
#image2 = image3
# #shift(image2, [s[0],s[1]], output=image2)
images.append([image2]) #Adds color channel dimension!
labels.append(label)
return images, labels
def reshape_maps_zoom( maps, new_dim, interp_order=1 ):
res = []
for k in range(maps.shape[0]):
f_map = maps[k,:,:]
scale = tuple(np.array(new_dim, dtype=float))/np.array(f_map.shape)
out = zoom( f_map, scale, order=interp_order )
res.append( out )
return np.array(res)
# Reshape feature maps
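# Usage sketch for reshape_maps_zoom(): resample a stack of K feature maps to a
# new spatial size, by default with bilinear interpolation (interp_order=1).
# Assumes `zoom` is scipy.ndimage.zoom, as in the other snippets.
import numpy as np

maps = np.random.rand(8, 14, 14).astype(np.float32)  # K=8 maps of 14x14
resized = reshape_maps_zoom(maps, new_dim=(56, 56))
print(resized.shape)  # (8, 56, 56)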
def augment(images):
pixels = images[0].shape[1]
center = pixels/2.-0.5
random_flip_x = FLIP_X and np.random.randint(2) == 1
random_flip_y = FLIP_Y and np.random.randint(2) == 1
# Translation shift
shift_x = np.random.uniform(*TRANS_RANGE)
shift_y = np.random.uniform(*TRANS_RANGE)
rotation_degrees = np.random.uniform(*ROT_RANGE)
zoom_factor = np.random.uniform(*ZOOM_RANGE)
#zoom_factor = 1 + (zoom_f/2-zoom_f*np.random.random())
if CV2_AVAILABLE:
M = cv2.getRotationMatrix2D((center, center), rotation_degrees, zoom_factor)
M[0, 2] += shift_x
M[1, 2] += shift_y
for i in range(len(images)):
image = images[i]
if random_flip_x:
image[:,:] = image[:,::-1,]
if random_flip_y:
image = image.transpose(1,0)
image[:,:] = image[::-1,:]
image = image.transpose(1,0)
if i==0: # lung
rotate(image, rotation_degrees, reshape=False, output=image, cval=-3000)
else:# truth and outside
rotate(image, rotation_degrees, reshape=False, output=image)
#image2 = zoom(image, [zoom_factor,zoom_factor])
image2 = crop_or_pad(image, pixels, -3000)
shift(image2, [shift_x,shift_y], output=image)
images[i] = image
return images
def pixelize(data, factor=0.1, order=0, mode="nearest", add_noise=True, noise_factor=0.3):
h, w = np.shape(data)[0:2]
if add_noise:
noise = noise_factor * np.random.random(np.shape(data)) - 0.5*noise_factor
        data = data + noise  # avoid in-place update (fails for integer input and mutates the caller's array)
factor_h = h / float(int(h)/(int(1/factor)))
factor_w = w / float(int(w)/(int(1/factor)))
small = sci.zoom(data, factor, order=order, mode=mode)
pixelized = sci.zoom(small, [factor_h, factor_w], order=order, mode=mode)
return pixelized
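# Usage sketch for pixelize(), assuming `sci` is scipy.ndimage (as the sci.zoom
# calls suggest): shrink by `factor`, then blow back up with order=0 so the
# output keeps the original shape but looks blocky.
import numpy as np

img = np.random.rand(120, 160)
blocky = pixelize(img, factor=0.1, add_noise=False)
print(img.shape, blocky.shape)  # both (120, 160)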
def resize_to_shape(data, height, width, order=1):
h, w = np.shape(data)[0:2]
factor_h = height / float(h)
factor_w = width / float(w)
if len(np.shape(data)) == 3:
scale_factor = [factor_h, factor_w, 1.0]
else:
scale_factor = [factor_h, factor_w]
return sci.zoom(data, scale_factor, order=order)
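# Usage sketch for resize_to_shape(): handles both 2D arrays and HxWxC images
# (the channel axis gets a scale factor of 1.0). Assumes `sci` is scipy.ndimage.
import numpy as np

gray = np.random.rand(48, 64)
rgb = np.random.rand(48, 64, 3)
print(resize_to_shape(gray, 96, 128).shape)  # (96, 128)
print(resize_to_shape(rgb, 96, 128).shape)   # (96, 128, 3)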
def get_algo_result_from_dir(algo_dir, scene):
fname = get_fname_algo_result(algo_dir, scene)
algo_result = file_io.read_file(fname)
if scene.gt_scale != 1:
algo_result = sci.zoom(algo_result, scene.gt_scale, order=0)
return algo_result
def zoom(image, factor):
    w, h = image.shape
    new_w, new_h = int(np.ceil(factor * w)), int(np.ceil(factor * h))
    # keep the parity of each dimension so the centre crop below stays symmetric
    if new_h % 2 != h % 2:
        new_h -= 1
    if new_w % 2 != w % 2:
        new_w -= 1
    scaled = resize(image, (new_w, new_h), order=1, mode='nearest')
    # crop back to the original size around the centre
    pad_x = (new_w - w) // 2
    pad_y = (new_h - h) // 2
    return scaled[pad_x:new_w - pad_x, pad_y:new_h - pad_y]
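# Usage sketch for the zoom()-and-crop helper above, assuming `resize` behaves
# like skimage.transform.resize (takes an output shape): the content is scaled
# by `factor` but the returned frame keeps the input's shape.
import numpy as np

img = np.random.rand(64, 64)
zoomed = zoom(img, 1.5)
print(zoomed.shape)  # (64, 64)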
def __init__(self, nseg = 12, nPixels = 256, pattern=None):
self.nSegments = nseg
self.nPixels = nPixels
self.DMsegs = np.zeros((self.nSegments, self.nSegments))
self.zern = Zernike_func(nPixels/2)
self.borders = np.linspace(0,self.nPixels,num=self.nSegments+1).astype(int)
if pattern is None:
self.pattern = np.zeros((nPixels,nPixels))
else:
        zoom = 256./float(pattern.shape[0])
MOD = interpolation.zoom(pattern,zoom,order=0,mode='nearest')
self.pattern = MOD
def _get_sequence(self, verbose=False):
trajectory_x, trajectory_y = self._get_random_trajectory()
# Minibatch data
if self.random_background:
out_sequence = self._rng.rand(self.seq_length + 1,
self.frame_size[0],
self.frame_size[1], 1)
else:
out_sequence = np.zeros((self.seq_length + 1,
self.frame_size[0],
self.frame_size[1], 1),
dtype=np.float32)
for digit_id in range(self.num_digits):
# Get random digit from dataset
curr_data_idx = self._rng.randint(0, self._MNIST_data.shape[0] - 1)
digit_image = self._MNIST_data[curr_data_idx]
zoom_factor = int(self.digits_sizes[digit_id] / 28)
if zoom_factor != 1:
digit_image = zoom(digit_image, zoom_factor)
digit_size = digit_image.shape[0]
# Generate video
digit_image = np.expand_dims(digit_image, -1)
# Iterate over seq_length + 1 to account for the extra frame
# that is returned as a target
for i, (top, left) in enumerate(zip(trajectory_y[:, digit_id],
trajectory_x[:, digit_id])):
bottom = top + digit_size
right = left + digit_size
out_sequence[i, top:bottom, left:right, :] = np.maximum(
out_sequence[i, top:bottom, left:right, :], digit_image)
return out_sequence
def pango_render_string(s,spec=None,fontfile=None,size=None,bg=(0.0,0.0,0.0),fg=(0.9,0.9,0.9),pad=5,
markup=1,scale=2.0,aspect=1.0,rotation=0.0):
"""Render a string using Cairo and the Pango text rendering interface. Fonts can either be given
as a fontfile or as a fontname. Size should be in pixels (?). You can specify a background and
foreground color as RGB floating point triples. (Currently unimplemented.)"""
S = pango.SCALE
face = None
if fontfile is not None: raise Exception("can't load ttf file into Pango yet; use fontname")
# make a guess at the size
w = max(100,int(scale*size*len(s)))
h = max(100,int(scale*size*1.5))
# possibly run through twice to make sure we get the right size buffer
for round in range(2):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,w,h)
cr = cairo.Context(surface)
if spec is not None: fd = pango.FontDescription(spec)
else: fd = pango.FontDescription()
if size is not None: fd.set_size(int(scale*size*S))
pcr = pangocairo.CairoContext(cr)
layout = pcr.create_layout()
layout.set_font_description(fd)
if not markup:
layout.set_text(s)
else:
layout.set_markup(s)
((xbear,ybear,tw,th),_) = layout.get_pixel_extents()
# print(xbear, ybear, tw, th)
tw = tw+2*pad
th = th+2*pad
if tw<=w and th<=h: break
w = tw
h = th
cr.set_source_rgb(*bg)
cr.rectangle(0,0,w,h)
cr.fill()
cr.move_to(-xbear+pad,-ybear+pad)
cr.set_source_rgb(*fg)
pcr.show_layout(layout)
data = surface.get_data()
data = bytearray(data)
a = array(data,'B')
a.shape = (h,w,4)
a = a[:th,:tw,:3]
a = a[:,:,::-1]
if rotation!=0.0: a = rotate(a,rotation,order=1)
a = zoom(a,(aspect/scale,1.0/scale/aspect,1.0),order=1)
return a
def test_densecrf(image_dir, desc_dir, filepath_to_id_path, out_dir):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
filepath_to_id = json.load(open(filepath_to_id_path))
params = {
"bilateral_pairwise_weight": 8,
"bilateral_theta_lab_ab": 3.0,
"bilateral_theta_lab_l": 0.5,
"bilateral_theta_xy": 0.5,
"min_dim": 550,
"n_crf_iters": 10,
"splat_triangle_weight": 1,
"unary_prob_padding": 1e-05
}
desc_store = DescriptorStoreMemmap(desc_dir, readonly=True)
# VGG-16
stride = config.NETWORK_CONFIGS['209']['effective_stride']
# Go through all images
    for filepath, img_id in filepath_to_id.items():
filename = os.path.basename(filepath)
bname, ext = os.path.splitext(filename)
img_path = os.path.join(image_dir, filename)
image = np.array(Image.open(img_path))
# Compute the expected output size
h, w = image.shape[:2]
prob_width = w // stride
prob_height = h // stride
img_id = int(img_id)
prob = desc_store.get(img_id)
prob = np.reshape(prob, (config.NLABELS, prob_height, prob_width))
        print(prob.shape)
zoom_factor = (
1,
float(h) / prob_height,
float(w) / prob_width,
)
prob_resized = zoom(prob, zoom=zoom_factor, order=1)
labels_crf = densecrf_map(image, prob_resized.copy(), params)
for l in range(config.NLABELS):
img_mask = prob_resized[l, :, :][:, :, np.newaxis]
red_img = np.array([255, 0, 0])[np.newaxis, np.newaxis, :]
new_img = red_img * img_mask + image * (1 - img_mask)
imsave(
os.path.join(out_dir, '%s-prob-%s-crf%s' % (bname, config.LABEL_TO_NAME[l], ext)),
new_img
)
imsave(
os.path.join(out_dir, '%s-labels-crf%s' % (bname, ext)),
labels_to_color(labels_crf)
)
imsave(os.path.join(out_dir, filename), image)
def __init__(self, scaled=False, dtype=None, zoom=None, gray=False,
pth=None):
"""Initialise an ExampleImages object.
Parameters
----------
scaled : bool, optional (default False)
Flag indicating whether images should be on the range [0,...,255]
with np.uint8 dtype (False), or on the range [0,...,1] with
np.float32 dtype (True)
dtype : data-type or None, optional (default None)
Desired data type of images. If `scaled` is True and `dtype` is an
integer type, the output data type is np.float32
zoom : float or None, optional (default None)
Optional support rescaling factor to apply to the images
gray : bool, optional (default False)
Flag indicating whether RGB images should be converted to grayscale
pth : string or None (default None)
Path to directory containing image files. If the value is None the
path points to a set of example images that are included with the
package.
"""
self.scaled = scaled
self.dtype = dtype
self.zoom = zoom
self.gray = gray
if pth is None:
self.bpth = os.path.join(os.path.dirname(__file__), 'data')
else:
self.bpth = pth
self.imglst = []
self.grpimg = {}
for dirpath, dirnames, filenames in os.walk(self.bpth):
# It would be more robust and portable to use
# pathlib.PurePath.relative_to
prnpth = dirpath[len(self.bpth)+1:]
for f in filenames:
fpth = os.path.join(dirpath, f)
if imghdr.what(fpth) is not None:
gpth = os.path.join(prnpth, f)
self.imglst.append(gpth)
if prnpth not in self.grpimg:
self.grpimg[prnpth] = []
self.grpimg[prnpth].append(gpth)
def data(self, raw=False, bgr2rgb=True, resize=True, order=1):
"""Read image data from file and return as numpy array."""
self._fh.seek(self.data_offset)
if raw:
return self._fh.read(self.data_size)
elif self.compression and self.compression < RAW_COMPRESSION_VALUE:
if self.compression not in DECOMPRESS:
raise ValueError("compression unknown or not supported")
# TODO: iotest this
data = self._fh.read(self.data_size)
data = DECOMPRESS[self.compression](data)
if self.compression == 2:
# LZW
data = numpy.fromstring(data, self.dtype)
else:
dtype = numpy.dtype(self.dtype)
data = self._fh.read_array(dtype, self.data_size // dtype.itemsize)
data = data.reshape(self.stored_shape)
if self.stored_shape == self.shape or not resize:
if bgr2rgb and self.stored_shape[-1] in (3, 4):
tmp = data[..., 0].copy()
data[..., 0] = data[..., 2]
data[..., 2] = tmp
return data
# sub / supersampling
factors = [j / i for i, j in zip(self.stored_shape, self.shape)]
factors = [(1.0 if abs(1.0-f) < 0.0001 else f) for f in factors]
shape = list(self.stored_shape)
# remove leading dimensions with factor 1.0 for speed
for factor in factors:
if factor != 1.0:
break
shape = shape[1:]
factors = factors[1:]
data.shape = shape
# resize RGB components separately for speed
if shape[-1] in (3, 4) and factors[-1] == 1.0:
factors = factors[:-1]
old = data
data = numpy.empty(self.shape, self.dtype[-2:])
for i in range(shape[-1]):
j = {0: 2, 1: 1, 2: 0, 3: 3}[i] if bgr2rgb else i
data[..., i] = zoom(old[..., j], zoom=factors, order=order)
else:
data = zoom(data, zoom=factors, order=order)
data.shape = self.shape
return data
def augment(images):
pixels = images[0].shape[1]
center = pixels/2.-0.5
random_flip_x = P.AUGMENTATION_PARAMS['flip'] and np.random.randint(2) == 1
random_flip_y = P.AUGMENTATION_PARAMS['flip'] and np.random.randint(2) == 1
# Translation shift
shift_x = np.random.uniform(*P.AUGMENTATION_PARAMS['translation_range'])
shift_y = np.random.uniform(*P.AUGMENTATION_PARAMS['translation_range'])
rotation_degrees = np.random.uniform(*P.AUGMENTATION_PARAMS['rotation_range'])
zoom_factor = np.random.uniform(*P.AUGMENTATION_PARAMS['zoom_range'])
#zoom_factor = 1 + (zoom_f/2-zoom_f*np.random.random())
if CV2_AVAILABLE:
M = cv2.getRotationMatrix2D((center, center), rotation_degrees, zoom_factor)
M[0, 2] += shift_x
M[1, 2] += shift_y
for i in range(len(images)):
image = images[i]
if CV2_AVAILABLE:
#image = image.transpose(1,2,0)
image = cv2.warpAffine(image, M, (pixels, pixels))
if random_flip_x:
image = cv2.flip(image, 0)
if random_flip_y:
image = cv2.flip(image, 1)
#image = image.transpose(2,0,1)
images[i] = image
else:
if random_flip_x:
#image = image.transpose(1,0)
image[:,:] = image[::-1,:]
#image = image.transpose(1,0)
if random_flip_y:
image = image.transpose(1,0)
image[:,:] = image[::-1,:]
image = image.transpose(1,0)
rotate(image, rotation_degrees, reshape=False, output=image)
#image2 = zoom(image, [zoom_factor,zoom_factor])
image2 = crop_or_pad(image, pixels, -3000)
shift(image2, [shift_x,shift_y], output=image)
#affine_transform(image, np.array([[zoom_x,0], [0,zoom_x]]), output=image)
#z = AffineTransform(scale=(2,2))
#image = warp(image, z.params)
images[i] = image
return images