def test_execute(self, transcoder, region_test_jpg):
    image_request = Mock(
        file_path=region_test_jpg,
        region_request_type=FULL,
        region_pixel_x=0,
        region_pixel_y=0,
        region_pixel_w=6000,
        region_pixel_h=8000,
        size_request_type=FULL,
        width=6000,
        height=8000,
        mirror=False,
        rotation=0.0,
        quality='default',
        format='jpg'
    )
    stream = transcoder.execute(image_request)
    pillow_image = parse_stream(stream)
    assert pillow_image.size == (6000, 8000)
Python mirror() example source code
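All of the snippets below revolve around PIL's ImageOps.mirror, which flips an image horizontally (left to right). A minimal sketch of the call, assuming Pillow is installed; the file names are placeholders:

from PIL import Image, ImageOps

# Open an image, flip it left-to-right, and save the result.
# 'example.jpg' and 'example_mirrored.jpg' are placeholder paths.
image = Image.open('example.jpg')
mirrored = ImageOps.mirror(image)
mirrored.save('example_mirrored.jpg')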
def apply_effects(image, effects):
    """Apply each effect from the effects list to the original image in turn."""
    for effect in effects:
        gray = ImageOps.grayscale(image)
        # dictionary with all the available effects
        # note: every effect in the dictionary is computed on each pass; only the requested one is kept
        all_effects = {
            'BLUR': image.filter(ImageFilter.BLUR),
            'CONTOUR': image.filter(ImageFilter.CONTOUR),
            'EMBOSS': image.filter(ImageFilter.EMBOSS),
            'SMOOTH': image.filter(ImageFilter.SMOOTH),
            'HULK': ImageOps.colorize(gray, (0, 0, 0, 0), '#00ff00'),
            'FLIP': ImageOps.flip(image),
            'MIRROR': ImageOps.mirror(image),
            'INVERT': ImageOps.invert(image),
            'SOLARIZE': ImageOps.solarize(image),
            'GREYSCALE': ImageOps.grayscale(image),
        }
        image = all_effects[effect]
    return image
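A hypothetical call to apply_effects; the file names and effect list are made up for illustration, and the effect names must be keys of all_effects:

from PIL import Image

# 'photo.jpg' is a placeholder path; effects are applied in order.
original = Image.open('photo.jpg')
edited = apply_effects(original, ['GREYSCALE', 'MIRROR', 'CONTOUR'])
edited.save('photo_edited.jpg')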
def mirror(np_img):
    # Convert the NumPy array to a PIL image, mirror it horizontally, and return it as a NumPy array.
    image = Image.fromarray(np_img)
    image = ImageOps.mirror(image)
    data = np.asarray(image)
    return data
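A small usage sketch for the NumPy-based mirror helper above; the 2×3 array is invented for illustration, and mirroring reverses each row:

import numpy as np

# A tiny 2x3 grayscale array; mirroring reverses each row.
arr = np.array([[1, 2, 3],
                [4, 5, 6]], dtype=np.uint8)
print(mirror(arr))
# Expected output:
# [[3 2 1]
#  [6 5 4]]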
def main():
    image = Image.open(args.input_path)
    image = ImageOps.mirror(image)
    # data = np.asarray(image)
    # print(type(data))
    # print(type(data[0,0,0]))
    # print(data.shape)
    image.save(args.output_path)
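This main() relies on a module-level args object that is not shown; one plausible argparse setup it could come from (an assumption, not the original code):

import argparse

# Hypothetical reconstruction of the parser that would supply args.input_path / args.output_path.
parser = argparse.ArgumentParser(description='Mirror an image horizontally.')
parser.add_argument('input_path', help='path of the image to mirror')
parser.add_argument('output_path', help='where to save the mirrored image')
args = parser.parse_args()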
def test_mirror(self, transcoder, pillow_image):
    pillow_image = mirror(pillow_image)
    assert pillow_image.getpixel((299, 399)) == RED
    assert pillow_image.getpixel((300, 399)) == GREEN
    assert pillow_image.getpixel((299, 400)) == ORANGE
    assert pillow_image.getpixel((300, 400)) == BLUE
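The pillow_image fixture and the RED/GREEN/ORANGE/BLUE constants are not part of this excerpt; one fixture consistent with these assertions would be a 600×800 four-quadrant image, sketched here as an illustrative guess rather than the project's actual fixture:

from PIL import Image

# Hypothetical color constants and fixture; only the quadrant layout matters for the test.
RED, GREEN, ORANGE, BLUE = (255, 0, 0), (0, 255, 0), (255, 165, 0), (0, 0, 255)

def make_quadrant_image(width=600, height=800):
    img = Image.new('RGB', (width, height))
    for x in range(width):
        for y in range(height):
            if y < height // 2:
                img.putpixel((x, y), GREEN if x < width // 2 else RED)
            else:
                img.putpixel((x, y), BLUE if x < width // 2 else ORANGE)
    return img

After ImageOps.mirror, the left and right halves swap, which is exactly what the four boundary-pixel assertions check.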
def execute_with_pil_image(self, pil_image, image_request, crop=False, dither=Image.FLOYDSTEINBERG):
    if crop and image_request.region_request_type is not FULL:
        pil_image = self._crop(pil_image, image_request)
    if image_request.size_request_type is not FULL:
        pil_image = self._resize(pil_image, image_request)
    if image_request.mirror:
        pil_image = mirror(pil_image)
    if image_request.rotation != 0.0:
        pil_image = self._rotate(pil_image, image_request)
    if image_request.quality != 'default':
        pil_image = self._adjust_quality(pil_image, image_request, dither=dither)
    return self._save_to_bytesio(pil_image, image_request)
def create_and_save_mirror(self, base_dir, ex_type, mask, im_patch, pic_id, seg_id, x_offset, y_offset, scale):
    mir_im = ImageOps.mirror(im_patch)
    mir_im.save(self.create_path(base_dir, ex_type, 'mir-im', pic_id, seg_id, x_offset, y_offset, scale))
    mir_mask = ImageOps.mirror(mask)
    mir_mask.save(self.create_path(base_dir, ex_type, 'mir-mask', pic_id, seg_id, x_offset, y_offset, scale))
def main():
    x_min, x_max = det_size()
    list_of_points = []
    line = []
    lines = []
    with open('data.txt', 'r') as f:
        for l in f.readlines():
            x, y = (int(l.split(',')[0][1:])), (int(l.split(',')[1][:-2]))
            list_of_points.append((x, y))
    for i in range(x_max):
        line = []
        for j in range(x_max):
            if (j, i) in list_of_points:
                line.append(255)
            else:
                line.append(0)
        lines.append(line)
    y = np.array([np.array(xi) for xi in lines])
    plt.imshow(y, cmap='Greys', interpolation='none')
    name = 'qrdecode.png'
    mir_name = name[:-4] + '_mirr.png'
    plt.axis('off')
    plt.savefig(name)
    # plt.show()
    im = Image.open(name)
    im = ImageOps.mirror(im)
    # im.show()
    im.save(mir_name)
def main():
    t = time.time()
    img = imread(args.img_file_path)
    imgs = [img, watermark(img), rotate(img), crop(img), mirror(img)]
    imgs_norm = image_normalize(imgs)
    dataset_features = np.load('fc6.npy')
    query_start = time.time()
    query_features = extract_feature(imgs_norm)
    binarizer = preprocessing.Binarizer().fit(query_features)
    query_features = binarizer.transform(query_features)
    print(dataset_features)
    # https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html#scipy.spatial.distance.cdist
    cosine = distance.cdist(dataset_features, query_features, 'cosine')
    print(cosine.shape)
    dis = cosine
    inds_all = argsort(dis, axis=0)  # https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html
    print('query cost: %f, dataset: %d, query: %d' % (time.time() - query_start, len(dataset_features), len(imgs)))
    img_names = load_image_names()
    fig, axes = plt.subplots(5, 11, figsize=(22, 10), subplot_kw={'xticks': [], 'yticks': []})
    fig.subplots_adjust(hspace=0.15, wspace=0.01, left=.02, right=.98, top=.92, bottom=.08)
    titles = ['original', 'watermark', 'rotate', 'crop', 'mirror']
    for i in range(len(imgs)):
        topK = []
        inds = inds_all[:, i]
        # print(inds)
        for k in range(10):
            topK.append(img_names[inds[k]])
            print(inds[k], dis[inds[k], i], img_names[inds[k]])
        original = axes[i, 0]
        original.set_title(titles[i])
        img = imgs[i]
        original.imshow(img)
        for j in range(10):
            ax = axes[i, j + 1]
            img = imread(topK[j])
            ax.imshow(img)
            title = '%d : %f' % (j + 1, dis[inds[j], i])
            ax.set_title(title)
    savePath = args.img_file_path + '_search_result.jpg'
    plt.savefig(savePath)
    print(time.time() - t)
    # os.system('open -a Preview.app -F ' + savePath)
def create_positive_canonical_and_noisy_examples_from_mask(self, im_arr, full_seg_im, orig_seg_patch, pic_patch,
                                                           bbox_patch, pic_id, seg_id, stats):
    created_examples = 0
    offsets = [-translation_shift, 0, translation_shift]
    scales = [pow(2.0, scale_deformation), 1, pow(2.0, -scale_deformation)]
    [orig_patch_center_x, orig_patch_center_y] = orig_seg_patch.center()
    [orig_patch_width, orig_patch_height] = orig_seg_patch.size()
    for scale_i in range(len(scales)):
        for x_offset_i in range(len(offsets)):
            for y_offset_i in range(len(offsets)):
                new_patch_width = orig_patch_width * scales[scale_i]
                new_patch_height = orig_patch_height * scales[scale_i]
                new_patch_min_x = orig_patch_center_x - new_patch_width / 2 + offsets[x_offset_i]
                new_patch_min_y = orig_patch_center_y - new_patch_height / 2 + offsets[y_offset_i]
                new_patch = Patch(new_patch_min_x, new_patch_width, new_patch_min_y, new_patch_height)
                if self.patch_exceeds_pic(new_patch, pic_patch):
                    stats.pos_noisy_seg_too_close_to_edges += 1
                    continue
                if self.patch_exceeds_seg(new_patch, bbox_patch):
                    # this will not happen with the default constants (input size, max object dimension)
                    stats.pos_noisy_seg_cuts_seg += 1
                    continue
                img_path = self.create_path(self.positive_output_dir, 'pos', 'im', pic_id, seg_id, x_offset_i,
                                            y_offset_i, scale_i)
                patch_im = self.create_and_save_image_patch(im_arr, new_patch, img_path)
                mask_path = self.create_path(self.positive_output_dir, 'pos', 'mask', pic_id, seg_id,
                                             x_offset_i, y_offset_i, scale_i)
                patch_seg_im = self.create_and_save_mask(full_seg_im, new_patch, mask_path)
                self.create_and_save_mirror(self.positive_output_dir, 'pos', patch_seg_im, patch_im, pic_id,
                                            seg_id, x_offset_i, y_offset_i, scale_i)
                created_examples += 2  # example and mirror
    return created_examples
def Data_iterate_minibatches(inputs, targets, batchsize, arg=False, genSetting=None, shuffle=False, warpMode=None):
    # assert len(inputs[0]) == len(targets[0])
    if shuffle:
        rinputs = copy.deepcopy(inputs)
        rtargets = copy.deepcopy(targets)
        indices = np.random.permutation(len(inputs[0]))
        for i in range(len(inputs[0])):
            for idx in range(len(inputs)):
                rinputs[idx][i] = inputs[idx][indices[i]]
            for idx in range(len(targets)):
                rtargets[idx][i] = targets[idx][indices[i]]
        inputs = rinputs
        targets = rtargets
        # inputs[:] = inputs[indices]
        # targets[:] = targets[indices]
    init = True
    global input_tmp
    global target_tmp
    global isOK
    for start_idx in range(0, len(inputs[0]) - batchsize * 2 + 1, batchsize):
        # if (isOK == False) and (two == False):
        #     inputsbatch, targetsbatch = read_pics(inputs[start_idx:start_idx + batchsize], targets[start_idx:start_idx + batchsize], batchsize, crop, mirror, flip, rotate)
        # else:
        while isOK == False:
            if init:
                sl = range(start_idx, start_idx + batchsize)
                thread.start_new_thread(Data_readPics_thread, ([itemgetter(*sl)(i) for i in inputs], [itemgetter(*sl)(i) for i in targets], batchsize, genSetting, arg, warpMode))
                init = False
                # inputsbatch, targetsbatch = read_pics(inputs[start_idx:start_idx + batchsize], targets[start_idx:start_idx + batchsize], batchsize, crop, mirror, flip, rotate)
            time.sleep(0.01)
        inputsbatch, targetsbatch = input_tmp, target_tmp
        isOK = False
        sl = range(start_idx + batchsize, start_idx + 2 * batchsize)
        thread.start_new_thread(Data_readPics_thread, ([itemgetter(*sl)(i) for i in inputs], [itemgetter(*sl)(i) for i in targets], batchsize, genSetting, arg, warpMode))
        # yield itertools.chain(inputsbatch, targetsbatch)
        yield inputsbatch + targetsbatch
    while isOK == False:
        time.sleep(0.01)
    inputsbatch, targetsbatch = input_tmp, target_tmp
    isOK = False
    # yield itertools.chain(inputsbatch, targetsbatch)
    yield inputsbatch + targetsbatch
    # len(inputs) - batchsize*2 + 1
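A hedged usage sketch for the generator above, assuming inputs and targets are lists of per-modality sample lists (as the nested indexing implies) and that Data_readPics_thread plus the input_tmp / target_tmp / isOK globals are defined elsewhere in the module:

# Hypothetical driver loop; each yielded batch concatenates the input and target batches.
for batch in Data_iterate_minibatches(inputs, targets, batchsize=32,
                                      arg=True, genSetting=None, shuffle=True):
    train_fn(*batch)  # train_fn is a placeholder for the actual training step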