def _generate_batch(self, meta):
    # NOTE: scipy.ndimage.imread was deprecated in SciPy 1.0 and removed in 1.2;
    # imageio.imread is the usual replacement.
    image = ndimage.imread(meta.image_path)
    height, width, _ = meta.shape
    if height > width:
        scale = self._image_scale_size / width
    else:
        scale = self._image_scale_size / height
    # TODO: the dimension order in Caffe is (batch, channel, height, width)
    resized_image = ndimage.zoom(image, (scale, scale, 1))
    bboxes = np.empty((len(meta.objects), 5))
    for i, obj in enumerate(meta.objects):
        bboxes[i][:4] = obj['bbox']
        bboxes[i][4] = obj['class_index']
    return np.expand_dims(resized_image, 0), scale, bboxes
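As a sanity check, here is a minimal, self-contained sketch of the same shorter-side scaling; the array shape and target size are invented for illustration:

import numpy as np
from scipy import ndimage

image = np.random.rand(480, 640, 3)   # hypothetical HxWxC image
target = 256.0                        # hypothetical shorter-side size
scale = target / min(image.shape[:2])
# a zoom factor of 1 on the channel axis leaves the RGB planes untouched
resized = ndimage.zoom(image, (scale, scale, 1))
print(resized.shape)  # (256, 341, 3) on current SciPy; zoom rounds the output shape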
Example source code for Python's zoom()
def cmd_zoom(args):
    """
    Sub-command: "zoom", zoom the image to a new size with FoV coverage
    preserved.
    """
    fimage = FITSImage(args.infile)
    print("Image size: %dx%d" % (fimage.Nx, fimage.Ny))
    pixelsize = fimage.pixelsize
    if pixelsize is None:
        raise RuntimeError("--pixelsize required")
    else:
        print("Pixel size: %.1f [arcsec]" % pixelsize)
    print("Field of view: (%.2f, %.2f) [deg]" % fimage.fov)
    print("Zooming image ...")
    print("Interpolation order: %d" % args.order)
    print("Zoomed image size: %dx%d" % (args.size, args.size))
    fimage.zoom(newsize=args.size, order=args.order)
    print("Zoomed image pixel size: %.1f [arcsec]" % fimage.pixelsize)
    fimage.write(args.outfile, clobber=args.clobber)
    print("Saved zoomed FITS image to: %s" % args.outfile)
def predict_multi_scale(full_image, net, scales, sliding_evaluation, flip_evaluation):
    """Predict an image by looking at it with different scales."""
    classes = net.model.outputs[0].shape[3]
    full_probs = np.zeros((full_image.shape[0], full_image.shape[1], classes))
    h_ori, w_ori = full_image.shape[:2]
    for scale in scales:
        print("Predicting image scaled by %f" % scale)
        scaled_img = misc.imresize(full_image, size=scale, interp="bilinear")
        if sliding_evaluation:
            scaled_probs = predict_sliding(scaled_img, net, flip_evaluation)
        else:
            scaled_probs = net.predict(scaled_img, flip_evaluation)
        # scale probs up to full size
        h, w = scaled_probs.shape[:2]
        probs = ndimage.zoom(scaled_probs, (1. * h_ori / h, 1. * w_ori / w, 1.),
                             order=1, prefilter=False)
        # visualize_prediction(probs)
        # integrate probs over all scales
        full_probs += probs
    full_probs /= len(scales)
    return full_probs
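The upscaling idiom above is worth seeing in isolation; a small sketch with invented shapes of stretching an HxWxC probability map back to the original resolution:

import numpy as np
from scipy import ndimage

probs = np.random.rand(90, 120, 21)   # hypothetical low-res per-class probabilities
h_ori, w_ori = 360, 480               # hypothetical original image size
h, w = probs.shape[:2]
# order=1 is bilinear; prefilter has no effect at order <= 1, but the snippet
# above keeps it explicit, so we do the same here
full = ndimage.zoom(probs, (h_ori / h, w_ori / w, 1.0), order=1, prefilter=False)
print(full.shape)                     # (360, 480, 21)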
def __init__(self, output_image_shape, interpolation_order=3, zoom_kwargs=None, **super_kwargs):
    """
    Parameters
    ----------
    output_image_shape : list or tuple or int
        Target size of the output image. Aspect ratio may not be preserved.
    interpolation_order : int
        Interpolation order for the spline interpolation.
    zoom_kwargs : dict
        Keyword arguments for `scipy.ndimage.zoom`.
    super_kwargs : dict
        Keyword arguments for the superclass.
    """
    super(Scale, self).__init__(**super_kwargs)
    output_image_shape = (output_image_shape, output_image_shape) \
        if isinstance(output_image_shape, int) else tuple(output_image_shape)
    assert_(len(output_image_shape) == 2,
            "`output_image_shape` must be an integer or a tuple of length 2.",
            ValueError)
    self.output_image_shape = output_image_shape
    self.interpolation_order = interpolation_order
    self.zoom_kwargs = {} if zoom_kwargs is None else dict(zoom_kwargs)
def image_function(self, image):
    source_height, source_width = image.shape
    target_height, target_width = self.output_image_shape
    # We're on Python 3 - take a deep breath and relax.
    zoom_height, zoom_width = (target_height / source_height), (target_width / source_width)
    with catch_warnings():
        # Ignore warning that scipy should be > 0.13 (it's 0.19 these days)
        simplefilter('ignore')
        rescaled_image = zoom(image, (zoom_height, zoom_width),
                              order=self.interpolation_order, **self.zoom_kwargs)
    # This should never happen
    assert_(rescaled_image.shape == (target_height, target_width),
            "Shape mismatch that shouldn't have happened if you were on scipy > 0.13.0. "
            "Are you on scipy > 0.13.0?",
            ShapeError)
    return rescaled_image
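scipy.ndimage.zoom derives the output shape as round(input_size * zoom_factor), so computing the factors as target/source normally lands exactly on the target; a standalone sketch of the arithmetic this transform relies on, with plain numpy/scipy and invented sizes:

import numpy as np
from scipy.ndimage import zoom

image = np.random.rand(31, 47)                       # hypothetical odd-sized input
target_h, target_w = 64, 64
rescaled = zoom(image, (target_h / 31.0, target_w / 47.0), order=3)
print(rescaled.shape)  # (64, 64): round(31 * 64/31) == 64 etc., so the shapes match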
def update(self):
    '''
    Redraw the aperture contour and refresh the linked plots.
    '''
    # Update plot
    contour = np.zeros((self.ny, self.nx))
    contour[np.where(self.aperture)] = 1
    contour = np.lib.pad(contour, 1, self.PadWithZeros)
    highres = zoom(contour, 100, order=0, mode='nearest')
    extent = np.array([-1, self.nx, -1, self.ny])
    if self.contour is not None:
        for coll in self.contour.collections:
            self.ax.collections.remove(coll)
    self.contour = self.ax.contour(highres, levels=[0.5], extent=extent,
                                   origin='lower', colors='r', linewidths=2)
    self.update_bkg()
    self.update_lc()
    self.update_lcbkg()
    self.fig.canvas.draw()
def read_file(filename, shape=None):
    if filename.lower().endswith(".exr"):
        depth_map = read_depth(filename)
        return depth_map, depth_map < 1000.0
    elif filename.lower().endswith(".png"):
        depth_map = mpimg.imread(filename)
        if shape is not None:
            if depth_map.shape[0] > 1024:
                depth_map = depth_map[::2, ::2]
            # use the (possibly decimated) shape so the zoom lands exactly on `shape`
            ih, iw = depth_map.shape
            h, w = shape
            depth_map = zoom(depth_map, [float(h) / ih, float(w) / iw], order=1)
        mask = depth_map < 0.99
        depth_map = depth_map * 65536 / 1000
        return depth_map, mask
    elif filename.lower().endswith(".npy"):
        return np.load(filename), None
def draw_ori_on_img(img, ori, mask, fname, coh=None, stride=16):
    ori = np.squeeze(ori)
    mask = np.squeeze(np.round(mask))
    img = np.squeeze(img)
    ori = ndimage.zoom(ori, np.array(img.shape) / np.array(ori.shape, dtype=float), order=0)
    if mask.shape != img.shape:
        mask = ndimage.zoom(mask, np.array(img.shape) / np.array(mask.shape, dtype=float), order=0)
    if coh is None:
        coh = np.ones_like(img)
    fig = plt.figure()
    plt.imshow(img, cmap='gray')
    plt.hold(True)  # removed in Matplotlib 3.0; newer versions hold by default
    for i in xrange(stride, img.shape[0], stride):
        for j in xrange(stride, img.shape[1], stride):
            if mask[i, j] == 0:
                continue
            x, y, o, r = j, i, ori[i, j], coh[i, j] * (stride * 0.9)
            plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'r-')
    plt.axis([0, img.shape[1], img.shape[0], 0])
    plt.axis('off')
    plt.savefig(fname, bbox_inches='tight', pad_inches=0)
    plt.close(fig)
    return
def subsample(a):  # this is more a generic function than a method ...
    """
    Returns a 2x2-subsampled version of array a (no interpolation, just cutting
    pixels in 4). The version below is directly from the scipy cookbook on
    rebinning: U{http://www.scipy.org/Cookbook/Rebinning}
    There is ndimage.zoom(cutout.array, 2, order=0, prefilter=False), but it
    makes funny borders.
    """
    """
    # Ouuwww this is slow ...
    outarray = np.zeros((a.shape[0]*2, a.shape[1]*2), dtype=np.float64)
    for i in range(a.shape[0]):
        for j in range(a.shape[1]):
            outarray[2*i, 2*j] = a[i, j]
            outarray[2*i+1, 2*j] = a[i, j]
            outarray[2*i, 2*j+1] = a[i, j]
            outarray[2*i+1, 2*j+1] = a[i, j]
    return outarray
    """
    # much better:
    newshape = (2 * a.shape[0], 2 * a.shape[1])
    slices = [slice(0, old, float(old) / new) for old, new in zip(a.shape, newshape)]
    coordinates = np.mgrid[slices]
    indices = coordinates.astype('i')  # choose the biggest smaller integer index
    return a[tuple(indices)]
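A quick check of what subsample() produces, compared against np.repeat and the ndimage.zoom route the docstring mentions, on a small invented array:

import numpy as np
from scipy import ndimage

a = np.arange(6, dtype=np.float64).reshape(2, 3)
expected = np.repeat(np.repeat(a, 2, axis=0), 2, axis=1)  # each pixel becomes a 2x2 block
print(np.array_equal(subsample(a), expected))   # True
b = ndimage.zoom(a, 2, order=0, prefilter=False)
print(b.shape)  # (4, 6); values match here, but borders can differ in general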
def zoom(self, newsize, order=1):
    """
    Zoom the image to the specified ``newsize``, meanwhile the header
    information will be updated accordingly to preserve the FoV coverage.

    NOTE
    ----
    The image aspect ratio cannot be changed.

    Parameters
    ----------
    newsize : (Nx, Ny) or N
        The size of the zoomed image.
    order : int, optional
        The interpolation order, default: 1
    """
    try:
        Nx2, Ny2 = newsize
    except TypeError:
        Nx2 = Ny2 = newsize
    # +0.1 so that zoom()'s round() of the output shape hits the target exactly
    zoom = ((Ny2 + 0.1) / self.Ny, (Nx2 + 0.1) / self.Nx)
    if abs(zoom[0] - zoom[1]) > 1e-3:
        raise RuntimeError("image aspect ratio cannot be changed")
    pixelsize_old = self.pixelsize
    self.image = ndimage.zoom(self.image, zoom=zoom, order=order)
    self.pixelsize = pixelsize_old * (self.Nx / Nx2)
    return self.image
def predict(self, img, flip_evaluation):
    """
    Predict segmentation for an image.
    Arguments:
        img: must be rows x cols x 3
    """
    h_ori, w_ori = img.shape[:2]
    if img.shape[0:2] != self.input_shape:
        print("Input %s not fitting for network size %s, resizing. "
              "You may want to try sliding prediction for better results."
              % (img.shape[0:2], self.input_shape))
        img = misc.imresize(img, self.input_shape)
    input_data = self.preprocess_image(img)
    # utils.debug(self.model, input_data)
    regular_prediction = self.model.predict(input_data)[0]
    if flip_evaluation:
        print("Predict flipped")
        flipped_prediction = np.fliplr(self.model.predict(np.flip(input_data, axis=2))[0])
        prediction = (regular_prediction + flipped_prediction) / 2.0
    else:
        prediction = regular_prediction
    # upscale prediction if necessary (compare against the *original* size,
    # since `img` itself was already resized to the network's input shape)
    if (h_ori, w_ori) != self.input_shape:
        h, w = prediction.shape[:2]
        prediction = ndimage.zoom(prediction, (1. * h_ori / h, 1. * w_ori / w, 1.),
                                  order=1, prefilter=False)
    return prediction
def deepdream(net, base_img, iter_n=11, octave_n=4, octave_scale=1.4,
              end='inception_4c/output', clip=True, **step_params):
    # BACKUP high detail: iter_n=12, octave_n=6, octave_scale=1.6, end='inception_5b/pool_proj', clip=True
    # deepdream(net, base_img, iter_n=10, octave_n=7, octave_scale=1.6, end='prob', clip=False, **step_params)
    # function params >> net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_5b/5x5', clip=True, **step_params

    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))
    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)
        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)
            # visualization
            vis = deprocess(net, src.data[0])
            if not clip:  # adjust image contrast if clipping is disabled
                vis = vis * (255.0 / np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)
        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # return the resulting image
    return deprocess(net, src.data[0])
#///////////////////////////////////////////////////////////////
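Stripped of the Caffe plumbing, the octave pyramid above is just repeated spatial zooming of a (channel, height, width) array; a minimal sketch with an invented array:

import numpy as np
from scipy import ndimage as nd

img = np.random.rand(3, 320, 480)   # hypothetical (channel, height, width) array
octave_n, octave_scale = 4, 1.4
octaves = [img]
for _ in range(octave_n - 1):
    # factor 1 on the channel axis; spatial axes shrink by 1/octave_scale
    octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))
for o in octaves:
    print(o.shape)  # each octave is ~1.4x smaller: (3, 320, 480), (3, 229, 343), ...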
def deepdream(net, base_img, iter_n=12, octave_n=6, octave_scale=1.6,
              end='inception_5b/output', clip=True, **step_params):
    # BACKUP high detail: iter_n=12, octave_n=6, octave_scale=1.6, end='inception_5b/pool_proj', clip=True
    # deepdream(net, base_img, iter_n=10, octave_n=7, octave_scale=1.6, end='prob', clip=False, **step_params)
    # function params >> net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_5b/5x5', clip=True, **step_params

    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))
    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)
        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)
            # visualization
            vis = deprocess(net, src.data[0])
            if not clip:  # adjust image contrast if clipping is disabled
                vis = vis * (255.0 / np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)
        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # return the resulting image
    return deprocess(net, src.data[0])
#SELECT HERE THE PICTURE YOU WANT TO DRAW THE DREAM ON:
def deepdream(net, base_img, iter_n=5, octave_n=4, octave_scale=1.4,
              end='inception_4d/output', clip=True, **step_params):
    # BACKUP high detail: iter_n=12, octave_n=6, octave_scale=1.6, end='inception_5b/pool_proj', clip=True
    # deepdream(net, base_img, iter_n=10, octave_n=7, octave_scale=1.6, end='prob', clip=False, **step_params)
    # function params >> net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_5b/5x5', clip=True, **step_params

    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))
    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)
        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)
            # visualization
            vis = deprocess(net, src.data[0])
            if not clip:  # adjust image contrast if clipping is disabled
                vis = vis * (255.0 / np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)
        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # return the resulting image
    return deprocess(net, src.data[0])
#////////////////////////////////////////////////////////////////////////////////////
#SELECT SOURCE PICTURE & SET FRAME SUM
#////////////////////////////////////////////////////////////////////////////////////
def deepdream(net, base_img, iter_n=11, octave_n=4, octave_scale=1.4,
              end='inception_5a/output', clip=False, **step_params):
    # BACKUP high detail: iter_n=12, octave_n=6, octave_scale=1.6, end='inception_5b/pool_proj', clip=True
    # deepdream(net, base_img, iter_n=10, octave_n=7, octave_scale=1.6, end='prob', clip=False, **step_params)
    # function params >> net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_5b/5x5', clip=True, **step_params

    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))
    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)
        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)
            # visualization
            vis = deprocess(net, src.data[0])
            if not clip:  # adjust image contrast if clipping is disabled
                vis = vis * (255.0 / np.percentile(vis, 100))
                # vis = vis * (255.0 / np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)
        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # return the resulting image
    return deprocess(net, src.data[0])
#///////////////////////////////////////////////////////////////
def deepdream(net, base_img, iter_n=11, octave_n=4, octave_scale=1.3,
              end='inception_4c/output', clip=True, **step_params):
    # BACKUP high detail: iter_n=12, octave_n=6, octave_scale=1.6, end='inception_5b/pool_proj', clip=True
    # deepdream(net, base_img, iter_n=10, octave_n=7, octave_scale=1.6, end='prob', clip=False, **step_params)
    # function params >> net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_5b/5x5', clip=True, **step_params

    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))
    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)
        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)
            # visualization
            vis = deprocess(net, src.data[0])
            if not clip:  # adjust image contrast if clipping is disabled
                vis = vis * (255.0 / np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)
        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # return the resulting image
    return deprocess(net, src.data[0])
#SELECT HERE THE PICTURE YOU WANT TO DRAW THE DREAM ON:
#///////////////////////////////////////////////////////////////
def deepdream(net, base_img, iter_n=12, octave_n=6, octave_scale=1.6,
              end='inception_4b/output', clip=True, **step_params):
    # BACKUP high detail: iter_n=12, octave_n=6, octave_scale=1.6, end='inception_5b/pool_proj', clip=True
    # deepdream(net, base_img, iter_n=10, octave_n=7, octave_scale=1.6, end='prob', clip=False, **step_params)
    # function params >> net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_5b/5x5', clip=True, **step_params

    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))
    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)
        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)
            # visualization
            vis = deprocess(net, src.data[0])
            if not clip:  # adjust image contrast if clipping is disabled
                vis = vis * (255.0 / np.percentile(vis, 99.98))
            showarray(vis)
            # silent print
            print octave, i, end, vis.shape
            clear_output(wait=True)
        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # return the resulting image
    return deprocess(net, src.data[0])
#SELECT HERE THE PICTURE YOU WANT TO DRAW THE DREAM ON:
def _scale_interp_builtin(array, scale_value, mode='constant', cval=0):
    scaled = ndimage.zoom(array, scale_value, order=3, mode=mode, cval=cval)
    return scaled
def Inversion(Qsca, Qabs, wavelength, diameter, nMin=1, nMax=3, kMin=0.001, kMax=1,
              scatteringPrecision=0.010, absorptionPrecision=0.010, spaceSize=120, interp=2):
    error = lambda measured, calculated: np.abs((calculated - measured) / measured)
    nRange = np.linspace(nMin, nMax, spaceSize)
    kRange = np.logspace(np.log10(kMin), np.log10(kMax), spaceSize)
    scaSpace = np.zeros((spaceSize, spaceSize))
    absSpace = np.zeros((spaceSize, spaceSize))
    for ni, n in enumerate(nRange):
        for ki, k in enumerate(kRange):
            _derp = fastMieQ(n + (1j * k), wavelength, diameter)
            scaSpace[ni][ki] = _derp[0]
            absSpace[ni][ki] = _derp[1]
    if interp is not None:
        nRange = zoom(nRange, interp)
        kRange = zoom(kRange, interp)
        scaSpace = zoom(scaSpace, interp)
        absSpace = zoom(absSpace, interp)
    scaSolutions = np.where(np.logical_and(Qsca * (1 - scatteringPrecision) < scaSpace,
                                           scaSpace < Qsca * (1 + scatteringPrecision)))
    absSolutions = np.where(np.logical_and(Qabs * (1 - absorptionPrecision) < absSpace,
                                           absSpace < Qabs * (1 + absorptionPrecision)))
    validScattering = nRange[scaSolutions[0]] + 1j * kRange[scaSolutions[1]]
    validAbsorption = nRange[absSolutions[0]] + 1j * kRange[absSolutions[1]]
    solution = np.intersect1d(validScattering, validAbsorption)
    # errors = [error()]
    return solution
def Inversion_SD(Bsca, Babs, wavelength, dp, ndp, nMin=1, nMax=3, kMin=0, kMax=1,
                 scatteringPrecision=0.001, absorptionPrecision=0.001, spaceSize=40, interp=2):
    dp = coerceDType(dp)
    ndp = coerceDType(ndp)
    nRange = np.linspace(nMin, nMax, spaceSize)
    kRange = np.linspace(kMin, kMax, spaceSize)
    scaSpace = np.zeros((spaceSize, spaceSize))
    absSpace = np.zeros((spaceSize, spaceSize))
    for ni, n in enumerate(nRange):
        for ki, k in enumerate(kRange):
            _derp = fastMie_SD(n + (1j * k), wavelength, dp, ndp)
            scaSpace[ni][ki] = _derp[0]
            absSpace[ni][ki] = _derp[1]
    if interp is not None:
        nRange = zoom(nRange, interp)
        kRange = zoom(kRange, interp)
        scaSpace = zoom(scaSpace, interp)
        absSpace = zoom(absSpace, interp)
    scaSolutions = np.where(np.logical_and(Bsca * (1 - scatteringPrecision) < scaSpace,
                                           scaSpace < Bsca * (1 + scatteringPrecision)))
    absSolutions = np.where(np.logical_and(Babs * (1 - absorptionPrecision) < absSpace,
                                           absSpace < Babs * (1 + absorptionPrecision)))
    validScattering = nRange[scaSolutions[0]] + 1j * kRange[scaSolutions[1]]
    validAbsorption = nRange[absSolutions[0]] + 1j * kRange[absSolutions[1]]
    return np.intersect1d(validScattering, validAbsorption)
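Note that zoom() serves double duty in these inversions: it densifies the 1-D n/k grids as well as the 2-D efficiency surfaces, keeping their indices aligned. A tiny sketch of what interp=2 does, with invented sizes:

import numpy as np
from scipy.ndimage import zoom

nRange = np.linspace(1, 3, 5)     # hypothetical coarse grid of refractive indices
surface = np.random.rand(5, 5)    # hypothetical Qsca values on that grid
print(zoom(nRange, 2).shape)      # (10,) - the 1-D grid is densified
print(zoom(surface, 2).shape)     # (10, 10) - the surface is densified to match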
def getVoxelFromMat(path, cube_len=64):
    """Load a voxel grid from a .mat file, zero-padding it by one voxel per side."""
    voxels = io.loadmat(path)['instance']
    voxels = np.pad(voxels, (1, 1), 'constant', constant_values=(0, 0))
    if cube_len == 64:  # upsample the padded grid 2x with nearest-neighbour zoom
        voxels = nd.zoom(voxels, (2, 2, 2), mode='constant', order=0)
    return voxels
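A minimal check of the nearest-neighbour upsampling step, with a made-up binary grid standing in for the .mat contents:

import numpy as np
from scipy import ndimage as nd

voxels = (np.random.rand(32, 32, 32) > 0.5).astype(np.float64)  # hypothetical 32^3 grid
big = nd.zoom(voxels, (2, 2, 2), mode='constant', order=0)
print(big.shape)      # (64, 64, 64)
print(np.unique(big)) # [0. 1.]: order=0 never invents intermediate values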
def load_itk_image_rescaled(filename, slice_mm):
    im, origin, spacing = load_itk_image(filename)
    # resample along the slice axis so that each slice spans `slice_mm` millimetres
    new_im = zoom(im, [spacing[0] / slice_mm, 1.0, 1.0])
    return new_im
def load(self, infile, frequency=None):
    """
    Load input sky image from file into this instance.

    Parameters
    ----------
    infile : str
        The path to the input sky patch
    frequency : float, optional
        The frequency of the sky patch;
        Unit: [MHz]
    """
    self.infile = infile
    if frequency is not None:
        self.frequency = frequency
    with fits.open(infile) as f:
        self.data = f[0].data
        header = f[0].header.copy(strip=True)
    self.header_.extend(header, update=True)
    self.ysize_in, self.xsize_in = self.data.shape
    logger.info("Loaded sky patch from: %s (%dx%d)" %
                (infile, self.xsize_in, self.ysize_in))
    if (self.xsize_in != self.xsize) or (self.ysize_in != self.ysize):
        logger.warning("Scale input sky patch to size %dx%d" %
                       (self.xsize, self.ysize))
        # +0.1 so that zoom()'s round() of the output shape hits the target exactly
        zoom = ((self.ysize + 0.1) / self.ysize_in,
                (self.xsize + 0.1) / self.xsize_in)
        self.data = ndimage.zoom(self.data, zoom=zoom, order=1)
def circle2ellipse(imgcirc, bfraction, rotation=None):
    """
    Shrink the input circle image with respect to the center along the
    column (axis) to transform the circle to an ellipse, and then rotate
    around the image center.

    Parameters
    ----------
    imgcirc : 2D `~numpy.ndarray`
        Input image grid containing a circle at the center
    bfraction : float
        The fraction of the semi-minor axis w.r.t. the semi-major axis
        (i.e., the half width of the input image), to determine the
        shrunk size (height) of the output image.
        Should be a fraction within [0, 1]
    rotation : float, optional
        Rotation angle (unit: [deg])
        Default: ``None`` (i.e., no rotation)

    Returns
    -------
    imgout : 2D `~numpy.ndarray`
        Image of the same size as the input circle image.
    """
    nrow, ncol = imgcirc.shape
    # Shrink the circle to be elliptical
    nrow2 = nrow * bfraction
    nrow2 = int(nrow2 / 2) * 2 + 1  # be odd
    # NOTE: zoom() calculates the output shape with round() instead of int();
    # the +0.1 keeps the two in agreement and silences the mismatch warning.
    zoom = ((nrow2 + 0.1) / nrow, 1)
    img2 = ndimage.zoom(imgcirc, zoom=zoom, order=1)
    # Pad the shrunk image to have the same size as input
    imgout = np.zeros(shape=(nrow, ncol))
    r1 = int((nrow - nrow2) / 2)
    imgout[r1:(r1 + nrow2), :] = img2
    if rotation:
        imgout = ndimage.rotate(imgout, angle=rotation, reshape=False, order=1)
    return imgout
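A quick usage sketch with a synthetic circle (just numpy, no helpers assumed):

import numpy as np

n = 201
yy, xx = np.mgrid[:n, :n]
r = np.hypot(xx - n // 2, yy - n // 2)
imgcirc = (r <= 60).astype(float)    # synthetic filled circle at the image center
ellipse = circle2ellipse(imgcirc, bfraction=0.5, rotation=30.0)
print(ellipse.shape)                 # (201, 201): same size, squashed then rotated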
def heatmap2segconf(heat_maps, imshape_original, gt_cls):
    heat_maps_os = nd.zoom(heat_maps,
                           [1,
                            float(imshape_original[0]) / heat_maps.shape[1],
                            float(imshape_original[1]) / heat_maps.shape[2]],
                           order=1)
    heat_maps_norm = heat_maps_os / heat_maps_os.max(axis=1).max(axis=1).reshape((-1, 1, 1))
    confidence = heat_maps_norm.max(0)
    seg = gt_cls[heat_maps_norm.argmax(axis=0)]
    return seg, confidence
Source: guided_dreams.py (project: deepdream-neural-style-transfer, author: rdcolema)
def deepdream(net, base_img, iter_n=5, octave_n=11, octave_scale=1.4,
              end='inception_3b/5x5_reduce', clip=True, **step_params):
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))
    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)
        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)
            # visualization
            vis = deprocess(net, src.data[0])
            if not clip:  # adjust image contrast if clipping is disabled
                vis = vis * (255.0 / np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)
        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # return the resulting image
    return deprocess(net, src.data[0])