def read_images_info(path):
    for subdir, dirs, files in os.walk(path):
        dcms = glob.glob(os.path.join(subdir, '*.dcm'))
        if len(dcms) > 1:  # treat any folder holding multiple DICOM files as a series
            slices = [dicom.read_file(dcm) for dcm in dcms]
            # order the slices along the patient z-axis
            slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))
            images = np.stack([s.pixel_array for s in slices], axis=0).astype(np.float32)
            # shift raw values by the DICOM rescale intercept
            images = images + slices[0].RescaleIntercept
            orig_shape = images.shape
            # scale factor from the scan's pixel spacing to the target PIXEL_SPACING
            inplane_scale = slices[0].PixelSpacing[0] / PIXEL_SPACING
            # round the resampled in-plane size to the nearest even integer
            inplane_size = int(np.rint(inplane_scale * slices[0].Rows / 2) * 2)
            return orig_shape, inplane_size
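The snippet above assumes module-level imports of os, glob, numpy as np and pydicom (imported as dicom), plus a PIXEL_SPACING constant. Its int(np.rint(x / 2) * 2) idiom rounds the rescaled in-plane size to the nearest even integer so the resampled slices stay symmetric about their centre. A minimal sketch of the idiom in isolation:

import numpy as np

def nearest_even(x):
    # round x to the closest even integer via np.rint on the half-value
    return int(np.rint(x / 2.0) * 2)

print(nearest_even(511.3))  # 512
print(nearest_even(256.9))  # 256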
Python rint() usage examples (source code)
Source file: prepare_data_for_submission.py
Project: aapm_thoracic_challenge (author: xf4j)
def time_slice(self, t_start, t_stop):
    '''
    Creates a new AnalogSignal corresponding to the time slice of the
    original AnalogSignal between times t_start and t_stop. Note that,
    for numerical stability reasons, if t_start or t_stop does not fall
    exactly on a time bin defined by the sampling_period, it is rounded
    to the nearest sampling bin.
    '''
# checking start time and transforming to start index
if t_start is None:
i = 0
else:
t_start = t_start.rescale(self.sampling_period.units)
i = (t_start - self.t_start) / self.sampling_period
i = int(np.rint(i.magnitude))
# checking stop time and transforming to stop index
if t_stop is None:
j = len(self)
else:
t_stop = t_stop.rescale(self.sampling_period.units)
j = (t_stop - self.t_start) / self.sampling_period
j = int(np.rint(j.magnitude))
if (i < 0) or (j > len(self)):
        raise ValueError('t_start, t_stop have to be within the analog '
                         'signal duration')
    # index with an explicit array of indices so that __getitem__ returns
    # a *copy* of the sliced data
obj = super(AnalogSignal, self).__getitem__(np.arange(i, j, 1))
obj.t_start = self.t_start + i * self.sampling_period
return obj
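This method appears to come from a neo-style AnalogSignal. A minimal usage sketch, assuming the neo and quantities packages are installed (the signal values here are arbitrary):

import numpy as np
import quantities as pq
from neo.core import AnalogSignal

sig = AnalogSignal(np.random.randn(1000, 1), units='mV', sampling_rate=1 * pq.kHz)
chunk = sig.time_slice(0.25 * pq.s, 0.75 * pq.s)
print(chunk.t_start, chunk.t_stop, chunk.shape)  # 0.25 s, 0.75 s, (500, 1)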
def getJitteredImgs(self, img, num, maxRot=(-5.0, 5.0), maxTranslate=(-2.0, 2.0), maxScale=(-0.1, 0.1), augmentColor=False):
"""
Take img and jitter it
:return: a list of all jittered images
"""
cx = img.size[0] / 2
cy = img.size[1] / 2
tMats = self.getJitteredParams(center=(cx, cy), num=num, maxRot=maxRot, maxTranslate=maxTranslate,
maxScale=maxScale)
imgs = []
for i in range(len(tMats)):
t = tMats[i]
imgT = self.transformImg(img, t)
if augmentColor:
# jitter colors
color = ImageEnhance.Color(imgT)
imgT = color.enhance(self.rng.uniform(0.7, 1))
# jitter contrast
contr = ImageEnhance.Contrast(imgT)
imgT = contr.enhance(self.rng.uniform(0.7, 1))
# jitter brightness
bright = ImageEnhance.Brightness(imgT)
imgT = bright.enhance(self.rng.uniform(0.7, 1))
            # add Gaussian pixel noise, then clip back to the valid 8-bit range
            noise = numpy.rint(self.rng.normal(0, 4, numpy.asarray(imgT).shape)).astype('int')
            im = numpy.asarray(imgT).astype('int') + noise
            im = numpy.clip(im, 0, 255).astype('uint8')
            imgT = Image.fromarray(im)
# add image
imgs.append(imgT)
return imgs, tMats
def data_prep_function(data, luna_annotations, pixel_spacing, luna_origin,
p_transform=p_transform,
p_transform_augment=None):
    # make sure the data is processed the same way
    lung_mask = lung_segmentation.segment_HU_scan_ira(data)
    # convert each annotation centre from world to voxel coordinates
    annotations_out = []
    for zyxd in luna_annotations:
        zyx = np.array(zyxd[:3])
        voxel_coords = utils_lung.world2voxel(zyx, luna_origin, pixel_spacing)
        zyxd_out = np.rint(np.append(voxel_coords, zyxd[-1]))
        annotations_out.append(zyxd_out)
    annotations_out = np.asarray(annotations_out)
    return lung_mask, lung_mask, lung_mask, annotations_out, None
def processMatrix(self):
self._transformedMin = numpy.array([999999999999,999999999999,999999999999], numpy.float64)
self._transformedMax = numpy.array([-999999999999,-999999999999,-999999999999], numpy.float64)
self._boundaryCircleSize = 0
    hull = numpy.zeros((0, 2), int)
for m in self._meshList:
transformedVertexes = m.getTransformedVertexes()
hull = polygon.convexHull(numpy.concatenate((numpy.rint(transformedVertexes[:,0:2]).astype(int), hull), 0))
transformedMin = transformedVertexes.min(0)
transformedMax = transformedVertexes.max(0)
        for n in range(0, 3):
self._transformedMin[n] = min(transformedMin[n], self._transformedMin[n])
self._transformedMax[n] = max(transformedMax[n], self._transformedMax[n])
        # Calculate the boundary circle
        transformedSize = transformedMax - transformedMin
        center = transformedMin + transformedSize / 2.0
        dx2 = (transformedVertexes[:, 0] - center[0]) ** 2
        dy2 = (transformedVertexes[:, 1] - center[1]) ** 2
        dz2 = (transformedVertexes[:, 2] - center[2]) ** 2
        boundaryCircleSize = round(math.sqrt(numpy.max(dx2 + dy2 + dz2)), 3)
self._boundaryCircleSize = max(self._boundaryCircleSize, boundaryCircleSize)
self._transformedSize = self._transformedMax - self._transformedMin
self._drawOffset = (self._transformedMax + self._transformedMin) / 2
self._drawOffset[2] = self._transformedMin[2]
self._transformedMax -= self._drawOffset
self._transformedMin -= self._drawOffset
self._boundaryHull = polygon.minkowskiHull((hull.astype(numpy.float32) - self._drawOffset[0:2]), numpy.array([[-1,-1],[-1,1],[1,1],[1,-1]],numpy.float32))
self._printAreaHull = polygon.minkowskiHull(self._boundaryHull, self._printAreaExtend)
self.setHeadArea(self._headAreaExtend, self._headMinSize)
def generate_patch_locations(patches, patch_size, im_size):
    # choose nx:ny:nz proportional to the image dimensions so that
    # nx * ny * nz is approximately patches * 8
    nx = round((patches * 8 * im_size[0] * im_size[0] / im_size[1] / im_size[2]) ** (1.0 / 3))
    ny = round(nx * im_size[1] / im_size[0])
    nz = round(nx * im_size[2] / im_size[0])
x = np.rint(np.linspace(patch_size, im_size[0] - patch_size, num=nx))
y = np.rint(np.linspace(patch_size, im_size[1] - patch_size, num=ny))
z = np.rint(np.linspace(patch_size, im_size[2] - patch_size, num=nz))
return x, y, z
def perturb_patch_locations(patch_locations, radius):
x, y, z = patch_locations
x = np.rint(x + np.random.uniform(-radius, radius, len(x)))
y = np.rint(y + np.random.uniform(-radius, radius, len(y)))
z = np.rint(z + np.random.uniform(-radius, radius, len(z)))
return x, y, z
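Together, these two helpers lay down a roughly uniform grid of patch centres and then jitter each coordinate by up to radius voxels. A hedged usage sketch with an illustrative volume shape:

import numpy as np

im_size = (256, 256, 128)  # hypothetical volume shape
locations = generate_patch_locations(patches=100, patch_size=16, im_size=im_size)
x, y, z = perturb_patch_locations(locations, radius=4)
print(len(x), len(y), len(z))  # 12 12 6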
def test_rint_big_int():
# np.rint bug for large integer values on Windows 32-bit and MKL
# https://github.com/numpy/numpy/issues/6685
val = 4607998452777363968
# This is exactly representable in floating point
assert_equal(val, int(float(val)))
# Rint should not change the value
assert_equal(val, np.rint(val))
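Related to the test above: np.rint implements IEEE round-half-to-even, so exact halfway values go to the nearest even integer rather than away from zero:

import numpy as np
print(np.rint([0.5, 1.5, 2.5, -0.5]))  # [ 0.  2.  2. -0.]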
def _normalize_shape(ndarray, shape, cast_to_int=True):
    """Normalize a scalar or tuple pad-style `shape` argument into one
    (before, after) pair per axis of `ndarray`."""
    ndims = ndarray.ndim
if shape is None:
return ((None, None), ) * ndims
ndshape = numpy.asarray(shape)
if ndshape.size == 1:
ndshape = numpy.repeat(ndshape, 2)
if ndshape.ndim == 1:
ndshape = numpy.tile(ndshape, (ndims, 1))
if ndshape.shape != (ndims, 2):
message = 'Unable to create correctly shaped tuple from %s' % shape
raise ValueError(message)
if cast_to_int:
ndshape = numpy.rint(ndshape).astype(int)
return tuple(tuple(axis) for axis in ndshape)
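A quick check of how the scalar, pair and None forms expand into one (before, after) pair per axis:

import numpy
arr = numpy.zeros((3, 4))
print(_normalize_shape(arr, 2))       # ((2, 2), (2, 2))
print(_normalize_shape(arr, (1, 2)))  # ((1, 2), (1, 2))
print(_normalize_shape(arr, None))    # ((None, None), (None, None))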
def calc_kappa(self, train_pred, dev_pred, test_pred, weight='quadratic'):
train_pred_int = np.rint(train_pred).astype('int32')
dev_pred_int = np.rint(dev_pred).astype('int32')
test_pred_int = np.rint(test_pred).astype('int32')
    self.train_qwk = kappa(self.train_y_org, train_pred_int, weight)
    self.dev_qwk = kappa(self.dev_y_org, dev_pred_int, weight)
    self.test_qwk = kappa(self.test_y_org, test_pred_int, weight)
def sanitise_image(mat):
int_mat = np.rint(mat).astype(int)
return np.clip(int_mat, 0, 255).astype(np.uint8)
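A quick sanity check of the round-then-clip behaviour (note 254.5 rounds to 254 under np.rint's half-to-even rule):

import numpy as np
print(sanitise_image(np.array([-3.2, 10.6, 254.5, 300.4])))  # [  0  11 254 255]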
def extract_RGB_LBP_features(image, labels, size=5, P=8, R=2):
n_sp = np.max(labels)+1
hs = size//2
img_superpixel = np.zeros_like(labels, dtype='int')
# calculate lbp for entire region
lbp_img = np.empty((3, ), dtype='object')
for d in range(3):
lbp_img[d] = local_binary_pattern(image[..., d], P=P, R=R, method='uniform')
feat_desc_size = P+1
feat_descs = np.zeros((n_sp, feat_desc_size*3))
for i in range(n_sp):
# get centroid of i'th superpixel
img_superpixel[:] = labels == i
cy, cx = [np.rint(x).astype('int') for x in regionprops(img_superpixel)[0].centroid]
        # extract LBP values in a size-by-size window centred on the centroid
x0, y0, x1, y1 = cx-hs, cy-hs, cx+hs+1, cy+hs+1
# clip to boundaries of image
x0 = 0 if x0 < 0 else x0
y0 = 0 if y0 < 0 else y0
x1 = image.shape[1]-1 if x1 > image.shape[1]-2 else x1
y1 = image.shape[0]-1 if y1 > image.shape[0]-2 else y1
# fill in the feature vector for each image channel
for d in range(3):
j, k = d*feat_desc_size, (1+d)*feat_desc_size
patch = lbp_img[d][y0:y1, x0:x1].flat
fv = np.histogram(patch, bins=np.arange(0, feat_desc_size+1), range=(0, feat_desc_size+1))[0]
feat_descs[i, j:k] = fv
return feat_descs
def get_search_region(bbox, frame, ratio):
"""
Calculates coordinates of ratio*width/height of axis-aligned bbox, centred
on the original bbox, constrained by the original size of the image.
Arguments:
bbox = axis-aligned bbox of the form [x0, y0, width, height]
frame = MxNxD Image to constrain bbox by
        ratio = factor by which to scale the bbox width and height
Output:
x0, y0, x1, y1 = Coordinates of expanded axis-aligned bbox
"""
x0, y0, w, h = bbox
ih, iw = frame.shape[:2]
ww, hh = ratio*w, ratio*h
# expand bbox by ratio
x1 = np.min([iw-1, x0 + w/2 + ww/2])
y1 = np.min([ih-1, y0 + h/2 + hh/2])
x0 = np.max([0, x0 + w/2 - ww/2])
y0 = np.max([0, y0 + h/2 - hh/2])
x0, y0, x1, y1 = np.rint(np.array([x0, y0, x1, y1])).astype('int')
return x0, y0, x1, y1
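For example, doubling a 20x20 box whose top-left corner is at (10, 10) inside a 100x100 frame (illustrative values):

import numpy as np
frame = np.zeros((100, 100, 3))
print(get_search_region([10, 10, 20, 20], frame, ratio=2.0))  # (0, 0, 40, 40)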
def worldToVoxelCoord(worldCoord, origin, spacing):
"""
only valid if there is no rotation component
"""
    voxelCoord = np.rint((worldCoord - origin) / spacing).astype(int)
return voxelCoord
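A minimal sketch with made-up origin and spacing values:

import numpy as np
worldCoord = np.array([-100.0, -100.0, -100.0])
origin = np.array([-200.0, -200.0, -200.0])
spacing = np.array([2.0, 2.0, 2.0])
print(worldToVoxelCoord(worldCoord, origin, spacing))  # [50 50 50]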
def calc_all_sigams(data, sigmas):
batchs = 128
num_of_bins = 8
# bins = np.linspace(-1, 1, num_of_bins).astype(np.float32)
# bins = stats.mstats.mquantiles(np.squeeze(data.reshape(1, -1)), np.linspace(0,1, num=num_of_bins))
# data = bins[np.digitize(np.squeeze(data.reshape(1, -1)), bins) - 1].reshape(len(data), -1)
batch_points = np.rint(np.arange(0, data.shape[0] + 1, batchs)).astype(dtype=np.int32)
I_XT = []
num_of_rand = min(800, data.shape[1])
for sigma in sigmas:
# print sigma
I_XT_temp = 0
for i in range(0, len(batch_points) - 1):
new_data = data[batch_points[i]:batch_points[i + 1], :]
rand_indexs = np.random.randint(0, new_data.shape[1], num_of_rand)
new_data = new_data[:, :]
N = new_data.shape[0]
d = new_data.shape[1]
diff_mat = np.linalg.norm(((new_data[:, np.newaxis, :] - new_data)), axis=2)
# print diff_mat.shape, new_data.shape
s0 = 0.2
            # TODO: add leave-one-out validation
res = minimize(optimiaze_func, s0, args=(diff_mat, d, N), method='nelder-mead',
options={'xtol': 1e-8, 'disp': False, 'maxiter': 6})
eta = res.x
diff_mat0 = - 0.5 * (diff_mat / (sigma ** 2 + eta ** 2))
diff_mat1 = np.sum(np.exp(diff_mat0), axis=0)
diff_mat2 = -(1.0 / N) * np.sum(np.log2((1.0 / N) * diff_mat1))
I_XT_temp += diff_mat2 - d * np.log2((sigma ** 2) / (eta ** 2 + sigma ** 2))
# print diff_mat2 - d*np.log2((sigma**2)/(eta**2+sigma**2))
I_XT_temp /= len(batch_points)
I_XT.append(I_XT_temp)
sys.stdout.flush()
return I_XT
def test(mfault):
    from clawpack.clawutil.data import ClawData
    # assumes `from pylab import *` (linspace, meshgrid, figure, plot, axis)
    # and module-level numpy, dtopotools and Mapping imports
length_scale = 1.e-3 # m to km
probdata = ClawData()
probdata.read('setprob.data',force=True)
fault = dtopotools.Fault()
fault.read('fault.data')
mapping = Mapping(fault)
domain_depth = probdata.domain_depth
domain_width = probdata.domain_width
# num of cells here determined in a similar fashion to that in setrun.py
dx = mapping.fault_width/mfault
num_cells_above = numpy.rint(mapping.fault_depth/dx)
dy = mapping.fault_depth/num_cells_above
mx = int(numpy.ceil(domain_width/dx)) # mx
my = int(numpy.ceil(domain_depth/dy)) # my
mr = mx - mfault
    x = linspace(mapping.xcenter - 0.5*mapping.fault_width - numpy.floor(mr/2.0)*dx,
                 mapping.xcenter + 0.5*mapping.fault_width + numpy.ceil(mr/2.0)*dx,
                 mx + 1)
y = linspace(-my*dy, 0.0, my+1)
xc,yc = meshgrid(x,y)
xp,yp = mapping.mapc2p(xc,yc)
figure()
plot(xp*length_scale,yp*length_scale,'k-')
plot(xp.T*length_scale,yp.T*length_scale,'k-')
plot((mapping.xp1*length_scale,mapping.xp2*length_scale),
(mapping.yp1*length_scale,mapping.yp2*length_scale),'-g',linewidth=3)
axis('scaled')
def test(mfault):
from clawpack.clawutil.data import ClawData
probdata = ClawData()
probdata.read('setprob.data',force=True)
fault = dtopotools.Fault(coordinate_specification='top_center')
fault.read('fault.data')
mapping = Mapping(fault)
domain_depth = probdata.domain_depth
domain_width = probdata.domain_width
# num of cells here determined in a similar fashion to that in setrun.py
dx = mapping.fault_width/mfault
num_cells_above = numpy.rint(mapping.fault_depth/dx)
dy = mapping.fault_depth/num_cells_above
mx = int(numpy.ceil(domain_width/dx)) # mx
my = int(numpy.ceil(domain_depth/dy)) # my
mr = mx - mfault
    x = linspace(mapping.xcenter - 0.5*mapping.fault_width - numpy.floor(mr/2.0)*dx,
                 mapping.xcenter + 0.5*mapping.fault_width + numpy.ceil(mr/2.0)*dx,
                 mx + 1)
y = linspace(-my*dy, 0.0, my+1)
xc,yc = meshgrid(x,y)
xp,yp = mapping.mapc2p(xc,yc)
figure()
plot(xp,yp,'k-')
plot(xp.T,yp.T,'k-')
plot((mapping.xp1,mapping.xp2),(mapping.yp1,mapping.yp2),'-g')
axis('scaled')
def topological_defect_array(orientation_field):
"""
Returns a matrix of topological defects for the given orientation field.
Each entry in the matrix is the charge of the defect.
"""
JX = np.diff(orientation_field, axis=0)
JY = np.diff(orientation_field, axis=1)
JX += math.pi * (JX < -math.pi/2.0 ) - math.pi * (JX > math.pi/2.0)
JY += math.pi * (JY < -math.pi/2.0 ) - math.pi * (JY > math.pi/2.0)
return np.rint((np.diff(JY, axis=0) - np.diff(JX, axis=1))/math.pi)
def cumulative_score(ground_truth, estimation, largest_error, integer_rounding=True):
if len(ground_truth) != len(estimation):
er = "ground_truth and estimation have different number of elements"
raise Exception(er)
if integer_rounding:
_estimation = numpy.rint(estimation)
else:
_estimation = estimation
N_e_le_j = (numpy.absolute(_estimation - ground_truth) <= largest_error).sum()
return N_e_le_j * 1.0 / len(ground_truth)
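For instance, with integer rounding enabled, estimates within largest_error of the ground truth count as hits (illustrative values):

import numpy
gt = numpy.array([20, 30, 40, 50])
est = numpy.array([20.4, 32.6, 40.0, 48.9])
print(cumulative_score(gt, est, largest_error=1))  # 0.75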