def load_ROI_mask(self):
proxy = nib.load(self.FLAIR_FILE)
image_array = np.asarray(proxy.dataobj)
mask = np.ones_like(image_array)
mask[np.where(image_array < 90)] = 0
# img = nib.Nifti1Image(mask, proxy.affine)
# nib.save(img, join(modalities_path,'mask.nii.gz'))
struct_element_size = (20, 20, 20)
mask_augmented = np.pad(mask, [(21, 21), (21, 21), (21, 21)], 'constant', constant_values=(0, 0))
mask_augmented = binary_closing(mask_augmented, structure=np.ones(struct_element_size, dtype=bool))
return mask_augmented[21:-21, 21:-21, 21:-21].astype(bool)
def export_data(prediction_dir, nii_image_dir, tfrecords_dir, export_dir, transformation=None):
for file_path in os.listdir(prediction_dir):
name, prediction, probability = read_prediction_file(os.path.join(prediction_dir, file_path))
if transformation:
image, ground_truth = get_original_image(os.path.join(tfrecords_dir, name + '.tfrecord'), False)
prediction = transformation.transform_image(prediction, probability, image)
# build cv_predictions .nii image
img = nib.Nifti1Image(prediction, np.eye(4))
img.set_data_dtype(dtype=np.uint8)
path = os.path.join(nii_image_dir, name)
adc_name = next(l for l in os.listdir(path) if 'MR_ADC' in l)
export_image = nib.load(os.path.join(nii_image_dir, name, adc_name, adc_name + '.nii'))
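# Overwrite the loaded ADC image's voxel data in place so its header and affine are reused for the export.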
i = export_image.get_data()
i[:] = img.get_data()
# set name to specification and export
_id = next(l for l in os.listdir(path) if 'MR_MTT' in l).split('.')[-1]
export_path = os.path.join(export_dir, 'SMIR.' + name + '.' + _id + '.nii')
nib.save(export_image, export_path)
def batch_works(k):
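# Process the k-th of n_processes contiguous chunks of all_paths; the last chunk absorbs any remainder.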
if k == n_processes - 1:
paths = all_paths[k * int(len(all_paths) / n_processes) : ]
else:
paths = all_paths[k * int(len(all_paths) / n_processes) : (k + 1) * int(len(all_paths) / n_processes)]
for path in paths:
probs = np.load(os.path.join(input_path, path))
pred = np.argmax(probs, axis=3)
fg_prob = 1 - probs[..., 0]
pred = clean_contour(fg_prob, pred)
seg = np.zeros(pred.shape, dtype=np.int16)
seg[pred == 1] = 1
seg[pred == 2] = 2
seg[pred == 3] = 4
img = nib.Nifti1Image(seg, np.eye(4))
nib.save(img, os.path.join(output_path, path.replace('_probs.npy', '.nii.gz')))
def dicom2nifti(folders,outdir=None,extension=None):
'''dicom2nifti will take a list of folders and produce nifti files
in an output directory. If not defined, they will be output in their
original directory.
'''
if isinstance(folders,dict):
folders = list(folders.keys())
if not isinstance(folders,list):
folders = [folders]
outfiles = []
for folder in folders:
lookup = find_dicoms(folder,extension)
for base,dicomlist in lookup.items():
nii = read_series(dicomlist)
if outdir is not None:
outfile = "%s/%s.nii.gz" %(outdir,os.path.basename(base))
else:
outfile = "%s/%s.nii.gz" %(base,os.path.basename(base))
bot.info("Saving %s" %outfile)
nibabel.save(nii,outfile)
outfiles.append(outfile)
return outfiles
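A minimal usage sketch for dicom2nifti above; the DICOM folder paths and output directory are placeholders.
# Hypothetical usage: convert two DICOM series folders into compressed NIfTI files under /tmp/nifti.
nifti_files = dicom2nifti(['/data/sub-01/dicom', '/data/sub-02/dicom'], outdir='/tmp/nifti')
for outfile in nifti_files:
    print(outfile)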
def symmetrise_axial(self, filename_in, filename_out=None, axis='x', plane_intercept=10,
side_to_copy='below', keep_in_data_dimensions=True):
pfi_in, pfi_out = get_pfi_in_pfi_out(filename_in, filename_out, self.pfo_in, self.pfo_out)
im_segm = nib.load(pfi_in)
data_labels = im_segm.get_data()
data_symmetrised = symmetrise_data(data_labels,
axis=axis,
plane_intercept=plane_intercept,
side_to_copy=side_to_copy,
keep_in_data_dimensions=keep_in_data_dimensions)
im_symmetrised = set_new_data(im_segm, data_symmetrised)
nib.save(im_symmetrised, pfi_out)
print('Symmetrised axis {0}, plane_intercept {1}, image of {2} saved in {3}.'.format(axis, plane_intercept, pfi_in, pfi_out))
return pfi_out
def modify_affine(self, filename_in, affine_in, filename_out, q_form=True, s_form=True,
multiplication_side='left'):
pfi_in, pfi_out = get_pfi_in_pfi_out(filename_in, filename_out, self.pfo_in, self.pfo_out)
if isinstance(affine_in, str):
if affine_in.endswith('.txt'):
aff = np.loadtxt(connect_path_tail_head(self.pfo_in, affine_in))
else:
aff = np.load(connect_path_tail_head(self.pfo_in, affine_in))
elif isinstance(affine_in, np.ndarray):
aff = affine_in
else:
raise IOError('parameter affine_in can be a path to an affine matrix (.txt or .npy) or the numpy array '
'corresponding to the affine transformation.')
im = nib.load(pfi_in)
new_im = modify_affine_transformation(im, aff, q_form=q_form, s_form=s_form,
multiplication_side=multiplication_side)
nib.save(new_im, pfi_out)
def apply_small_rotation(self, filename_in, filename_out, angle=np.pi/6, principal_axis='pitch'):
if isinstance(angle, list):
assert isinstance(principal_axis, list)
assert len(principal_axis) == len(angle)
new_aff = np.identity(4)
for pa, an in zip(principal_axis, angle):
aff = get_small_orthogonal_rotation(theta=an, principal_axis=pa)
new_aff = new_aff.dot(aff)
else:
new_aff = get_small_orthogonal_rotation(theta=angle, principal_axis=principal_axis)
pfi_in, pfi_out = get_pfi_in_pfi_out(filename_in, filename_out, self.pfo_in, self.pfo_out)
im = nib.load(pfi_in)
new_im = modify_affine_transformation(im_input=im, new_aff=new_aff, q_form=True, s_form=True,
multiplication_side='right')
nib.save(new_im, pfi_out)
def modify_translational_part(self, filename_in, filename_out, new_translation):
pfi_in, pfi_out = get_pfi_in_pfi_out(filename_in, filename_out, self.pfo_in, self.pfo_out)
im = nib.load(pfi_in)
if isinstance(new_translation, str):
if new_translation.endswith('.txt'):
tr = np.loadtxt(connect_path_tail_head(self.pfo_in, new_translation))
else:
tr = np.load(connect_path_tail_head(self.pfo_in, new_translation))
elif isinstance(new_translation, np.ndarray):
tr = new_translation
elif isinstance(new_translation, list):
tr = np.array(new_translation)
else:
raise IOError('parameter new_translation can be a path to an affine matrix (.txt or .npy) or the numpy array '
'corresponding to the new intended translational part.')
new_im = replace_translational_part(im, tr)
nib.save(new_im, pfi_out)
def relabel(self, pfi_input, pfi_output=None, list_old_labels=(), list_new_labels=()):
"""
Filename-based wrapper of :func:`labels_manager.tools.manipulations.relabeller.relabeller`.
"""
pfi_in, pfi_out = get_pfi_in_pfi_out(pfi_input, pfi_output, self.pfo_in,
self.pfo_out)
im_labels = nib.load(pfi_in)
data_labels = im_labels.get_data()
data_relabelled = relabeller(data_labels, list_old_labels=list_old_labels,
list_new_labels=list_new_labels)
im_relabelled = set_new_data(im_labels, data_relabelled)
nib.save(im_relabelled, pfi_out)
print('Relabelled image {0} saved in {1}.'.format(pfi_in, pfi_out))
return pfi_out
def tensor2fa(tensors, tensor_name, dti, derivdir, qcdir):
'''
tensors: fitted tensor object exposing evals and evecs.
tensor_name: path of the tensor file; its basename is used to build the output file name.
dti: path to the DTI image supplying the affine.
derivdir: output directory for the RGB FA map.
qcdir: output directory for the QC snapshots.
'''
dti_data = nb.load(dti)
affine = dti_data.get_affine()
dti_data = dti_data.get_data()
# create FA map
FA = fractional_anisotropy(tensors.evals)
FA[np.isnan(FA)] = 0
# generate the RGB FA map
FA = np.clip(FA, 0, 1)
RGB = color_fa(FA, tensors.evecs)
fname = os.path.split(tensor_name)[1].split(".")[0] + '_fa_rgb.nii.gz'
fa = nb.Nifti1Image(np.array(255 * RGB, 'uint8'), affine)
nb.save(fa, derivdir + fname)
fa_pngs(fa, fname, qcdir)
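A hedged usage sketch for tensor2fa above, assuming dipy is available; the file paths are placeholders and the tensor fit follows the standard dipy TensorModel workflow.
# Hypothetical usage: fit a diffusion tensor model with dipy, then write the RGB FA map.
import nibabel as nb
from dipy.core.gradients import gradient_table
from dipy.io import read_bvals_bvecs
from dipy.reconst.dti import TensorModel

dwi_path = 'sub-01_dwi.nii.gz'                      # placeholder input paths
bvals, bvecs = read_bvals_bvecs('sub-01.bval', 'sub-01.bvec')
gtab = gradient_table(bvals, bvecs)
data = nb.load(dwi_path).get_data()
tensors = TensorModel(gtab).fit(data)               # exposes .evals and .evecs as used above
# derivdir is concatenated directly with the file name, so keep the trailing slash
tensor2fa(tensors, 'sub-01_tensor.nii.gz', dwi_path, 'derivatives/', 'qc/')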
def resample(self, base, ingested, template):
"""
Resamples the image such that images which have already been aligned
in real coordinates also overlap in the image/voxel space.
**Positional Arguments**
base:
- Image to be aligned
ingested:
- Name of image after alignment
template:
- Image that is the target of the alignment
"""
# Loads images
template_im = nb.load(template)
base_im = nb.load(base)
# Aligns images
target_im = nl.resample_img(base_im,
target_affine=template_im.get_affine(),
target_shape=template_im.get_data().shape,
interpolation="nearest")
# Saves new image
nb.save(target_im, ingested)
pass
def test_register_dwi():
fdata, fbval, fbvec = dpd.get_data('small_64D')
with nbtmp.InTemporaryDirectory() as tmpdir:
# Use an abbreviated data-set:
img = nib.load(fdata)
data = img.get_data()[..., :10]
nib.save(nib.Nifti1Image(data, img.affine),
op.join(tmpdir, 'data.nii.gz'))
# Convert from npy to txt:
bvals = np.load(fbval)
bvecs = np.load(fbvec)
np.savetxt(op.join(tmpdir, 'bvals.txt'), bvals[:10])
np.savetxt(op.join(tmpdir, 'bvecs.txt'), bvecs[:10])
reg_file = register_dwi(op.join(tmpdir, 'data.nii.gz'),
op.join(tmpdir, 'bvals.txt'),
op.join(tmpdir, 'bvecs.txt'))
npt.assert_(op.exists(reg_file))
def make_dti_data(out_fbval, out_fbvec, out_fdata, out_shape=(5, 6, 7)):
"""
Create a synthetic data-set with a single shell acquisition
out_fbval, out_fbvec, out_fdata : str
Full paths to generated data and bval/bvec files
out_shape : tuple
The 3D shape of the output volume
"""
fimg, fbvals, fbvecs = dpd.get_data('small_64D')
img = nib.load(fimg)
bvals, bvecs = dio.read_bvals_bvecs(fbvals, fbvecs)
gtab = dpg.gradient_table(bvals, bvecs)
# Simulate a signal based on the DTI model:
signal = single_tensor(gtab, S0=100)
DWI = np.zeros(out_shape + (len(gtab.bvals), ))
DWI[:] = signal
nib.save(nib.Nifti1Image(DWI, img.affine), out_fdata)
np.savetxt(out_fbval, bvals)
np.savetxt(out_fbvec, bvecs)
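A small sketch of calling the synthetic-data helper above; the output file names are placeholders.
# Hypothetical usage: write a 5x6x7 synthetic single-shell DWI plus matching bval/bvec text files.
make_dti_data('synthetic.bval', 'synthetic.bvec', 'synthetic_dwi.nii.gz', out_shape=(5, 6, 7))
print(nib.load('synthetic_dwi.nii.gz').shape)  # (5, 6, 7, number of gradient directions)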
def exportNyp(self, event):
"""Export histogram counts as a numpy array."""
outFileName = self.basename + '_identifier' \
+ '_pcMax' + str(self.initTpl[0]) \
+ '_pcMin' + str(self.initTpl[1]) \
+ '_sc' + str(int(self.initTpl[2]))
if self.segmType == 'ncut':
outFileName = outFileName.replace('identifier', 'volHistLabels')
outFileName = outFileName.replace('.', 'pt')
np.save(outFileName, self.volHistMask)
print "successfully exported histogram colors as: \n" + \
outFileName
elif self.segmType == 'main':
outFileName = outFileName.replace('identifier', 'volHist')
outFileName = outFileName.replace('.', 'pt')
np.save(outFileName, self.counts)
print "successfully exported histogram counts as: \n" + \
outFileName
else:
return
def preprocess(inputfile, outputfile, order=0, df=None, input_key=None, output_key=None):
img = nib.load(inputfile)
data = img.get_data()
affine = img.affine
zoom = img.header.get_zooms()[:3]
data, affine = reslice(data, affine, zoom, (1., 1., 1.), order)
data = np.squeeze(data)
data = np.pad(data, [(0, 256 - len_) for len_ in data.shape], "constant")
if order == 0:
if df is not None:
tmp = np.zeros_like(data)
for target, source in zip(df[output_key], df[input_key]):
tmp[np.where(data == source)] = target
data = tmp
data = np.int32(data)
assert data.ndim == 3, data.ndim
else:
data_sub = data - gaussian_filter(data, sigma=1)
img = sitk.GetImageFromArray(np.copy(data_sub))
img = sitk.AdaptiveHistogramEqualization(img)
data_clahe = sitk.GetArrayFromImage(img)[:, :, :, None]
data = np.concatenate((data_clahe, data[:, :, :, None]), 3)
data = (data - np.mean(data, (0, 1, 2))) / np.std(data, (0, 1, 2))
assert data.ndim == 4, data.ndim
assert np.allclose(np.mean(data, (0, 1, 2)), 0.), np.mean(data, (0, 1, 2))
assert np.allclose(np.std(data, (0, 1, 2)), 1.), np.std(data, (0, 1, 2))
data = np.float32(data)
img = nib.Nifti1Image(data, affine)
nib.save(img, outputfile)
def roi_from_bbox(
input_file,
bbox,
output_file):
""" Create a ROI image from a bounding box.
Parameters
----------
input_file: str
the reference image where the bbox is defined.
bbox: 6-uplet
the corner of the bbox in voxel coordinates: xmin, xmax, ymin, ymax,
zmin, zmax.
output_file: str
the desired ROI image file.
"""
# Load the reference image and generate a grid
im = nibabel.load(input_file)
xv, yv, zv = numpy.meshgrid(
numpy.linspace(0, im.shape[0] - 1, im.shape[0]),
numpy.linspace(0, im.shape[1] - 1, im.shape[1]),
numpy.linspace(0, im.shape[2] - 1, im.shape[2]))
xv = xv.astype(int)
yv = yv.astype(int)
zv = zv.astype(int)
# Intersect the grid with the bbox
xa = numpy.bitwise_and(xv >= bbox[0], xv <= bbox[1])
ya = numpy.bitwise_and(yv >= bbox[2], yv <= bbox[3])
za = numpy.bitwise_and(zv >= bbox[4], zv <= bbox[5])
# Generate bbox indices
indices = numpy.bitwise_and(numpy.bitwise_and(xa, ya), za)
# Generate/save ROI
roi = numpy.zeros(im.shape, dtype=int)
roi[xv[indices].tolist(), yv[indices].tolist(), zv[indices].tolist()] = 1
roi_im = nibabel.Nifti1Image(roi, affine=im.get_affine())
nibabel.save(roi_im, output_file)
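A usage sketch for roi_from_bbox above; the image names and bounding box are placeholders.
# Hypothetical usage: binary ROI covering voxels with x in [10, 20], y in [15, 30], z in [5, 12].
roi_from_bbox(
    input_file='t1.nii.gz',
    bbox=(10, 20, 15, 30, 5, 12),
    output_file='t1_roi.nii.gz')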
def merge_fibers(tractograms, tempdir=None):
""" Merge tractograms.
Parameters
----------
tractograms: list of str
paths to the input tractograms.
tempdir: str, default None
a temporary directory to store intermediate tractogram.
Returns
-------
merge_tractogram_file: str
all the streamlines in one TRK file.
"""
# Check existence of input file
for path in tractograms:
if not os.path.isfile(path):
raise ValueError("File does not exist: {0}.".format(path))
# Create a temporary directory to store an intermediate tractogram
tempdir = tempfile.mkdtemp(prefix="tractconverter_", dir=tempdir)
# Combine tractograms in one file
trk = nibabel.streamlines.load(tractograms[0])
for trk_path in tractograms[1:]:
part_trk = nibabel.streamlines.load(trk_path)
trk.streamlines.extend(part_trk.streamlines)
merge_tractogram_file = os.path.join(tempdir, "tmp.trk")
trk.save(merge_tractogram_file)
return merge_tractogram_file
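A short sketch of merging two tractograms with merge_fibers above; the TRK paths are placeholders.
# Hypothetical usage: concatenate the streamlines of two TRK files into one temporary tractogram.
merged_trk = merge_fibers(['bundle_left.trk', 'bundle_right.trk'], tempdir='/tmp')
print(len(nibabel.streamlines.load(merged_trk).streamlines))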
def extract_image(in_file, index, out_file=None):
""" Extract the image at 'index' position.
Parameters
----------
in_file: str (mandatory)
the input image.
index: int (mandatory)
the index along the last image dimension to extract.
out_file: str (optional, default None)
the name of the extracted image file.
Returns
-------
out_file: str
the name of the extracted image file.
"""
# Set default output if necessary
dirname = os.path.dirname(in_file)
basename = os.path.basename(in_file).split(".")[0]
if out_file is None:
out_file = os.path.join(
dirname, "extract{0}_{1}.nii.gz".format(index, basename))
# Extract the image of interest
image = nibabel.load(in_file)
affine = image.get_affine()
extracted_array = image.get_data()[..., index]
extracted_image = nibabel.Nifti1Image(extracted_array, affine)
nibabel.save(extracted_image, out_file)
return out_file
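A sketch of pulling a single 3D volume out of a 4D series with extract_image above; the input path is a placeholder.
# Hypothetical usage: extract the first volume (index 0) of a 4D DWI series.
b0_file = extract_image('dwi_4d.nii.gz', index=0)
print(b0_file)  # e.g. extract0_dwi_4d.nii.gz in the same directory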
def saveImageAsNifti(imageToSave,
imageName,
imageOriginalName,
imageType):
printFileNames = False
if printFileNames:
print(" ... Saving image in {}".format(imageName))
[imageData,img_proxy] = load_nii(imageOriginalName, printFileNames)
# Generate the nii file
niiToSave = nib.Nifti1Image(imageToSave, img_proxy.affine)
niiToSave.set_data_dtype(imageType)
dim = len(imageToSave.shape)
zooms = list(img_proxy.header.get_zooms()[:dim])
if len(zooms) < dim :
zooms = zooms + [1.0]*(dim-len(zooms))
niiToSave.header.set_zooms(zooms)
nib.save(niiToSave, imageName)
print "... Image succesfully saved..."
## ========= For Matlab files ========== ##
def save(self, image, outpath):
""" A method that save the image data and associated metadata.
Parameters
----------
image: Image
the image to be saved.
outpath: str
the path where the the image will be saved.
"""
diag = (1. / image.spacing).tolist() + [1]
_image = nibabel.Nifti1Image(image.data, numpy.diag(diag))
nibabel.save(_image, outpath)
def decompose_vol2cube(vol_data, batch_size, cube_size, n_chn, ita):
cube_list = []
# get parameters for decompose
fold, ovlap = fit_cube_param(vol_data.shape, cube_size, ita)
dim = np.asarray(vol_data.shape)
# decompose
for R in range(0, fold[0]):
r_s = R*cube_size - R*ovlap[0]
r_e = r_s + cube_size
if r_e >= dim[0]:
r_s = dim[0] - cube_size
r_e = r_s + cube_size
for C in range(0, fold[1]):
c_s = C*cube_size - C*ovlap[1]
c_e = c_s + cube_size
if c_e >= dim[1]:
c_s = dim[1] - cube_size
c_e = c_s + cube_size
for H in range(0, fold[2]):
h_s = H*cube_size - H*ovlap[2]
h_e = h_s + cube_size
if h_e >= dim[2]:
h_s = dim[2] - cube_size
h_e = h_s + cube_size
# partition multiple channels
cube_temp = vol_data[r_s:r_e, c_s:c_e, h_s:h_e]
cube_batch = np.zeros([batch_size, cube_size, cube_size, cube_size, n_chn]).astype('float32')
cube_batch[0, :, :, :, 0] = copy.deepcopy(cube_temp)
# save
cube_list.append(cube_batch)
return cube_list
# compose list of label cubes into a label volume
def remove_minor_cc(vol_data, rej_ratio, rename_map):
"""Remove small connected components refer to rejection ratio"""
"""Usage
# rename_map = [0, 205, 420, 500, 550, 600, 820, 850]
# nii_path = '/home/xinyang/project_xy/mmwhs2017/dataset/ct_output/test/test_4.nii'
# vol_file = nib.load(nii_path)
# vol_data = vol_file.get_data().copy()
# ref_affine = vol_file.affine
# rem_vol = remove_minor_cc(vol_data, rej_ratio=0.2, class_n=8, rename_map=rename_map)
# # save
# rem_path = 'rem_cc.nii'
# rem_vol_file = nib.Nifti1Image(rem_vol, ref_affine)
# nib.save(rem_vol_file, rem_path)
#===# possible be parallel in future
"""
rem_vol = copy.deepcopy(vol_data)
class_n = len(rename_map)
# retrieve all classes
for c in range(1, class_n):
print('processing class %d...' % c)
class_idx = (vol_data==rename_map[c])*1
class_vol = np.sum(class_idx)
labeled_cc, num_cc = measurements.label(class_idx)
# retrieve all connected components in this class
for cc in range(1, num_cc+1):
single_cc = ((labeled_cc==cc)*1)
single_vol = np.sum(single_cc)
# remove if too small
if single_vol / (class_vol*1.0) < rej_ratio:
rem_vol[labeled_cc==cc] = 0
return rem_vol
def test_adjust_nifti_replace_translational_part_F_F():
pfi_input = jph(root_dir, 'data_examples', 'acee.nii.gz')
if not os.path.exists(pfi_input):
generate_figures()
translation = [1, 2, 3]
pfi_output = jph(root_dir, 'data_examples', 'acee_new_header.nii.gz')
im_a_cee = nib.load(pfi_input)
initial_affine = im_a_cee.affine
initial_qform = im_a_cee.get_qform()
initial_sform = im_a_cee.get_sform()
new_im = replace_translational_part(im_a_cee, translation, q_form=False, s_form=False)
nib.save(new_im, pfi_output)
im_a_cee_new = nib.load(pfi_output)
final_affine = im_a_cee_new.affine
final_qform = im_a_cee_new.get_qform()
final_sform = im_a_cee_new.get_sform()
new_transformation_expected = np.copy(initial_affine)
new_transformation_expected[:3, 3] = translation
# see documentation http://nipy.org/nibabel/nifti_images.html#choosing-image-affine
if im_a_cee.header['sform_code'] > 0 : # affine -> sform affine
assert_array_almost_equal(initial_affine, final_affine)
assert_array_almost_equal(initial_qform, final_qform)
assert_array_almost_equal(initial_sform, final_sform)
elif im_a_cee.header['qform_code'] > 0: # affine -> qform affine
assert_array_almost_equal(initial_affine, final_affine)
assert_array_almost_equal(initial_qform, final_qform)
assert_array_almost_equal(initial_sform, final_sform)
else: # affine -> header-off affine
assert_array_almost_equal(new_transformation_expected, final_affine)
assert_array_almost_equal(initial_qform, final_qform)
assert_array_almost_equal(initial_sform, final_sform)
def test_adjust_nifti_translation_path_T_T():
pfi_input = jph(root_dir, 'data_examples', 'acee.nii.gz')
if not os.path.exists(pfi_input):
generate_figures()
translation = [1, 2, 3]
pfi_output = jph(root_dir, 'data_examples', 'acee_new_header.nii.gz')
im_a_cee = nib.load(pfi_input)
initial_affine = im_a_cee.affine # sform
initial_qform = im_a_cee.get_qform()
initial_sform = im_a_cee.get_sform()
new_im = replace_translational_part(im_a_cee, translation, q_form=True, s_form=True)
nib.save(new_im, pfi_output)
im_a_cee_new = nib.load(pfi_output)
final_affine = im_a_cee_new.affine
final_qform = im_a_cee_new.get_qform()
final_sform = im_a_cee_new.get_sform()
# all must be new
new_transformation = np.copy(initial_affine)
new_transformation[:3, 3] = translation
assert_array_almost_equal(new_transformation, final_qform)
assert_array_almost_equal(new_transformation, final_affine)
assert_array_almost_equal(new_transformation, final_sform)
# check all of the original image is still the same
im_a_cee_reloaded = nib.load(pfi_input)
reloaded_affine = im_a_cee_reloaded.affine # sform
reloaded_qform = im_a_cee_reloaded.get_qform()
reloaded_sform = im_a_cee_reloaded.get_sform()
assert_array_almost_equal(initial_affine, reloaded_affine)
assert_array_almost_equal(initial_qform, reloaded_qform)
assert_array_almost_equal(initial_sform, reloaded_sform)
def extend_slice_new_dimension(self, pfi_input, pfi_output=None, new_axis=3, num_slices=10):
pfi_in, pfi_out = get_pfi_in_pfi_out(pfi_input, pfi_output, self.pfo_in, self.pfo_out)
im_slice = nib.load(pfi_in)
data_slice = im_slice.get_data()
data_extended = np.stack([data_slice, ] * num_slices, axis=new_axis)
im_extended = set_new_data(im_slice, data_extended)
nib.save(im_extended, pfi_out)
print('Extended image of {0} saved in {1}.'.format(pfi_in, pfi_out))
return pfi_out
def merge_from_4d(self, pfi_input, pfi_output=None):
pfi_in, pfi_out = get_pfi_in_pfi_out(pfi_input, pfi_output, self.pfo_in, self.pfo_out)
im_labels_4d = nib.load(pfi_in)
data_labels_4d = im_labels_4d.get_data()
assert len(data_labels_4d.shape) == 4
data_merged_in_3d = merge_labels_from_4d(data_labels_4d)
im_merged_in_3d = set_new_data(im_labels_4d, data_merged_in_3d)
nib.save(im_merged_in_3d, pfi_out)
print('Merged labels from 4d image {0} saved in {1}.'.format(pfi_in, pfi_out))
return pfi_out
def cut_4d_volume_with_a_1_slice_mask(self, pfi_input, filename_mask, pfi_output=None):
pfi_in, pfi_out = get_pfi_in_pfi_out(pfi_input, pfi_output, self.pfo_in, self.pfo_out)
pfi_mask = connect_path_tail_head(self.pfo_in, filename_mask)
im_dwi = nib.load(pfi_in)
im_mask = nib.load(pfi_mask)
im_masked = cut_4d_volume_with_a_1_slice_mask_nib(im_dwi, im_mask)
nib.save(im_masked, pfi_out)
def normalise_below_label(self, pfi_input, pfi_output, filename_segm, labels, stats=np.median):
pfi_in, pfi_out = get_pfi_in_pfi_out(pfi_input, pfi_output, self.pfo_in, self.pfo_out)
pfi_segm = connect_path_tail_head(self.pfo_in, filename_segm)
im_input = nib.load(pfi_in)
im_segm = nib.load(pfi_segm)
labels_list, labels_names = labels_query(labels, im_segm.get_data())
im_out = normalise_below_labels(im_input, labels_list, labels, stats=stats, exclude_first_label=True)
nib.save(im_out, pfi_out)
def get_contour_from_segmentation(self, pfi_input_segmentation, pfi_output_contour, omit_axis=None, verbose=0):
pfi_in, pfi_out = get_pfi_in_pfi_out(pfi_input_segmentation, pfi_output_contour, self.pfo_in, self.pfo_out)
im_segm = nib.load(pfi_in)
im_contour = contour_from_segmentation(im_segm, omit_axis=omit_axis, verbose=verbose)
nib.save(im_contour, pfi_out)
def erase_labels(self, pfi_input, pfi_output=None, labels_to_erase=()):
pfi_in, pfi_out = get_pfi_in_pfi_out(pfi_input, pfi_output, self.pfo_in, self.pfo_out)
im_labels = nib.load(pfi_in)
data_labels = im_labels.get_data()
data_erased = erase_labels(data_labels, labels_to_erase=labels_to_erase)
im_erased = set_new_data(im_labels, data_erased)
nib.save(im_erased, pfi_out)
print('Erased labels from image {0} saved in {1}.'.format(pfi_in, pfi_out))
return pfi_out