def values_below_labels(self, segmentation_filename, anatomy_filename, labels=None):
"""
:param segmentation_filename:
:param anatomy_filename:
:param labels:
:return: pandas series with label names and corresponding vectors of labels values
"""
pfi_anat = connect_path_tail_head(self.pfo_in, anatomy_filename)
pfi_segm = connect_path_tail_head(self.pfo_in, segmentation_filename)
assert os.path.exists(pfi_anat)
im_anat = nib.load(pfi_anat)
assert os.path.exists(pfi_segm)
im_segm = nib.load(pfi_segm)
labels_list, labels_names = labels_query(labels, segmentation_array=im_segm.get_data())
labels_values = get_values_below_labels(im_segm, im_anat, labels_list)
return pa.Series(labels_values, index=labels_names)
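A minimal usage sketch for the method above; `app` stands for a hypothetical instance whose `pfo_in` folder contains the two files, and the filenames are placeholders:

se = app.values_below_labels('segm.nii.gz', 'anat.nii.gz', labels=[1, 2, 3])
for name in se.index:
    print('{0}: {1}'.format(name, se[name].mean()))  # mean anatomy intensity under each label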
def global_dist(self, segm_1_filename, segm_2_filename, where_to_save=None,
global_metrics=(global_outline_error, global_dice_score)):
pfi_segm1 = connect_path_tail_head(self.pfo_in, segm_1_filename)
pfi_segm2 = connect_path_tail_head(self.pfo_in, segm_2_filename)
assert os.path.exists(pfi_segm1), pfi_segm1
assert os.path.exists(pfi_segm2), pfi_segm2
if self.verbose > 0:
print("\nGlobal distances between segmentations: \n -> {0} \n -> {1} "
"\nComputations started!".format(pfi_segm1, pfi_segm2))
im_segm1 = nib.load(pfi_segm1)
im_segm2 = nib.load(pfi_segm2)
se_global_distances = pa.Series(np.array([d(im_segm1, im_segm2) for d in global_metrics]),
index=[d.__name__ for d in global_metrics])
if where_to_save is not None:
where_to_save = connect_path_tail_head(self.pfo_out, where_to_save)
se_global_distances.to_pickle(where_to_save)
return se_global_distances
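A usage sketch with the same hypothetical `app` instance; the returned series is indexed by the metric function names:

se = app.global_dist('segm_manual.nii.gz', 'segm_auto.nii.gz',  # placeholder filenames
                     where_to_save='global_distances.pkl')
print(se['global_dice_score'])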
def relabel(self, pfi_input, pfi_output=None, list_old_labels=(), list_new_labels=()):
"""
Masks of :func:`labels_manager.tools.manipulations.relabeller.relabeller` using filename
"""
pfi_in, pfi_out = get_pfi_in_pfi_out(pfi_input, pfi_output, self.pfo_in,
self.pfo_out)
im_labels = nib.load(pfi_in)
data_labels = im_labels.get_data()
data_relabelled = relabeller(data_labels, list_old_labels=list_old_labels,
list_new_labels=list_new_labels)
im_relabelled = set_new_data(im_labels, data_relabelled)
nib.save(im_relabelled, pfi_out)
print('Relabelled image {0} saved in {1}.'.format(pfi_in, pfi_out))
return pfi_out
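For example, to merge labels 2 and 3 into label 1 with a hypothetical `app` instance as above (filenames are placeholders):

app.relabel('segm.nii.gz', 'segm_merged.nii.gz',
            list_old_labels=(2, 3), list_new_labels=(1, 1))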
def toJSON(stats, seg_file, structure_map):
"""Combine stats files to a single JSON file"""
import json
import os
import nibabel as nb
import numpy as np
img = nb.load(seg_file)
data = img.get_data()
voxel2vol = np.prod(img.header.get_zooms())
idx = np.unique(data)
reverse_map = {k:v for v, k in structure_map}
out_dict = dict(zip([reverse_map[val] for val in idx], np.bincount(data.flatten())[idx]))
for key in out_dict.keys():
out_dict[key] = [out_dict[key], voxel2vol * out_dict[key]]
mapper = dict([(0, 'csf'), (1, 'gray'), (2, 'white')])
out_dict.update(**{mapper[idx]: val for idx, val in enumerate(stats)})
out_file = 'segstats.json'
with open(out_file, 'wt') as fp:
json.dump(out_dict, fp, sort_keys=True, indent=4, separators=(',', ': '))
return os.path.abspath(out_file)
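A sketch of the expected inputs: `structure_map` pairs structure names with label values, and `stats` carries the three tissue statistics mapped to csf/gray/white (all values below are invented):

structure_map = [('background', 0), ('hippocampus_l', 17), ('hippocampus_r', 53)]
stats = (1200.0, 4500.0, 3800.0)
out_json = toJSON(stats, 'aseg.nii.gz', structure_map)  # placeholder segmentation path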
def load_inference(base_path='./data/Test/Test_Subject', nii_index=0):
"""Load nii data, whose name is, for example, 'Test_Subject01.nii'.
Arguments:
nii_index: counts from 0.
"""
filename = base_path + str(nii_index + 1).zfill(2) + '.nii'
xs = nib.load(filename).get_data()
# Crop black region to reduce nii volumes.
dummy_ys = np.zeros_like(xs)
xs, *_ = _banish_darkness(xs, dummy_ys)
# Normalize images.
local_max = np.max(xs, axis=(1, 2), keepdims=True)
local_min = np.min(xs, axis=(1, 2), keepdims=True)
xs = (xs - local_min) / (local_max - local_min)
return xs[None, ..., None]
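A usage sketch; the data layout under './data/Test/' and the `_banish_darkness` helper are assumed from the surrounding project:

xs = load_inference(nii_index=0)  # loads './data/Test/Test_Subject01.nii'
print(xs.shape)  # cropped, normalized volume with batch and channel axes added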
def select_training_voxels(input_masks, threshold=2, datatype=np.float32):
"""
    Select voxels for training based on an intensity threshold
Inputs:
- input_masks: list containing all subject image paths for a single modality
- threshold: minimum threshold to apply (after normalizing images with 0 mean and 1 std)
Output:
- rois: list where each element contains the subject binary mask for selected voxels [len(x), len(y), len(z)]
"""
# load images and normalize their intensities
images = [load_nii(image_name).get_data() for image_name in input_masks]
images_norm = [(im.astype(dtype=datatype) - im[np.nonzero(im)].mean()) / im[np.nonzero(im)].std() for im in images]
# select voxels with intensity higher than threshold
rois = [image > threshold for image in images_norm]
return rois
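For instance, building training ROIs from two FLAIR scans (paths are placeholders):

rois = select_training_voxels(['s01_flair.nii.gz', 's02_flair.nii.gz'], threshold=2)
print(rois[0].shape)  # boolean mask with the same shape as the first image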
def select_voxels_from_previous_model(model, train_x_data, options):
"""
Select training voxels from image segmentation masks
"""
    # get scan names and number of modalities used
scans = train_x_data.keys()
modalities = train_x_data[scans[0]].keys()
# select voxels for training. Discard CSF and darker WM in FLAIR.
# flair_scans = [train_x_data[s]['FLAIR'] for s in scans]
# selected_voxels = select_training_voxels(flair_scans, options['min_th'])
# evaluate training scans using the learned model and extract voxels with probability higher than 0.5
    seg_mask = [test_scan(model, dict(train_x_data.items()[s:s + 1]), options, save_nifti=False) > 0.5
                for s in range(len(scans))]
# check candidate segmentations:
# if no voxels have been selected, return candidate voxels on FLAIR modality > 2
flair_scans = [train_x_data[s]['FLAIR'] for s in scans]
images = [load_nii(name).get_data() for name in flair_scans]
images_norm = [(im.astype(dtype=np.float32) - im[np.nonzero(im)].mean()) / im[np.nonzero(im)].std() for im in images]
seg_mask = [im > 2 if np.sum(seg) == 0 else seg for im, seg in zip(images_norm, seg_mask)]
return seg_mask
def default_file_reader(x):
def pil_loader(path):
return Image.open(path).convert('RGB')
def npy_loader(path):
return np.load(path)
def nifti_loader(path):
return nibabel.load(path).get_data()
    if isinstance(x, str):
        if x.endswith('.npy'):
            x = npy_loader(x)
        elif x.endswith('.nii.gz'):
            x = nifti_loader(x)
        else:
            try:
                x = pil_loader(x)
            except IOError:
                raise ValueError('File format is not supported')
    # else:
    #     raise ValueError('x should be a string, but got %s' % type(x))
    return x
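A quick sketch of the dispatch logic, with placeholder paths:

vol = default_file_reader('brain.nii.gz')  # routed to the nibabel loader
arr = default_file_reader('patch.npy')     # routed to the numpy loader
img = default_file_reader('photo.jpg')     # falls through to the PIL loader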
def create_3D_distance_matrix(vox_ijk, epi_fname):
"""Compute distance between voxels in the volume.
Parameters
----------
vox_ijk : n x 3 array
Indices of voxels included in the ROI.
epi_fname : file path
Path to image defining the volume space.
Returns
-------
dmat : array
Dense square distance matrix.
"""
aff = nib.load(epi_fname).affine
vox_ras = nib.affines.apply_affine(aff, vox_ijk)
dmat = squareform(pdist(vox_ras))
return dmat
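A usage sketch, assuming numpy is imported as np; the voxel indices are invented and 'epi.nii.gz' is a placeholder:

vox_ijk = np.array([[10, 12, 8], [11, 12, 8], [30, 40, 20]])
dmat = create_3D_distance_matrix(vox_ijk, 'epi.nii.gz')
print(dmat.shape)  # (3, 3); distances are in scanner (RAS) millimetres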
def extract_data(exp, subj, roi_info):
"""Extract timeseries data from each ROI."""
ts_data = {roi: [] for roi in roi_info}
n_runs = dict(dots=12, sticks=12, rest=8)[exp]
anal_dir = PROJECT["analysis_dir"]
ts_temp = op.join(anal_dir, exp, subj, "reg", "epi", "unsmoothed",
"run_{}", "res4d_xfm.nii.gz")
# Extract ROI data from each run, loading images only once
for run in range(1, n_runs + 1):
run_data = nib.load(ts_temp.format(run)).get_data()
for roi, info in roi_info.iteritems():
roi_ts = extract_from_volume(run_data, info["vox_ijk"])
ts_data[roi].append(roi_ts)
# Combine across runs
ts_data = {roi: np.hstack(ts_data[roi]).T for roi in roi_info}
for roi in roi_info:
assert ts_data[roi].shape[1] == len(roi_info[roi]["vox_ijk"])
return ts_data
def create_histo(folder_path, min_val, max_val, num_bins=None):
if num_bins is None:
num_bins = int(max_val - min_val)
    print(num_bins)
histogram = {}
for f in os.listdir(folder_path):
im = nib.load(os.path.join(folder_path, f))
data = im.get_data()
hist, bins = np.histogram(data, num_bins, range=(min_val, max_val))
        count = 0
        for el in bins[0:-1]:
            try:
                histogram[el] += hist[count]
            except KeyError:
                histogram[el] = hist[count]
            count += 1
return histogram.items()
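For example, pooling an 8-bit intensity histogram over a folder of volumes (path is a placeholder):

pairs = create_histo('/data/volumes', min_val=0, max_val=255)
for left_edge, count in sorted(pairs):
    print('{0}: {1}'.format(left_edge, count))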
def reg_dti_pngs(dti, loc, atlas, outdir):
"""
outdir: directory where output png file is saved
fname: name of output file WITHOUT FULL PATH. Path provided in outdir.
"""
atlas_data = nb.load(atlas).get_data()
dti_data = nb.load(dti).get_data()
b0_data = dti_data[:,:,:,loc]
cmap1 = LinearSegmentedColormap.from_list('mycmap1', ['black', 'magenta'])
cmap2 = LinearSegmentedColormap.from_list('mycmap2', ['black', 'green'])
fig = plot_overlays(atlas_data, b0_data, (cmap1, cmap2))
# name and save the file
fname = os.path.split(dti)[1].split(".")[0] + '.png'
plt.savefig(outdir + '/' + fname, format='png')
def tensor2fa(tensors, tensor_name, dti, derivdir, qcdir):
    '''
    tensors: tensor model fit providing evals and evecs.
    tensor_name: filename used to derive the name of the output FA map.
    dti: path to the DTI image providing the affine.
    derivdir: directory where the FA RGB NIfTI derivative is saved.
    qcdir: directory where the QC pngs are saved.
    '''
dti_data = nb.load(dti)
affine = dti_data.get_affine()
dti_data = dti_data.get_data()
# create FA map
FA = fractional_anisotropy(tensors.evals)
FA[np.isnan(FA)] = 0
# generate the RGB FA map
FA = np.clip(FA, 0, 1)
RGB = color_fa(FA, tensors.evecs)
fname = os.path.split(tensor_name)[1].split(".")[0] + '_fa_rgb.nii.gz'
fa = nb.Nifti1Image(np.array(255 * RGB, 'uint8'), affine)
nb.save(fa, derivdir + fname)
fa_pngs(fa, fname, qcdir)
def resample(self, base, ingested, template):
"""
Resamples the image such that images which have already been aligned
in real coordinates also overlap in the image/voxel space.
**Positional Arguments**
base:
- Image to be aligned
ingested:
- Name of image after alignment
template:
- Image that is the target of the alignment
"""
# Loads images
template_im = nb.load(template)
base_im = nb.load(base)
# Aligns images
target_im = nl.resample_img(base_im,
target_affine=template_im.get_affine(),
target_shape=template_im.get_data().shape,
interpolation="nearest")
# Saves new image
nb.save(target_im, ingested)
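A usage sketch, assuming `reg` is an instance of the enclosing class and `nl` is nilearn.image (filenames are placeholders):

reg.resample('subject_T1.nii.gz',      # image to resample
             'subject_T1_res.nii.gz',  # output filename
             'template_T1.nii.gz')     # image defining the target grid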
def _3d_in_file(in_file):
    ''' if in_file is 3d, return it.
    if 4d, pick an arbitrary volume and return that.
    if in_file is a list of files, return an arbitrary file from
    the list, and an arbitrary volume from that file
    '''
in_file = filemanip.filename_to_list(in_file)[0]
try:
in_file = nb.load(in_file)
except AttributeError:
in_file = in_file
if in_file.get_data().ndim == 3:
return in_file
return nlimage.index_img(in_file, 0)
def test_ROIsPlot(oasis_dir):
""" the BET report capable test """
import nibabel as nb
import numpy as np
labels = nb.load(os.path.join(oasis_dir, 'T_template0_glm_4labelsJointFusion.nii.gz'))
data = labels.get_data()
out_files = []
ldata = np.zeros_like(data)
for i, l in enumerate([1, 3, 4, 2]):
ldata[data == l] = 1
out_files.append(os.path.abspath('label%d.nii.gz' % i))
lfile = nb.Nifti1Image(ldata, labels.affine, labels.header)
lfile.to_filename(out_files[-1])
roi_rpt = ROIsPlot(
generate_report=True,
in_file=os.path.join(oasis_dir, 'T_template0.nii.gz'),
in_mask=out_files[-1],
in_rois=out_files[:-1],
colors=['g', 'y']
)
_smoke_test_report(roi_rpt, 'testROIsPlot.svg')
def _copyxform(ref_image, out_image, message=None):
# Read in reference and output
resampled = nb.load(out_image)
orig = nb.load(ref_image)
if not np.allclose(orig.affine, resampled.affine):
        LOG.debug(
            'Affines of input and reference images do not match, '
            'FMRIPREP will set the reference image headers. '
            'Please check that the x-form matrices of the input dataset '
            'are correct and manually verify the alignment of results.')
# Copy xform infos
qform, qform_code = orig.header.get_qform(coded=True)
sform, sform_code = orig.header.get_sform(coded=True)
header = resampled.header.copy()
header.set_qform(qform, int(qform_code))
header.set_sform(sform, int(sform_code))
header['descrip'] = 'xform matrices modified by %s.' % (message or '(unknown)')
newimg = resampled.__class__(resampled.get_data(), orig.affine, header)
newimg.to_filename(out_image)
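A sketch of the intended call, with placeholder paths and message:

_copyxform('bold_reference.nii.gz', 'bold_resampled.nii.gz',
           message='resampling workflow')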
def getSlices(self, paths):
    image, truth = paths
    image = nib.load(image).get_data()
    truth = nib.load(truth).get_data()
    slicesWithValues = [unique(s) for s in where(truth > 0)]
    sliceAxis = argmin([len(s) for s in slicesWithValues])
    slicesWithValues = slicesWithValues[sliceAxis]
    slc = repeat(-1, 3)
    slc[sliceAxis] = slicesWithValues[0]
    if self.padding is not None:
        image, truth = [padImage(im, self.padding) for im in (image[slc][0], truth[slc][0])]
    else:
        image, truth = (image[slc][0], truth[slc][0])
    return (image, truth)
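A usage sketch; `loader` stands for a hypothetical instance carrying a `padding` attribute, and the paths are placeholders:

image_slice, truth_slice = loader.getSlices(('t1.nii.gz', 'labels.nii.gz'))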
def preprocess(inputfile, outputfile, order=0, df=None, input_key=None, output_key=None):
img = nib.load(inputfile)
data = img.get_data()
affine = img.affine
zoom = img.header.get_zooms()[:3]
data, affine = reslice(data, affine, zoom, (1., 1., 1.), order)
data = np.squeeze(data)
data = np.pad(data, [(0, 256 - len_) for len_ in data.shape], "constant")
if order == 0:
if df is not None:
tmp = np.zeros_like(data)
for target, source in zip(df[output_key], df[input_key]):
tmp[np.where(data == source)] = target
data = tmp
data = np.int32(data)
assert data.ndim == 3, data.ndim
else:
data_sub = data - gaussian_filter(data, sigma=1)
img = sitk.GetImageFromArray(np.copy(data_sub))
img = sitk.AdaptiveHistogramEqualization(img)
data_clahe = sitk.GetArrayFromImage(img)[:, :, :, None]
data = np.concatenate((data_clahe, data[:, :, :, None]), 3)
data = (data - np.mean(data, (0, 1, 2))) / np.std(data, (0, 1, 2))
assert data.ndim == 4, data.ndim
assert np.allclose(np.mean(data, (0, 1, 2)), 0.), np.mean(data, (0, 1, 2))
assert np.allclose(np.std(data, (0, 1, 2)), 1.), np.std(data, (0, 1, 2))
data = np.float32(data)
img = nib.Nifti1Image(data, affine)
nib.save(img, outputfile)
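Two usage sketches matching the two branches; filenames and the label-mapping table are placeholders, and pandas is imported only for the illustration:

import pandas as pd
# order=1: intensity image -> reslice to 1 mm, add a CLAHE channel, z-score normalise
preprocess('t1.nii.gz', 't1_preproc.nii.gz', order=1)
# order=0: label image -> nearest-neighbour reslice, then remap labels via df
df = pd.DataFrame({'raw': [0, 17, 53], 'mapped': [0, 1, 2]})
preprocess('aseg.nii.gz', 'aseg_preproc.nii.gz', order=0,
           df=df, input_key='raw', output_key='mapped')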