/usr/share/pyshared/mvpa2/datasets/mri.py is in python-mvpa2 2.1.0-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Dataset for magnetic resonance imaging (MRI) data.
This module offers functions to import into PyMVPA MRI data from files
in any format supported by NiBabel_ (e.g. NIfTI, MINC, Analyze), and
export PyMVPA datasets back into data formats supported by NiBabel_.
.. _NiBabel: http://nipy.sourceforge.net/nibabel
"""
__docformat__ = 'restructuredtext'
from mvpa2.base import externals
externals.exists('nibabel', raise_=True)
import sys
import numpy as np
from mvpa2.support.copy import deepcopy
from mvpa2.misc.support import Event
from mvpa2.base.collections import DatasetAttribute
from mvpa2.base.dataset import _expand_attribute
if __debug__:
from mvpa2.base import debug
from mvpa2.datasets.base import Dataset
from mvpa2.mappers.fx import _uniquemerge2literal
from mvpa2.mappers.flatten import FlattenMapper
from mvpa2.mappers.boxcar import BoxcarMapper
from mvpa2.base import warning
def _data2img(data, hdr=None, imgtype=None):
    # Wrap a t,x,y,z data array into a nibabel image instance.
    # NOTE(review): as visible here the function only resolves the default
    # image type and never constructs or returns an image -- it may have
    # been truncated during extraction; confirm against upstream PyMVPA.
    # input data is t,x,y,z
    # let's try whether we can get it done with nibabel
    import nibabel
    if imgtype is None:
        # default is NIfTI1
        imgtype = nibabel.Nifti1Image
    else:
        # NOTE(review): `itype` is assigned but never used in the visible
        # code -- presumably part of logic continuing beyond this view.
        itype = imgtype
def _img2data(src):
    """Dissect an image source into a (data, header, image class) triple.

    `src` may be a filename or an already loaded nibabel image.  Returns
    None for unsupported input (including None itself).
    """
    # bail out right away when nothing was provided
    # XXX feels a little strange to handle this so deep inside, but well...
    if src is None:
        return None
    # let's try whether we can get it done with nibabel
    import nibabel
    # a string is treated as a filename; anything else as a ready image
    img = nibabel.load(src) if isinstance(src, basestring) else src
    if not isinstance(img, nibabel.spatialimages.SpatialImage):
        # no clue what it is
        return None
    # nibabel image: dissect and return pieces
    return _get_txyz_shaped(img.get_data()), img.get_header(), img.__class__
def map2nifti(dataset, data=None, imghdr=None, imgtype=None):
    """Maps data(sets) into the original dataspace and wraps it into an Image.

    Parameters
    ----------
    dataset : Dataset
      The mapper of this dataset is used to perform the reverse-mapping.
    data : ndarray or Dataset, optional
      The data to be wrapped into NiftiImage. If None (default), it
      would wrap samples of the provided dataset. If it is a Dataset
      instance -- takes its samples for mapping.
    imghdr : None or dict, optional
      Image header data. If None, the header is taken from `dataset.a.imghdr`.
    imgtype : None or class, optional
      Image class to be used for the instance. If None, the type is taken
      from `dataset.a.imgtype`.

    Returns
    -------
    Image
      Instance of a class derived from
      :class:`nibabel.spatialimages.SpatialImage`, such as Nifti1Image.

    Raises
    ------
    ValueError
      If the image type/header combination cannot be handled by nibabel.
    """
    import nibabel
    if data is None:
        data = dataset.samples
    elif isinstance(data, Dataset):
        # ease users life
        data = data.samples
    # call the appropriate function to map single samples or multiples
    if len(data.shape) > 1:
        dsarray = dataset.a.mapper.reverse(data)
    else:
        dsarray = dataset.a.mapper.reverse1(data)
    if imghdr is None:
        if 'imghdr' in dataset.a:
            imghdr = dataset.a.imghdr
        elif __debug__:
            debug('DS_NIFTI', 'No image header found. Using defaults.')
    if imgtype is None:
        if 'imgtype' in dataset.a:
            imgtype = dataset.a.imgtype
        else:
            imgtype = nibabel.Nifti1Image
            if __debug__:
                debug('DS_NIFTI',
                      'No image type found in %s. Using default Nifti1Image.'
                      % (dataset.a))
    # Augment header if data dsarray dtype could not be represented
    # with imghdr.get_data_dtype()
    if issubclass(imgtype, nibabel.spatialimages.SpatialImage) \
       and (imghdr is None or hasattr(imghdr, 'get_data_dtype')):
        # we can handle the desired image type and hdr with nibabel
        # use of `None` for the affine should cause to pull it from
        # the header
        return imgtype(_get_xyzt_shaped(dsarray), None, imghdr)
    else:
        raise ValueError(
            "Got imgtype=%s and imghdr=%s -- cannot generate an Image"
            % (imgtype, imghdr))
    # BUGFIX: the original ended with an unreachable
    # `return RuntimeError(...)` -- it *returned* (not raised) an exception
    # instance and could never execute since both branches above exit;
    # the dead line has been removed.
def fmri_dataset(samples, targets=None, chunks=None, mask=None,
                 sprefix='voxel', tprefix='time', add_fa=None,):
    """Create a dataset from an fMRI timeseries image.

    The timeseries image serves as the samples data, with each volume becoming
    a sample. All 3D volume samples are flattened into one-dimensional feature
    vectors, optionally being masked (i.e. subset of voxels corresponding to
    non-zero elements in a mask image).

    In addition to (optional) samples attributes for targets and chunks the
    returned dataset contains a number of additional attributes:

    Samples attributes (per each volume):

      * volume index (time_indices)
      * volume acquisition time (time_coord)

    Feature attributes (per each voxel):

      * voxel indices (voxel_indices), sometimes referred to as ijk

    Dataset attributes:

      * dump of the image (e.g. NIfTI) header data (imghdr)
      * class of the image (e.g. Nifti1Image) (imgtype)
      * volume extent (voxel_dim)
      * voxel extent (voxel_eldim)

    The default attribute name is listed in parenthesis, but may be altered by
    the corresponding prefix arguments. The validity of the attribute values
    relies on correct settings in the NIfTI image header.

    Parameters
    ----------
    samples : str or NiftiImage or list
      fMRI timeseries, specified either as a filename (single file 4D image),
      an image instance (4D image), or a list of filenames or image instances
      (each list item corresponding to a 3D volume).
    targets : scalar or sequence
      Label attribute for each volume in the timeseries, or a scalar value
      that is assigned to all samples.
    chunks : scalar or sequence
      Chunk attribute for each volume in the timeseries, or a scalar value
      that is assigned to all samples.
    mask : str or NiftiImage
      Filename or image instance of a 3D volume mask. Voxels corresponding to
      non-zero elements in the mask will be selected. The mask has to be in
      the same space (orientation and dimensions) as the timeseries image.
    sprefix : str or None
      Prefix for attribute names describing spatial properties of the
      timeseries. If None, no such attributes are stored in the dataset.
    tprefix : str or None
      Prefix for attribute names describing temporal properties of the
      timeseries. If None, no such attributes are stored in the dataset.
    add_fa : dict or None
      Optional dictionary with additional volumetric data that shall be
      stored as feature attributes in the dataset. The dictionary key serves
      as the feature attribute name. Each value might be of any type
      supported by the 'mask' argument of this function.

    Returns
    -------
    Dataset
    """
    # load the samples
    imgdata, imghdr, imgtype = _load_anyimg(samples, ensure=True,
                                            enforce_dim=4)
    # figure out what the mask is, but only handle known cases, the rest
    # goes directly into the mapper which maybe knows more
    maskimg = _load_anyimg(mask)
    # IMPROVED: positive condition instead of `if ... is None: pass / else:`
    if maskimg is not None:
        # take just data and ignore the header
        mask = maskimg[0]
    # compile the samples attributes
    sa = {}
    if targets is not None:
        sa['targets'] = _expand_attribute(targets, imgdata.shape[0], 'targets')
    if chunks is not None:
        sa['chunks'] = _expand_attribute(chunks, imgdata.shape[0], 'chunks')
    # create a dataset
    ds = Dataset(imgdata, sa=sa)
    if sprefix is None:
        space = None
    else:
        space = sprefix + '_indices'
    ds = ds.get_mapped(FlattenMapper(shape=imgdata.shape[1:], space=space))
    # now apply the mask if any
    if mask is not None:
        flatmask = ds.a.mapper.forward1(mask)
        # direct slicing is possible, and it is potentially more efficient,
        # so let's use it
        #mapper = StaticFeatureSelection(flatmask)
        #ds = ds.get_mapped(StaticFeatureSelection(flatmask))
        ds = ds[:, flatmask != 0]
    # load and store additional feature attributes
    if add_fa is not None:
        for fattr in add_fa:
            value = _load_anyimg(add_fa[fattr], ensure=True)[0]
            ds.fa[fattr] = ds.a.mapper.forward1(value)
    # store interesting props in the dataset
    ds.a['imghdr'] = imghdr
    ds.a['imgtype'] = imgtype
    # If there is a space assigned , store the extent of that space
    if sprefix is not None:
        ds.a[sprefix + '_dim'] = imgdata.shape[1:]
        # 'voxdim' is (x,y,z) while 'samples' are (t,z,y,x)
        ds.a[sprefix + '_eldim'] = _get_voxdim(imghdr)
        # TODO extend with the unit
    if tprefix is not None:
        ds.sa[tprefix + '_indices'] = np.arange(len(ds), dtype='int')
        ds.sa[tprefix + '_coords'] = np.arange(len(ds), dtype='float') \
                                     * _get_dt(imghdr)
        # TODO extend with the unit
    return ds
def _get_voxdim(hdr):
"""Get the size of a voxel from some image header format."""
return hdr.get_zooms()[:-1]
def _get_dt(hdr):
"""Get the TR of a fMRI timeseries from some image header format."""
return hdr.get_zooms()[-1]
def _get_txyz_shaped(arr):
# we get the data as x,y,z[,t] but we want to have the time axis first
# if any
if len(arr.shape) == 4:
arr = np.rollaxis(arr, -1)
return arr
def _get_xyzt_shaped(arr):
# we get the data as [t,]x,y,z but we want to have the time axis last
# if any
if len(arr.shape) == 4:
arr = np.rollaxis(arr, 0, 4)
return arr
def _load_anyimg(src, ensure=False, enforce_dim=None):
    """Load/access NIfTI data from files or instances.

    Parameters
    ----------
    src : str or NiftiImage or list
      Filename of a NIfTI image or a `NiftiImage` instance. A list or tuple
      of such entries is loaded item by item and stacked along the leading
      (time) axis.
    ensure : bool, optional
      If True, throw ValueError exception if cannot be loaded.
    enforce_dim : int or None
      If not None, it is the dimensionality of the data to be enforced,
      commonly 4D for the data, and 3D for the mask in case of fMRI.

    Returns
    -------
    tuple or None
      If the source is not supported None is returned. Otherwise a
      tuple of (imgdata, imghdr, imgtype)

    Raises
    ------
    ValueError
      If there is a problem with data (variable dimensionality) or
      failed to load data and ensure=True.
    """
    imgdata = imghdr = imgtype = None
    # figure out whether we have a list of things to load and handle that
    # first
    if isinstance(src, (list, tuple)) and len(src) > 0:
        # load from a list of given entries
        srcs = [_load_anyimg(s, ensure=ensure, enforce_dim=enforce_dim)
                for s in src]
        if __debug__:
            # lets check if they all have the same dimensionality
            # besides the leading one
            shapes = [s[0].shape[1:] for s in srcs]
            if not np.all([s == shapes[0] for s in shapes]):
                raise ValueError(
                    "Input volumes vary in their shapes: %s" % (shapes,))
        # Combine them all into a single beast
        # will be t,x,y,z
        imgdata = np.vstack([s[0] for s in srcs])
        imghdr, imgtype = srcs[0][1:3]
    else:
        # try opening the beast; this might yield None in case of an
        # unsupported argument and is handled accordingly below
        data = _img2data(src)
        if data is not None:
            imgdata, imghdr, imgtype = data
    if imgdata is not None and enforce_dim is not None:
        shape, new_shape = imgdata.shape, None
        lshape = len(shape)
        # check if we need to tune up shape
        if lshape < enforce_dim:
            # if we are missing required dimension(s): pad with leading 1s
            new_shape = (1,) * (enforce_dim - lshape) + shape
        elif lshape > enforce_dim:
            # if there are bogus dimensions at the beginning
            bogus_dims = lshape - enforce_dim
            if shape[:bogus_dims] != (1,) * bogus_dims:
                # BUGFIX: was the Python-2-only `raise ValueError, ...`
                # statement form; the call form works on Python 2 and 3
                raise ValueError(
                    "Cannot enforce %dD on data with shape %s"
                    % (enforce_dim, shape))
            new_shape = shape[bogus_dims:]
        # tune up shape if needed
        if new_shape is not None:
            if __debug__:
                debug('DS_NIFTI', 'Enforcing shape %s for %s data from %s' %
                      (new_shape, shape, src))
            imgdata.shape = new_shape
    if imgdata is None:
        # BUGFIX: the docstring promises a ValueError with ensure=True, but
        # the original silently returned None in that case as well
        if ensure:
            raise ValueError("Cannot load data from %r" % (src,))
        return None
    return imgdata, imghdr, imgtype
|