fMRI: FSL

A workflow that uses FSL to perform a first-level analysis on the Nipype tutorial data set:

python fmri_fsl.py

First, tell Python where to find the appropriate functions.

from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range

import os  # system functions

import nipype.interfaces.io as nio  # Data i/o
import nipype.interfaces.fsl as fsl  # fsl
import nipype.interfaces.utility as util  # utility
import nipype.pipeline.engine as pe  # pypeline engine
import nipype.algorithms.modelgen as model  # model generation
import nipype.algorithms.rapidart as ra  # artifact detection

Preliminaries

Set up any package-specific configuration. Here the output file format for FSL routines is set to compressed NIfTI.

fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

Setting up workflows

In this tutorial we set up a hierarchical workflow for FSL analysis, demonstrating how pre-defined workflows can be set up and shared across users, projects, and labs.
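
As a minimal sketch of the idea (not part of this pipeline, and the names are purely illustrative), a shareable workflow is usually wrapped in a factory function that exposes an inputspec/outputspec pair so it can be plugged into a larger workflow:

def create_example_preproc(name='example_preproc'):
    # Hypothetical reusable sub-workflow: smooth a functional image.
    wf = pe.Workflow(name=name)
    inputspec = pe.Node(
        util.IdentityInterface(fields=['func']), name='inputspec')
    smooth = pe.Node(fsl.IsotropicSmooth(fwhm=6.0), name='smooth')
    outputspec = pe.Node(
        util.IdentityInterface(fields=['smoothed_func']), name='outputspec')
    wf.connect(inputspec, 'func', smooth, 'in_file')
    wf.connect(smooth, 'out_file', outputspec, 'smoothed_func')
    return wf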

Set up preprocessing workflow

This is a generic FSL FEAT preprocessing workflow encompassing skull stripping, motion correction, and smoothing operations.

preproc = pe.Workflow(name='preproc')

Set up a node to define all inputs required for the preprocessing workflow

inputnode = pe.Node(
    interface=util.IdentityInterface(fields=[
        'func',
        'struct',
    ]),
    name='inputspec')

Convert functional images to float representation. Since there can be more than one functional run, we use a MapNode to convert each run.

img2float = pe.MapNode(
    interface=fsl.ImageMaths(
        out_data_type='float', op_string='', suffix='_dtype'),
    iterfield=['in_file'],
    name='img2float')
preproc.connect(inputnode, 'func', img2float, 'in_file')

Extract the middle volume of the first run as the reference

extract_ref = pe.Node(interface=fsl.ExtractROI(t_size=1), name='extractref')

Define a function to pick the first file from a list of files

def pickfirst(files):
    if isinstance(files, list):
        return files[0]
    else:
        return files


preproc.connect(img2float, ('out_file', pickfirst), extract_ref, 'in_file')

Define a function to return the index of the middle volume (ExtractROI's t_min is zero-based); for example, a 180-volume run yields int(180 / 2) - 1 = 89

def getmiddlevolume(func):
    from nibabel import load
    funcfile = func
    if isinstance(func, list):
        funcfile = func[0]
    _, _, _, timepoints = load(funcfile).shape
    return int(timepoints / 2) - 1


preproc.connect(inputnode, ('func', getmiddlevolume), extract_ref, 't_min')

Realign the functional runs to the middle volume of the first run

motion_correct = pe.MapNode(
    interface=fsl.MCFLIRT(save_mats=True, save_plots=True),
    name='realign',
    iterfield=['in_file'])
preproc.connect(img2float, 'out_file', motion_correct, 'in_file')
preproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file')

Plot the estimated motion parameters

plot_motion = pe.MapNode(
    interface=fsl.PlotMotionParams(in_source='fsl'),
    name='plot_motion',
    iterfield=['in_file'])
plot_motion.iterables = ('plot_type', ['rotations', 'translations'])
preproc.connect(motion_correct, 'par_file', plot_motion, 'in_file')

Extract the mean volume of the first functional run

meanfunc = pe.Node(
    interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'),
    name='meanfunc')
preproc.connect(motion_correct, ('out_file', pickfirst), meanfunc, 'in_file')

Strip the skull from the mean functional to generate a mask

meanfuncmask = pe.Node(
    interface=fsl.BET(mask=True, no_output=True, frac=0.3),
    name='meanfuncmask')
preproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file')

Mask the functional runs with the extracted mask

maskfunc = pe.MapNode(
    interface=fsl.ImageMaths(suffix='_bet', op_string='-mas'),
    iterfield=['in_file'],
    name='maskfunc')
preproc.connect(motion_correct, 'out_file', maskfunc, 'in_file')
preproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2')

Determine the 2nd and 98th percentile intensities of each functional run

getthresh = pe.MapNode(
    interface=fsl.ImageStats(op_string='-p 2 -p 98'),
    iterfield=['in_file'],
    name='getthreshold')
preproc.connect(maskfunc, 'out_file', getthresh, 'in_file')

Threshold the first run of the functional data at 10% of the 98th percentile

threshold = pe.Node(
    interface=fsl.ImageMaths(out_data_type='char', suffix='_thresh'),
    name='threshold')
preproc.connect(maskfunc, ('out_file', pickfirst), threshold, 'in_file')

Define a function to build the threshold operation string from 10% of the first run's 98th-percentile intensity

def getthreshop(thresh):
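    """Threshold at 10% of the first run's 98th percentile and binarize (fslmaths op string)."""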
    return '-thr %.10f -Tmin -bin' % (0.1 * thresh[0][1])


preproc.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string')

Determine the median value of the functional runs using the mask

medianval = pe.MapNode(
    interface=fsl.ImageStats(op_string='-k %s -p 50'),
    iterfield=['in_file'],
    name='medianval')
preproc.connect(motion_correct, 'out_file', medianval, 'in_file')
preproc.connect(threshold, 'out_file', medianval, 'mask_file')

Dilate the mask

dilatemask = pe.Node(
    interface=fsl.ImageMaths(suffix='_dil', op_string='-dilF'),
    name='dilatemask')
preproc.connect(threshold, 'out_file', dilatemask, 'in_file')

Mask the motion corrected functional runs with the dilated mask

maskfunc2 = pe.MapNode(
    interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'),
    iterfield=['in_file'],
    name='maskfunc2')
preproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file')
preproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2')

Determine the mean image from each functional run

meanfunc2 = pe.MapNode(
    interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'),
    iterfield=['in_file'],
    name='meanfunc2')
preproc.connect(maskfunc2, 'out_file', meanfunc2, 'in_file')

Merge the median values with the mean functional images into a coupled list

mergenode = pe.Node(interface=util.Merge(2, axis='hstack'), name='merge')
preproc.connect(meanfunc2, 'out_file', mergenode, 'in1')
preproc.connect(medianval, 'out_stat', mergenode, 'in2')

Smooth each run using SUSAN, with the brightness threshold set to 75% of the median value of each run and the run's mean functional image supplied as the USAN

smooth = pe.MapNode(
    interface=fsl.SUSAN(),
    iterfield=['in_file', 'brightness_threshold', 'usans'],
    name='smooth')

Define functions to compute the brightness threshold and the USAN specification for SUSAN

def getbtthresh(medianvals):
    return [0.75 * val for val in medianvals]


def getusans(x):
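    """Return per-run USAN specs for SUSAN: (mean functional image, 0.75 * median intensity)."""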
    return [[tuple([val[0], 0.75 * val[1]])] for val in x]


preproc.connect(maskfunc2, 'out_file', smooth, 'in_file')
preproc.connect(medianval, ('out_stat', getbtthresh), smooth,
                'brightness_threshold')
preproc.connect(mergenode, ('out', getusans), smooth, 'usans')

Mask the smoothed data with the dilated mask

maskfunc3 = pe.MapNode(
    interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'),
    iterfield=['in_file'],
    name='maskfunc3')
preproc.connect(smooth, 'smoothed_file', maskfunc3, 'in_file')
preproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2')

Scale each volume of the run so that the median value of the run is set to 10000

intnorm = pe.MapNode(
    interface=fsl.ImageMaths(suffix='_intnorm'),
    iterfield=['in_file', 'op_string'],
    name='intnorm')
preproc.connect(maskfunc3, 'out_file', intnorm, 'in_file')

Define a function to get the scaling factor for intensity normalization

def getinormscale(medianvals):
    return ['-mul %.10f' % (10000. / val) for val in medianvals]


preproc.connect(medianval, ('out_stat', getinormscale), intnorm, 'op_string')

Perform temporal highpass filtering on the data

highpass = pe.MapNode(
    interface=fsl.ImageMaths(suffix='_tempfilt'),
    iterfield=['in_file'],
    name='highpass')
preproc.connect(intnorm, 'out_file', highpass, 'in_file')

Generate a mean functional image from the first run

meanfunc3 = pe.MapNode(
    interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'),
    iterfield=['in_file'],
    name='meanfunc3')
preproc.connect(highpass, ('out_file', pickfirst), meanfunc3, 'in_file')

Strip the structural image and coregister the mean functional image to the structural image

nosestrip = pe.Node(interface=fsl.BET(frac=0.3), name='nosestrip')
skullstrip = pe.Node(interface=fsl.BET(mask=True), name='stripstruct')

coregister = pe.Node(interface=fsl.FLIRT(dof=6), name='coregister')

Use nipype.algorithms.rapidart to determine which of the images in the functional series are outliers based on deviations in intensity and/or movement.

art = pe.MapNode(
    interface=ra.ArtifactDetect(
        use_differences=[True, False],
        use_norm=True,
        norm_threshold=1,
        zintensity_threshold=3,
        parameter_source='FSL',
        mask_type='file'),
    iterfield=['realigned_files', 'realignment_parameters'],
    name="art")

preproc.connect([
    (inputnode, nosestrip, [('struct', 'in_file')]),
    (nosestrip, skullstrip, [('out_file', 'in_file')]),
    (skullstrip, coregister, [('out_file', 'in_file')]),
    (meanfunc2, coregister, [(('out_file', pickfirst), 'reference')]),
    (motion_correct, art, [('par_file', 'realignment_parameters')]),
    (maskfunc2, art, [('out_file', 'realigned_files')]),
    (dilatemask, art, [('out_file', 'mask_file')]),
])

Set up model fitting workflow

modelfit = pe.Workflow(name='modelfit')

Use nipype.algorithms.modelgen.SpecifyModel to generate design information.

modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec")

Use nipype.interfaces.fsl.Level1Design to generate a run-specific fsf file for analysis

level1design = pe.Node(interface=fsl.Level1Design(), name="level1design")

Use nipype.interfaces.fsl.FEATModel to generate a run-specific mat file for use by FILMGLS

modelgen = pe.MapNode(
    interface=fsl.FEATModel(),
    name='modelgen',
    iterfield=['fsf_file', 'ev_files'])

Use nipype.interfaces.fsl.FILMGLS to estimate a model specified by a mat file and a functional run

modelestimate = pe.MapNode(
    interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5, threshold=1000),
    name='modelestimate',
    iterfield=['design_file', 'in_file'])

Use nipype.interfaces.fsl.ContrastMgr to generate contrast estimates

conestimate = pe.MapNode(
    interface=fsl.ContrastMgr(),
    name='conestimate',
    iterfield=[
        'tcon_file', 'param_estimates', 'sigmasquareds', 'corrections',
        'dof_file'
    ])

modelfit.connect([
    (modelspec, level1design, [('session_info', 'session_info')]),
    (level1design, modelgen, [('fsf_files', 'fsf_file'),
                              ('ev_files', 'ev_files')]),
    (modelgen, modelestimate, [('design_file', 'design_file')]),
    (modelgen, conestimate, [('con_file', 'tcon_file')]),
    (modelestimate, conestimate, [('param_estimates', 'param_estimates'),
                                  ('sigmasquareds', 'sigmasquareds'),
                                  ('corrections', 'corrections'),
                                  ('dof_file', 'dof_file')]),
])

Set up fixed-effects workflow

fixed_fx = pe.Workflow(name='fixedfx')

Use nipype.interfaces.fsl.Merge to merge the copes and varcopes for each contrast across runs

copemerge = pe.MapNode(
    interface=fsl.Merge(dimension='t'),
    iterfield=['in_files'],
    name="copemerge")

varcopemerge = pe.MapNode(
    interface=fsl.Merge(dimension='t'),
    iterfield=['in_files'],
    name="varcopemerge")

Use nipype.interfaces.fsl.L2Model to generate subject- and condition-specific level-2 model design files

level2model = pe.Node(interface=fsl.L2Model(), name='l2model')

Use nipype.interfaces.fsl.FLAMEO to estimate a second level model

flameo = pe.MapNode(
    interface=fsl.FLAMEO(run_mode='fe'),
    name="flameo",
    iterfield=['cope_file', 'var_cope_file'])

fixed_fx.connect([
    (copemerge, flameo, [('merged_file', 'cope_file')]),
    (varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
    (level2model, flameo, [('design_mat', 'design_file'),
                           ('design_con', 't_con_file'),
                           ('design_grp', 'cov_split_file')]),
])

Set up first-level workflow

def sort_copes(files):
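    """Reshape a runs-by-contrasts list of files into a contrasts-by-runs list."""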
    numelements = len(files[0])
    outfiles = []
    for i in range(numelements):
        outfiles.insert(i, [])
        for j, elements in enumerate(files):
            outfiles[i].append(elements[i])
    return outfiles


def num_copes(files):
    return len(files)


firstlevel = pe.Workflow(name='firstlevel')
firstlevel.connect([
    (preproc, modelfit, [('highpass.out_file', 'modelspec.functional_runs'),
                         ('art.outlier_files', 'modelspec.outlier_files'),
                         ('highpass.out_file', 'modelestimate.in_file')]),
    (preproc, fixed_fx, [('coregister.out_file', 'flameo.mask_file')]),
    (modelfit, fixed_fx, [
        (('conestimate.copes', sort_copes), 'copemerge.in_files'),
        (('conestimate.varcopes', sort_copes), 'varcopemerge.in_files'),
        (('conestimate.copes', num_copes), 'l2model.num_copes'),
    ]),
])

Experiment-specific components

The Nipype tutorial contains data for two subjects, stored in the subdirectories s1 and s2. Each subject directory contains four functional volumes (f3.nii, f5.nii, f7.nii, f10.nii) and one anatomical volume named struct.nii.

Below we set some variables to inform the datasource about the layout of our data. We specify the location of the data, the subject subdirectories, and a dictionary that maps each run to a mnemonic (or field) for the run type (struct or func). These fields become the output fields of the datasource node in the pipeline.

In the example below, run 'f3' is of type 'func' and is mapped onto a NIfTI filename through the template '%s/%s.nii' defined further below, so for subject 's1' the run 'f3' becomes 's1/f3.nii'.
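
As a quick illustration (plain Python, not part of the pipeline), the substitutions the datasource will perform for subject 's1' work out to:

runs = ['f3', 'f5', 'f7', 'f10']
func_files = ['%s/%s.nii' % ('s1', run) for run in runs]
# -> ['s1/f3.nii', 's1/f5.nii', 's1/f7.nii', 's1/f10.nii']
struct_file = '%s/%s.nii' % ('s1', 'struct')
# -> 's1/struct.nii'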

# Specify the location of the data.
data_dir = os.path.abspath('data')
# Specify the subject directories
subject_list = ['s1']  # , 's2']
# Map field names to individual subject runs.
info = dict(
    func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]],
    struct=[['subject_id', 'struct']])

infosource = pe.Node(
    interface=util.IdentityInterface(fields=['subject_id']), name="infosource")

Here we set up iteration over all the subjects. The following line is a particular example of the flexibility of the system. The iterables attribute of the infosource node tells the pipeline engine that it should repeat the analysis on each of the items in subject_list. In the current example, the entire first-level preprocessing and estimation will be repeated for each subject contained in subject_list.

infosource.iterables = ('subject_id', subject_list)
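
As a rough, purely illustrative aside, adding further iterables (such as the smoothing extents set later in this script) expands the workflow into one parameterized copy per combination of values; plain Python shows the resulting combinatorics:

from itertools import product

# Hypothetical illustration of the expansion the engine performs once the
# 'fwhm' iterable is added to the smoothing node further below.
for subject_id, fwhm in product(subject_list, [5., 10.]):
    print(subject_id, fwhm)  # s1 5.0, then s1 10.0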

Now we create a nipype.interfaces.io.DataGrabber object and fill in the information from above about the layout of our data. The nipype.pipeline.engine.Node class wraps the interface object and provides additional housekeeping and pipeline-specific functionality.

datasource = pe.Node(
    interface=nio.DataGrabber(
        infields=['subject_id'], outfields=['func', 'struct']),
    name='datasource')
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True

Use the get_node method to retrieve an internal node by name, then set iterables on that node to perform the analysis with two different extents of smoothing (FWHM of 5 mm and 10 mm).

smoothnode = firstlevel.get_node('preproc.smooth')
assert (str(smoothnode) == 'preproc.smooth')
smoothnode.iterables = ('fwhm', [5., 10.])

hpcutoff = 120
TR = 3.  # ensure float
firstlevel.inputs.preproc.highpass.suffix = '_hpf'
# fslmaths -bptf expects the highpass sigma in volumes, i.e. cutoff / (2 * TR)
firstlevel.inputs.preproc.highpass.op_string = '-bptf %d -1' % (hpcutoff / (2 * TR))

Set up a function that returns subject-specific information about the experimental paradigm. This is used by nipype.algorithms.modelgen.SpecifyModel to create the information necessary to generate an FSL design matrix. In this tutorial, the same paradigm is used for every participant. Other examples of this function are available in the doc/examples folder. Note: Python knowledge is required here.

def subjectinfo(subject_id):
    from nipype.interfaces.base import Bunch
    from copy import deepcopy
    print("Subject ID: %s\n" % str(subject_id))
    output = []
    names = ['Task-Odd', 'Task-Even']
    for r in range(4):
        onsets = [list(range(15, 240, 60)), list(range(45, 240, 60))]
        output.insert(r,
                      Bunch(
                          conditions=names,
                          onsets=deepcopy(onsets),
                          durations=[[15] for s in names],
                          amplitudes=None,
                          tmod=None,
                          pmod=None,
                          regressor_names=None,
                          regressors=None))
    return output
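
For reference (not part of the pipeline), the onset expressions above describe an alternating block design with 15 s blocks:

print(list(range(15, 240, 60)))  # Task-Odd onsets:  [15, 75, 135, 195]
print(list(range(45, 240, 60)))  # Task-Even onsets: [45, 105, 165, 225]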

Set up the contrast structure to be evaluated. This is a list of lists, where each inner list specifies one contrast in the format [Name, Stat, [list of condition names], [weights on those conditions]]. The condition names must match the names defined in the subjectinfo function described above.

cont1 = ['Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5]]
cont2 = ['Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1]]
cont3 = ['Task', 'F', [cont1, cont2]]
contrasts = [cont1, cont2]

firstlevel.inputs.modelfit.modelspec.input_units = 'secs'
firstlevel.inputs.modelfit.modelspec.time_repetition = TR
firstlevel.inputs.modelfit.modelspec.high_pass_filter_cutoff = hpcutoff

firstlevel.inputs.modelfit.level1design.interscan_interval = TR
firstlevel.inputs.modelfit.level1design.bases = {'dgamma': {'derivs': False}}
firstlevel.inputs.modelfit.level1design.contrasts = contrasts
firstlevel.inputs.modelfit.level1design.model_serial_correlations = True

Set up complete workflow

l1pipeline = pe.Workflow(name="level1")
l1pipeline.base_dir = os.path.abspath('./fsl/workingdir')
l1pipeline.config = {
    "execution": {
        "crashdump_dir": os.path.abspath('./fsl/crashdumps')
    }
}

l1pipeline.connect([
    (infosource, datasource, [('subject_id', 'subject_id')]),
    (infosource, firstlevel, [(('subject_id', subjectinfo),
                               'modelfit.modelspec.subject_info')]),
    (datasource, firstlevel, [
        ('struct', 'preproc.inputspec.struct'),
        ('func', 'preproc.inputspec.func'),
    ]),
])

Execute the pipeline

The code discussed above sets up all the necessary data structures with appropriate parameters and the connectivity between the processes, but does not generate any output. To actually run the analysis on the data, the run method of the workflow (nipype.pipeline.engine.Workflow.run) needs to be called.

if __name__ == '__main__':
    l1pipeline.write_graph()
    outgraph = l1pipeline.run()
    # l1pipeline.run(plugin='MultiProc', plugin_args={'n_procs':2})

Example source code

You can download the full source code of this example. This same script is also included in Nipype1 Examples Niflow under the package/niflow/nipype1/examples directory.