18
18
Import necessary modules from nipype.
19
19
"""
20
20
21
import os  # system functions

import nipype.interfaces.io as nio  # Data i/o
import nipype.interfaces.utility as util  # utility
import nipype.pipeline.engine as pe  # pypeline engine
import nipype.interfaces.camino as camino
import nipype.interfaces.fsl as fsl
import nipype.interfaces.camino2trackvis as cam2trk
import nipype.algorithms.misc as misc
29
-
30
29
"""
31
30
We use the following functions to scrape the voxel and data dimensions of the input images. This allows the
32
31
pipeline to be flexible enough to accept and process images of varying size. The SPM Face tutorial
@@ -62,21 +61,20 @@ def get_affine(volume):
62
61
nii = nb .load (volume , mmap = NUMPY_MMAP )
63
62
return nii .affine
64
63
64
+
65
65
subject_list = ['subj1']

# All FSL interfaces write plain NIFTI (.nii) outputs by default.
fsl.FSLCommand.set_default_output_type('NIFTI')

"""
Map field names to individual subject runs
"""

info = dict(
    dwi=[['subject_id', 'data']],
    bvecs=[['subject_id', 'bvecs']],
    bvals=[['subject_id', 'bvals']])

infosource = pe.Node(
    interface=util.IdentityInterface(fields=['subject_id']), name="infosource")
80
78
"""Here we set up iteration over all the subjects. The following line
81
79
is a particular example of the flexibility of the system. The
82
80
``datasource`` attribute ``iterables`` tells the pipeline engine that
@@ -87,7 +85,6 @@ def get_affine(volume):
87
85
"""
88
86
89
87
# Run the downstream pipeline once per entry in subject_list.
infosource.iterables = ('subject_id', subject_list)
91
88
"""
92
89
Now we create a :class:`nipype.interfaces.io.DataGrabber` object and
93
90
fill in the information from above about the layout of our data. The
@@ -96,9 +93,10 @@ def get_affine(volume):
96
93
functionality.
97
94
"""
98
95
99
datasource = pe.Node(
    interface=nio.DataGrabber(
        infields=['subject_id'], outfields=list(info.keys())),
    name='datasource')

# Files are laid out as <subject_id>/<field name>.
datasource.inputs.template = "%s/%s"
104
102
@@ -109,13 +107,13 @@ def get_affine(volume):
109
107
# The dwi image alone is gzipped NIfTI; bvecs/bvals use the plain template.
datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True

"""
An inputnode is used to pass the data obtained by the data grabber to the actual processing functions
"""

inputnode = pe.Node(
    interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]),
    name="inputnode")
119
117
"""
120
118
Setup for Diffusion Tensor Computation
121
119
--------------------------------------
@@ -126,13 +124,11 @@ def get_affine(volume):
126
124
image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel")
fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme")
fsl2scheme.inputs.usegradmod = True

"""
Second, diffusion tensors are fit to the voxel-order data.
"""

dtifit = pe.Node(interface=camino.DTIFit(), name='dtifit')
136
132
"""
137
133
Next, a lookup table is generated from the schemefile and the
138
134
signal-to-noise ratio (SNR) of the unweighted (q=0) data.
@@ -141,7 +137,6 @@ def get_affine(volume):
141
137
dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen")
dtlutgen.inputs.snr = 16.0
dtlutgen.inputs.inversion = 1
145
140
"""
146
141
In this tutorial we implement probabilistic tractography using the PICo algorithm.
147
142
PICo tractography requires an estimate of the fibre direction and a model of its
@@ -150,29 +145,25 @@ def get_affine(volume):
150
145
151
146
picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs")
picopdfs.inputs.inputmodel = 'dt'

"""
An FSL BET node creates a brain mask from the diffusion image for seeding the PICo tractography.
"""

bet = pe.Node(interface=fsl.BET(), name="bet")
bet.inputs.mask = True

"""
Finally, tractography is performed.
First DT streamline tractography.
"""

trackdt = pe.Node(interface=camino.TrackDT(), name="trackdt")

"""
Now camino's Probabilistic Index of Connectivity algorithm.
In this tutorial, we will use only 1 iteration for time-saving purposes.
"""

trackpico = pe.Node(interface=camino.TrackPICo(), name="trackpico")
trackpico.inputs.iterations = 1
176
167
"""
177
168
Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to
178
169
convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse.
@@ -182,21 +173,20 @@ def get_affine(volume):
182
173
cam2trk_dt.inputs.min_length = 30
cam2trk_dt.inputs.voxel_order = 'LAS'

cam2trk_pico = pe.Node(
    interface=cam2trk.Camino2Trackvis(), name="cam2trk_pico")
cam2trk_pico.inputs.min_length = 30
cam2trk_pico.inputs.voxel_order = 'LAS'

trk2camino = pe.Node(interface=cam2trk.Trackvis2Camino(), name="trk2camino")

"""
Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview,
using the following two nodes. For VTK use VtkStreamlines.
"""

procstreamlines = pe.Node(
    interface=camino.ProcStreamlines(), name="procstreamlines")
procstreamlines.inputs.outputtracts = 'oogl'
200
190
"""
201
191
We can also produce a variety of scalar values from our fitted tensors. The following nodes generate the
202
192
fractional anisotropy and diffusivity trace maps and their associated headers.
@@ -206,45 +196,39 @@ def get_affine(volume):
206
196
trace = pe.Node(interface=camino.ComputeTensorTrace(), name='trace')
dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig')

analyzeheader_fa = pe.Node(
    interface=camino.AnalyzeHeader(), name="analyzeheader_fa")
analyzeheader_fa.inputs.datatype = "double"
analyzeheader_trace = analyzeheader_fa.clone('analyzeheader_trace')

fa2nii = pe.Node(interface=misc.CreateNifti(), name='fa2nii')
trace2nii = fa2nii.clone("trace2nii")

"""
Since we have now created all our nodes, we can now define our workflow and start making connections.
"""

tractography = pe.Workflow(name='tractography')

tractography.connect([(inputnode, bet, [("dwi", "in_file")])])

"""
File format conversion
"""

tractography.connect([(inputnode, image2voxel, [("dwi", "in_file")]),
                      (inputnode, fsl2scheme, [("bvecs", "bvec_file"),
                                               ("bvals", "bval_file")])])

"""
Tensor fitting
"""

tractography.connect([(image2voxel, dtifit, [['voxel_order', 'in_file']]),
                      (fsl2scheme, dtifit, [['scheme', 'scheme_file']])])

"""
Workflow for applying DT streamline tractography
"""

tractography.connect([(bet, trackdt, [("mask_file", "seed_file")])])
tractography.connect([(dtifit, trackdt, [("tensor_fitted", "in_file")])])

"""
Workflow for applying PICo
"""
@@ -257,8 +241,6 @@ def get_affine(volume):
257
241
258
242
# ProcStreamlines might throw memory errors - comment this line out in such case
tractography.connect([(trackdt, procstreamlines, [("tracked", "in_file")])])
262
244
"""
263
245
Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the
264
246
tensor fitting.
@@ -270,32 +252,35 @@ def get_affine(volume):
270
252
271
253
tractography.connect([(dtifit, fa, [("tensor_fitted", "in_file")])])
tractography.connect([(fa, analyzeheader_fa, [("fa", "in_file")])])
# Voxel/data dimensions are scraped from the input dwi at runtime so the
# generated Analyze headers match the incoming image geometry.
tractography.connect([(inputnode, analyzeheader_fa,
                       [(('dwi', get_vox_dims), 'voxel_dims'),
                        (('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(fa, fa2nii, [('fa', 'data_file')])])
tractography.connect([(inputnode, fa2nii, [(('dwi', get_affine), 'affine')])])
tractography.connect([(analyzeheader_fa, fa2nii, [('header', 'header_file')])])

tractography.connect([(dtifit, trace, [("tensor_fitted", "in_file")])])
tractography.connect([(trace, analyzeheader_trace, [("trace", "in_file")])])
tractography.connect([(inputnode, analyzeheader_trace,
                       [(('dwi', get_vox_dims), 'voxel_dims'),
                        (('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(trace, trace2nii, [('trace', 'data_file')])])
tractography.connect([(inputnode, trace2nii, [(('dwi', get_affine),
                                               'affine')])])
tractography.connect([(analyzeheader_trace, trace2nii, [('header',
                                                         'header_file')])])

tractography.connect([(dtifit, dteig, [("tensor_fitted", "in_file")])])

tractography.connect([(trackpico, cam2trk_pico, [('tracked', 'in_file')])])
tractography.connect([(trackdt, cam2trk_dt, [('tracked', 'in_file')])])
tractography.connect([(inputnode, cam2trk_pico,
                       [(('dwi', get_vox_dims), 'voxel_dims'),
                        (('dwi', get_data_dims), 'data_dims')])])

tractography.connect([(inputnode, cam2trk_dt,
                       [(('dwi', get_vox_dims), 'voxel_dims'),
                        (('dwi', get_data_dims), 'data_dims')])])
299
284
"""
300
285
Finally, we create another higher-level workflow to connect our tractography workflow with the info and datagrabbing nodes
301
286
declared at the beginning. Our tutorial is now extensible to any arbitrary number of subjects by simply adding
@@ -305,19 +290,16 @@ def get_affine(volume):
305
290
workflow = pe.Workflow(name="workflow")
workflow.base_dir = os.path.abspath('camino_dti_tutorial')
workflow.connect([(infosource, datasource, [('subject_id', 'subject_id')]),
                  (datasource, tractography,
                   [('dwi', 'inputnode.dwi'), ('bvals', 'inputnode.bvals'),
                    ('bvecs', 'inputnode.bvecs')])])

"""
The following code runs the whole workflow and produces a .dot and .png graph of the processing pipeline.
"""

if __name__ == '__main__':
    workflow.run()
    workflow.write_graph()
321
303
"""
322
304
You can choose the format of the exported graph with the ``format`` option. For example ``workflow.write_graph(format='eps')``
323
305
0 commit comments