%YAML 1.1
---
# CPAC Pipeline Configuration YAML file
# Version 1.8.8.dev1
#
# http://fcp-indi.github.io for more info.
#
# Tip: This file can be edited manually with a text editor for quick modifications.
FROM: blank
pipeline_setup:
# Name for this pipeline configuration - useful for identification.
# This string will be sanitized and used in filepaths
pipeline_name: benchmark-FNIRT
output_directory:
# Quality control outputs
quality_control:
# Generate quality control pages containing preprocessing and derivative outputs.
generate_quality_control_images: On
# Include extra versions and intermediate steps of functional preprocessing in the output directory.
write_func_outputs: On
# Include extra outputs in the output directory that may be of interest when more information is needed.
write_debugging_outputs: On
system_config:
# Select Off if you intend to run CPAC on a single machine.
# If set to On, CPAC will attempt to submit jobs through the job scheduler / resource manager selected below.
on_grid:
SGE:
# SGE Parallel Environment to use when running CPAC.
# Only applies when you are running on a grid or compute cluster using SGE.
parallel_environment: cpac
# The maximum amount of memory each participant's workflow can allocate.
# Use this to place an upper bound of memory usage.
# - Warning: 'Memory Per Participant' multiplied by 'Number of Participants to Run Simultaneously'
# must not be more than the total amount of RAM.
# - Conversely, using too little RAM can impede the speed of a pipeline run.
# - It is recommended that you set this to a value that, when multiplied by
# 'Number of Participants to Run Simultaneously', is as much RAM as you can safely allocate.
maximum_memory_per_participant: 3
# The maximum amount of cores (on a single machine) or slots on a node (on a cluster/grid)
# to allocate per participant.
# - Setting this above 1 will parallelize each participant's workflow where possible.
# If you wish to dedicate multiple cores to ANTS-based anatomical registration (below),
# this value must be equal to or higher than the number of cores provided to ANTS.
# - The maximum number of cores your run can possibly employ will be this setting multiplied
# by the number of participants set to run in parallel (the 'Number of Participants to Run
# Simultaneously' setting).
max_cores_per_participant: 3
# The number of cores to allocate to ANTS-based anatomical registration per participant.
# - Multiple cores can greatly speed up this preprocessing step.
# - This number cannot be greater than the number of cores per participant.
num_ants_threads: 3
# The number of participant workflows to run at the same time.
# - The maximum number of cores your run can possibly employ will be this setting
# multiplied by the number of cores dedicated to each participant (the 'Maximum Number of Cores Per Participant' setting).
num_participants_at_once: 15
working_directory:
# Deletes the contents of the Working Directory after running.
# This saves disk space, but any additional preprocessing or analysis will have to be completely re-run.
remove_working_dir: Off
anatomical_preproc:
run: On
acpc_alignment:
T1w_brain_ACPC_template: $FSLDIR/data/standard/MNI152_T1_1mm_brain.nii.gz
brain_extraction:
run: On
# using: ['3dSkullStrip', 'BET', 'UNet', 'niworkflows-ants', 'FreeSurfer-ABCD', 'FreeSurfer-BET-Tight', 'FreeSurfer-BET-Loose', 'FreeSurfer-Brainmask']
# this is a fork option
using: [BET]
segmentation:
# Automatically segment anatomical images into white matter, gray matter,
# and CSF based on prior probability maps.
run: On
tissue_segmentation:
Template_Based:
# These masks should be in the same space as your registration template, e.g. if
# you choose 'EPI Template', the tissue masks below should also be EPI template tissue masks.
#
# Options: ['T1_Template', 'EPI_Template']
template_for_segmentation: []
registration_workflows:
anatomical_registration:
run: On
registration:
# using: ['ANTS', 'FSL', 'FSL-linear']
# this is a fork point
# selecting both ['ANTS', 'FSL'] will run both and fork the pipeline
using: [FSL]
# option parameters
ANTs:
# ANTs parameters for T1-template-based registration
T1_registration:
# Register skull-on anatomical image to a template.
reg_with_skull: Off
functional_registration:
coregistration:
# functional (BOLD/EPI) registration to anatomical (structural/T1)
run: On
boundary_based_registration:
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [On]
func_registration_to_template:
# these options modify the application (to the functional data), not the calculation, of the
# T1-to-template and EPI-to-template transforms calculated earlier during registration
# apply the functional-to-template (T1 template) registration transform to the functional data
run: On
output_resolution:
# The resolution (in mm) to which the registered derivative outputs are written.
# NOTE:
# this is for the single-volume functional-space outputs (i.e. derivatives)
# thus, a higher resolution may not result in a large increase in RAM needs as above
func_derivative_outputs: 2mm
EPI_registration:
ANTs:
# EPI registration configuration - synonymous with T1_registration
# parameters under anatomical registration above
parameters:
functional_preproc:
run: On
slice_timing_correction:
# Interpolate voxel time courses so they are sampled at the same time points.
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [On]
motion_estimates_and_correction:
run: On
motion_correction:
# using: ['3dvolreg', 'mcflirt']
# Forking is currently broken for this option.
# Please use separate configs if you want to use each of 3dvolreg and mcflirt.
# Follow https://github.com/FCP-INDI/C-PAC/issues/1935 to see when this issue is resolved.
using: [3dvolreg]
distortion_correction:
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [On]
func_masking:
run: On
generate_func_mean:
# Generate mean functional image
run: On
normalize_func:
# Normalize functional image
run: On
coreg_prep:
# Generate sbref
run: On
nuisance_corrections:
2-nuisance_regression:
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [On]
# Select which nuisance signal corrections to apply
Regressors:
- Name: Regressor_1
Bandpass:
bottom_frequency: 0.01
top_frequency: 0.1
CerebrospinalFluid:
extraction_resolution: 2
summary: Mean
Motion:
include_delayed: On
include_delayed_squared: On
include_squared: On
PolyOrt:
degree: 1
aCompCor:
extraction_resolution: 2
summary:
components: 5
method: DetrendPC
tissues:
- WhiteMatter
- CerebrospinalFluid
- Name: Regressor_2
CerebrospinalFluid:
erode_mask: On
extraction_resolution: 2
summary: Mean
GlobalSignal:
summary: Mean
Motion:
include_delayed: On
include_delayed_squared: Off
include_squared: On
aCompCor:
extraction_resolution: 2
summary:
components: 5
method: DetrendPC
tissues:
- CerebrospinalFluid
- Name: Regressor_3
Censor:
method: Kill
number_of_previous_trs_to_censor: 1
number_of_subsequent_trs_to_censor: 1
thresholds:
- type: FD_P
value: 0.3
Motion:
include_delayed: On
include_delayed_squared: Off
include_squared: Off
# Process and refine masks used to produce regressors and time series for
# regression.
regressor_masks:
erode_anatomical_brain_mask:
# Erode brain mask in millimeters, default for brain mask is 30 mm
# Brain erosion default is using millimeters.
brain_mask_erosion_mm: 30
erode_csf:
# Erode cerebrospinal fluid mask in millimeters, default for cerebrospinal fluid is 30mm
# Cerebrospinal fluid erosion default is using millimeters.
csf_mask_erosion_mm: 30
erode_wm:
# Target volume ratio, if using erosion.
# Default proportion is 0.6 for white matter mask.
# If using erosion, using both proportion and millimeters is not recommended.
# White matter erosion default is using proportion erosion method when use erosion for white matter.
wm_erosion_prop: 0.6
erode_gm:
# Target volume ratio, if using erosion.
# If using erosion, using both proportion and millimeters is not recommended.
gm_erosion_prop: 0.6
1-ICA-AROMA:
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [On, Off]
timeseries_extraction:
run: On
connectivity_matrix:
# Create a connectivity matrix from timeseries data
# Options:
# ['AFNI', 'Nilearn', 'ndmg']
using: [Nilearn, ndmg]
# Options:
# ['Pearson', 'Partial']
# Note: These options are not configurable for ndmg, which will ignore these options
measure: [Pearson, Partial]
# Enter paths to region-of-interest (ROI) NIFTI files (.nii or .nii.gz) to be used for time-series extraction, and then select which types of analyses to run.
# Denote which analyses to run for each ROI path by listing the names below. For example, if you wish to run Avg and SpatialReg, you would enter: '/path/to/ROI.nii.gz': Avg, SpatialReg
# available analyses:
# /path/to/atlas.nii.gz: Avg, Voxel, SpatialReg
tse_roi_paths:
s3://fcp-indi/resources/cpac/resources/rois_2mm.nii.gz: Avg, Voxel, SpatialReg
amplitude_low_frequency_fluctuation:
# ALFF & f/ALFF
# Calculate Amplitude of Low Frequency Fluctuations (ALFF) and fractional ALFF (f/ALFF) for all voxels.
run: On
# space: Template or Native
target_space: [Native]
regional_homogeneity:
# ReHo
# Calculate Regional Homogeneity (ReHo) for all voxels.
run: On
# space: Template or Native
target_space: [Native]
voxel_mirrored_homotopic_connectivity:
# VMHC
# Calculate Voxel-mirrored Homotopic Connectivity (VMHC) for all voxels.
run: On
network_centrality:
# Calculate Degree, Eigenvector Centrality, or Functional Connectivity Density.
run: On
# Maximum amount of RAM (in GB) to be used when calculating Degree Centrality.
# Calculating Eigenvector Centrality will require additional memory based on the size of the mask or number of ROI nodes.
memory_allocation: 3.0
# Full path to a NIFTI file describing the mask. Centrality will be calculated for all voxels within the mask.
template_specification_file: s3://fcp-indi/resources/cpac/resources/mask-thr50-3mm.nii.gz
degree_centrality:
# Enable/Disable degree centrality by selecting the connectivity weights
# weight_options: ['Binarized', 'Weighted']
# disable this type of centrality with:
# weight_options: []
weight_options: [Binarized, Weighted]
eigenvector_centrality:
# Enable/Disable eigenvector centrality by selecting the connectivity weights
# weight_options: ['Binarized', 'Weighted']
# disable this type of centrality with:
# weight_options: []
weight_options: [Binarized, Weighted]
local_functional_connectivity_density:
# Enable/Disable lFCD by selecting the connectivity weights
# weight_options: ['Binarized', 'Weighted']
# disable this type of centrality with:
# weight_options: []
weight_options: [Binarized, Weighted]
# Select the type of threshold used when creating the lFCD adjacency matrix.
# options:
# 'Significance threshold', 'Correlation threshold'
correlation_threshold_option: Significance threshold
# Based on the Threshold Type selected above, enter a Threshold Value.
# P-value for Significance Threshold
# Pearson's r value for Correlation Threshold
correlation_threshold: 0.001
# OUTPUTS AND DERIVATIVES
# -----------------------
post_processing:
spatial_smoothing:
run: On
z-scoring:
run: On
seed_based_correlation_analysis:
# Enter paths to region-of-interest (ROI) NIFTI files (.nii or .nii.gz) to be used for seed-based correlation analysis, and then select which types of analyses to run.
# Denote which analyses to run for each ROI path by listing the names below. For example, if you wish to run Avg and MultReg, you would enter: '/path/to/ROI.nii.gz': Avg, MultReg
# available analyses:
# /path/to/atlas.nii.gz: Avg, DualReg, MultReg
sca_roi_paths:
s3://fcp-indi/resources/cpac/resources/rois_2mm.nii.gz: Avg, MultReg