Cancer Research UK Manchester Institute Configuration
All nf-core pipelines have been successfully configured for use on the HPC (griffin) at Cancer Research UK Manchester Institute.
To use, run the pipeline with -profile crukmi. This will download and launch the crukmi.config, which has been pre-configured with a setup suitable for the griffin HPC. Using this profile, singularity images will be downloaded to run on the cluster and stored in a centralised location.
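For reference, a typical launch command looks like the sketch below. The pipeline name (nf-core/rnaseq), the release tag and samplesheet.csv are placeholder values; substitute the pipeline and inputs you actually want to run.

## Example launch command (pipeline, release and input sheet are placeholders)
nextflow run nf-core/rnaseq \
    -r 3.14.0 \
    -profile crukmi \
    --input samplesheet.csv \
    --outdir results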
Before running the pipeline you will need to load Nextflow using the environment module system, for example via:
## Load Nextflow environment modules
module purge
module load apps/nextflow/22.04.5
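The Nextflow version shown above is only an example; the environment module system can list the versions that are actually installed on griffin:

## List the Nextflow versions available via the module system
module avail apps/nextflow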
The pipeline should always be executed inside a workspace on the /scratch/ system. All of the intermediate files required to run the pipeline will be stored in the work/ directory. It is recommended to delete this directory after the pipeline has finished successfully because it can get quite large.
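A minimal end-to-end session might therefore look like the sketch below; the workspace path is only an example, so use your own area under /scratch/.

## Create and enter a workspace on the scratch filesystem
## (the path is an example; use your own area under /scratch/)
mkdir -p /scratch/$USER/my-nf-core-run
cd /scratch/$USER/my-nf-core-run

## Launch the pipeline as shown above; intermediate files are written to ./work

## After the pipeline has finished successfully, remove the intermediate files
rm -rf work/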
Config file
// Profile config names for nf-core/configs
params {
    config_profile_description = 'Cancer Research UK Manchester Institute HPC cluster profile provided by nf-core/configs'
    config_profile_contact = 'Stephen Kitcatt, Simon Pearce (@skitcattCRUKMI, @sppearce)'
    config_profile_url = 'http://scicom.picr.man.ac.uk/projects/user-support/wiki'
}

singularity {
    cacheDir = '/lmod/nextflow_software'
    enabled = true
    autoMounts = true
}

process {
    beforeScript = 'module load apps/apptainer/1.2.0'
    executor = 'slurm'
    queue = { task.memory <= 240.GB ? 'compute' : 'hmem' }
    errorStrategy = { task.exitStatus in [143,137,104,134,139,140] ? 'retry' : 'finish' }
    maxErrors = '-1'
    maxRetries = 3

    withLabel:process_single {
        cpus = { check_max( 1 * task.attempt, 'cpus' ) }
        memory = { check_max( 5.GB * task.attempt, 'memory' ) }
    }

    withLabel:process_low {
        cpus = { check_max( 1 * task.attempt, 'cpus' ) }
        memory = { check_max( 5.GB * task.attempt, 'memory' ) }
    }

    withLabel:process_medium {
        cpus = { check_max( 4 * task.attempt, 'cpus' ) }
        memory = { check_max( 20.GB * task.attempt, 'memory' ) }
    }

    withLabel:process_high {
        cpus = { check_max( 48 * task.attempt, 'cpus' ) }
        memory = { check_max( 240.GB * task.attempt, 'memory' ) }
    }
}

executor {
    name = 'slurm'
    queueSize = 1000
    pollInterval = '10 sec'
}

params {
    max_memory = 4000.GB
    max_cpus = 96
    max_time = 72.h
}
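Individual runs can layer extra settings on top of this profile with Nextflow's -c option. The sketch below is illustrative only: the file name and the 24.h value are not part of the profile, and it assumes the pipeline still exposes the max_* parameters used above.

## Write a small override config and layer it on top of the crukmi profile
## (file name and values are illustrative only)
cat > my_overrides.config << 'EOF'
params {
    max_time = 24.h
}
EOF

nextflow run nf-core/rnaseq -r 3.14.0 -profile crukmi -c my_overrides.config \
    --input samplesheet.csv --outdir results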