Imperial CX1 HPC Configuration

All nf-core pipelines have been successfully configured for use on the CX1 cluster at Imperial College London HPC.

To use, run the pipeline with -profile imperial,standard. This will download and launch the imperial.config file, which has been pre-configured with a setup suitable for the CX1 cluster. Using this profile, a Docker image containing all of the required software will be downloaded and converted to a Singularity image before execution of the pipeline.
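
For example, a launch from a CX1 login node might look like the lines below; the pipeline name, samplesheet and output directory are placeholders for illustration only, so substitute the nf-core pipeline and inputs you actually intend to run.

## Example launch command (placeholder pipeline and inputs)
nextflow run nf-core/rnaseq \
    -profile imperial,standard \
    --input samplesheet.csv \
    --outdir results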

Before running the pipeline you will need to load Nextflow using the environment module system on the CX1 cluster. You can do this by issuing the commands below:

## Install Nextflow via the Anaconda environment module
module load anaconda3/personal
conda install -c bioconda nextflow
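
Once installed, you can confirm that Nextflow is available on the login node before launching a run:

## Check that Nextflow is on the PATH
nextflow -version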

NB: You will need an account to use the HPC cluster CX1 in order to run the pipeline. If in doubt contact IT.
NB: Nextflow will need to submit the jobs via the job scheduler to the HPC cluster and as such the commands above will have to be executed on one of the login nodes. If in doubt contact IT.
NB: To submit jobs to the Imperial College MEDBIO cluster, use -profile imperial,medbio instead.
NB: You will need a restricted access account to use the HPC cluster MEDBIO.

Config file

See the full config file on GitHub in the nf-core/configs repository.

imperial.config
// Profile config names for nf-core/configs
 
params {
    // Config Params
    config_profile_description = 'Imperial College London - HPC Profile -- provided by nf-core/configs.'
    config_profile_contact = 'Combiz Khozoie (c.khozoie@imperial.ac.uk)'
    config_profile_url = 'https://www.imperial.ac.uk/admin-services/ict/self-service/research-support/rcs/'
 
    // Resources
    max_memory = 920.GB
    max_cpus = 256
    max_time = 1000.h
}
 
profiles {
    imperial {
        process {
            executor = 'pbspro'
 
            // Update amount of max retries and set "retry" as the error strategy for all error codes
            errorStrategy = 'retry'
            maxRetries = 5
            maxErrors = '-1'
 
 
            // General resource requirements
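            // From the third attempt onwards (4 * attempt > 8) jobs are routed to the throughput queue instead of the short queue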
            queue  = { 4 * task.attempt  > 8 ? 'v1_throughput72' : 'v1_short8' }
            cpus   = { 1	* task.attempt }
            memory = { 6.GB	* task.attempt }
            time   = { 4.h	* task.attempt }
 
            // Process-specific resource requirements
            withLabel:process_single {
                cpus   = 1
                memory = { 6.GB	* task.attempt }
                time   = { 4.h	* task.attempt }
            }
 
            withLabel:process_low {
                queue  = 'v1_throughput72'
                cpus   = { 2	 * task.attempt }
                memory = { 48.GB * task.attempt }
                time   = { 8.h * task.attempt }
            }
 
            withLabel:process_medium {
                // TARGET QUEUE: throughput
                queue = 'v1_throughput72'
                cpus   = { 8	 * task.attempt }
                memory = { 64.GB * task.attempt }
                time   = { 12.h	 * task.attempt }
            }
 
            withLabel:process_high {
                // TARGET QUEUE: medium
                queue = 'v1_medium72'
                cpus   = { 12	 * task.attempt }
                memory = { 120.GB * task.attempt }
                time   = { 12.h	 * task.attempt }
            }
 
            withLabel:process_long {
                // TARGET QUEUE: medium
                queue  = 'v1_medium72'
                cpus   = 9
                memory = 100.GB
                time   = { 24.h	 * task.attempt }
            }
 
            withLabel:process_high_memory {
                // TARGET QUEUE: medium or largemem based on memory
                queue  = { 200 * task.attempt < 921 ? 'v1_medium72' : 'v1_largemem72' }
                cpus   = { 10	  * task.attempt }
                memory = { 200.GB * task.attempt }
                time   = { 24.h	  * task.attempt }
            }
            withLabel: with_gpus {
                queue            = 'gpu72'
                time             = 24.h
                clusterOptions   = '-l select=1:ncpus=4:mem=24gb:ngpus=1:gpu_type=RTX6000'
                maxForks         = 1
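                // Enable GPU passthrough: --nv for Singularity, --gpus all for Docker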
                containerOptions = { workflow.containerEngine == "singularity" ? '--nv --env CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES':
                                    ( workflow.containerEngine == "docker" ? '--gpus all': null ) }
                beforeScript     = 'module load tools/prod'
            }
        }
    }
    medbio {
        process {
            executor = 'pbspro'
            //queue = 'pqmedbio-tput'
        }
    }
}
 
executor {
    $pbspro {
        queueSize = 49
        submitRateLimit = '10 sec'
        maxForks = 49
    }
 
    $local {
        cpus = 2
        queueSize = 1
        memory = '6 GB'
    }
}
 
singularity {
    enabled = true
    autoMounts = true
    runOptions = "-B /rds/,/rds/general/user/$USER/ephemeral/tmp/:/tmp,/rds/general/user/$USER/ephemeral/tmp/:/var/tmp"
}
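
Because Docker images are converted to Singularity images at run time, it can be useful to keep Nextflow's Singularity image cache on RDS or ephemeral storage rather than in your home directory so that images are reused across runs. The path below is an example only; choose a location that suits your own allocation.

## Example only: reuse converted Singularity images across runs
export NXF_SINGULARITY_CACHEDIR=/rds/general/user/$USER/ephemeral/nxf_singularity_cache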