#!/bin/bash

## Job Resource Interface Definition
##
## ntasks [integer(1)]:   Number of required tasks.
##                        Set larger than 1 if you want to further parallelize
##                        with MPI within your job.
## ncpus [integer(1)]:    Number of required cpus per task.
##                        Set larger than 1 if you want to further parallelize
##                        with multicore/parallel within each task.
## walltime [integer(1)]: Walltime for this job, in minutes.
##                        Must be at least 1 minute.
## memory [integer(1)]:   Memory in megabytes for each cpu.
##                        Must be at least 100 (when I tried lower values my
##                        jobs did not start at all).
##
## Default resources can be set in your .batchtools.conf.R by defining the
## variable 'default.resources' as a named list (see the commented example
## at the end of this file).

<%
# relative paths are not handled well by Slurm
log.file = normalizePath(log.file, winslash = "/", mustWork = FALSE)
-%>

#SBATCH --job-name=<%= job.name %>
#SBATCH --output=<%= log.file %>
#SBATCH --error=<%= log.file %>
#SBATCH --time=<%= ceiling(resources$walltime) %>
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=<%= resources$ncpus %>
#SBATCH --mem-per-cpu=<%= resources$memory %>
<%= if (!is.null(resources$partition)) sprintf("#SBATCH --partition='%s'", resources$partition) %>
<%= if (array.jobs) sprintf("#SBATCH --array=1-%i", nrow(jobs)) else "" %>
<%= if (!is.null(resources$account)) sprintf("#SBATCH --account='%s'", resources$account) %>

## Initialize work environment like
## source /etc/profile
## module add ...

## Export value of DEBUGME environment var to slave
export DEBUGME=<%= Sys.getenv("DEBUGME") %>

## Run R:
## we merge R output with stdout from SLURM, which is then logged via the --output option
Rscript -e 'batchtools::doJobCollection("<%= uri %>")'
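
## ---------------------------------------------------------------------------
## Example configuration (illustration only; these lines are comments and are
## not part of the submitted job script). A minimal .batchtools.conf.R sketch
## that points batchtools at this template and sets 'default.resources'. The
## template file name and all resource values below are assumptions; adjust
## them to your cluster's limits.
##
##   cluster.functions = batchtools::makeClusterFunctionsSlurm(
##     template   = "slurm.tmpl",  # path to this file (assumed name)
##     array.jobs = TRUE
##   )
##   default.resources = list(
##     ncpus    = 1,     # cpus per task
##     memory   = 1024,  # megabytes per cpu
##     walltime = 60     # minutes
##   )
##
## Individual submissions can override these defaults via the 'resources'
## argument of batchtools::submitJobs(), for example:
##   submitJobs(resources = list(ncpus = 4, memory = 2048, walltime = 120))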