Diffstat (limited to 'Tools/BatchScripts')
-rw-r--r--  Tools/BatchScripts/batch_cori.sh          |  8
-rw-r--r--  Tools/BatchScripts/batch_cori_haswell.sh  |  4
-rw-r--r--  Tools/BatchScripts/batch_perlmutter.sh    | 45
3 files changed, 53 insertions(+), 4 deletions(-)
diff --git a/Tools/BatchScripts/batch_cori.sh b/Tools/BatchScripts/batch_cori.sh
index a827ad779..3c9698275 100644
--- a/Tools/BatchScripts/batch_cori.sh
+++ b/Tools/BatchScripts/batch_cori.sh
@@ -14,8 +14,8 @@
 #SBATCH -S 4
 #SBATCH -J <job name>
 #SBATCH -A <allocation ID>
-#SBATCH -e error.txt
-#SBATCH -o output.txt
+#SBATCH -e WarpX.e%j
+#SBATCH -o WarpX.o%j
 
 export OMP_PLACES=threads
 export OMP_PROC_BIND=spread
@@ -40,4 +40,6 @@ export WARPX_THREAD_COUNT=$(( ${CORI_NHYPERTHREADS_MAX} / ${WARPX_NMPI_PER_NODE}
 # for async_io support: (optional)
 export MPICH_MAX_THREAD_SAFETY=multiple
 
-srun --cpu_bind=cores -n $(( ${SLURM_JOB_NUM_NODES} * ${WARPX_NMPI_PER_NODE} )) -c ${WARPX_THREAD_COUNT} <path/to/executable> <input file>
+srun --cpu_bind=cores -n $(( ${SLURM_JOB_NUM_NODES} * ${WARPX_NMPI_PER_NODE} )) -c ${WARPX_THREAD_COUNT} \
+    <path/to/executable> <input file> \
+    > output.txt
diff --git a/Tools/BatchScripts/batch_cori_haswell.sh b/Tools/BatchScripts/batch_cori_haswell.sh
index 0f3c66b49..bce5bfcaa 100644
--- a/Tools/BatchScripts/batch_cori_haswell.sh
+++ b/Tools/BatchScripts/batch_cori_haswell.sh
@@ -36,4 +36,6 @@ export MPICH_MAX_THREAD_SAFETY=multiple
 
 EXE="<path/to/executable>"
 
-srun --cpu_bind=cores -n $(( ${SLURM_JOB_NUM_NODES} * ${WARPX_NMPI_PER_NODE} )) ${EXE} <input file>
+srun --cpu_bind=cores -n $(( ${SLURM_JOB_NUM_NODES} * ${WARPX_NMPI_PER_NODE} )) \
+    ${EXE} <input file> \
+    > output.txt
diff --git a/Tools/BatchScripts/batch_perlmutter.sh b/Tools/BatchScripts/batch_perlmutter.sh
new file mode 100644
index 000000000..e3c47419e
--- /dev/null
+++ b/Tools/BatchScripts/batch_perlmutter.sh
@@ -0,0 +1,45 @@
+#!/bin/bash -l
+
+# Copyright 2021 Axel Huebl, Kevin Gott
+#
+# This file is part of WarpX.
+#
+# License: BSD-3-Clause-LBNL
+
+#SBATCH -t 01:00:00
+#SBATCH -N 4
+#SBATCH -J WarpX
+#SBATCH -A <proj>
+#SBATCH -C gpu
+#SBATCH -c 32
+#SBATCH --ntasks-per-node=4
+#SBATCH --gpus-per-task=1
+#SBATCH --gpu-bind=single:1
+#SBATCH -o WarpX.o%j
+#SBATCH -e WarpX.e%j
+
+# ============
+# -N =                 nodes
+# -n =                 tasks (MPI ranks, usually = G)
+# -G =                 GPUs (full Perlmutter node, 4)
+# -c =                 CPU per task (128 total threads on CPU, 32 per GPU)
+#
+# --ntasks-per-node=   number of tasks (MPI ranks) per node (full node, 4)
+# --gpus-per-task=     number of GPUs per task (MPI rank) (full node, 4)
+# --gpus-per-node=     number of GPUs per node (full node, 4)
+#
+# --gpu-bind=single:1  sets only one GPU to be visible to each MPI rank
+#                      (quiets AMReX init warnings)
+#
+# Recommend using --ntasks-per-node=4, --gpus-per-task=1 and --gpu-bind=single:1,
+# as they are fixed values and allow for easy scaling with less adjustments.
+#
+# ============
+
+EXE=./warpx
+#EXE=../WarpX/build/bin/warpx.3d.MPI.CUDA.DP.OPMD.QED
+#EXE=./main3d.gnu.TPROF.MPI.CUDA.ex
+INPUTS=inputs_small
+
+srun ${EXE} ${INPUTS} \
+  > output.txt
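A minimal usage sketch for the new Perlmutter template, assuming it is copied into a run directory and the <proj> allocation, EXE, and INPUTS placeholders have been edited first (the run/ directory name is purely illustrative; sbatch, squeue, and tail are standard Slurm/Unix tools):

    # copy the template next to the executable and edit the placeholders first
    cp Tools/BatchScripts/batch_perlmutter.sh run/
    cd run/

    # submit to Slurm; the batch shell's stdout/stderr go to WarpX.o%j / WarpX.e%j,
    # while the application output is redirected to output.txt by the script itself
    sbatch batch_perlmutter.sh

    # monitor the job and follow the redirected application output
    squeue -u $USER
    tail -f output.txt

As the comments in the new script note, --ntasks-per-node=4, --gpus-per-task=1, and --gpu-bind=single:1 are fixed per-node values on Perlmutter GPU nodes, so scaling a run up or down only requires changing the node count given with -N.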