author     Axel Huebl <axel.huebl@plasma.ninja>  2021-08-30 16:47:16 -0700
committer  GitHub <noreply@github.com>           2021-08-30 16:47:16 -0700
commit  35a66bcd8fde93507d7c1538d61893fbec016b31 (patch)
tree    07856e98502c8eaf9774451736c9d071dd3e806e /Tools/BatchScripts
parent  e130e57efcee4fb08cae9a5888c67c83db63abb4 (diff)
Docs: Perlmutter (#2229)
* Docs: Perlmutter

  Start a documentation page for Perlmutter.

* Cleaning

  - better links to docs
  - clean submission script

* Perlmutter: Add I/O
Diffstat (limited to 'Tools/BatchScripts')
-rw-r--r--  Tools/BatchScripts/batch_cori.sh           8
-rw-r--r--  Tools/BatchScripts/batch_cori_haswell.sh   4
-rw-r--r--  Tools/BatchScripts/batch_perlmutter.sh    45
3 files changed, 53 insertions(+), 4 deletions(-)
diff --git a/Tools/BatchScripts/batch_cori.sh b/Tools/BatchScripts/batch_cori.sh
index a827ad779..3c9698275 100644
--- a/Tools/BatchScripts/batch_cori.sh
+++ b/Tools/BatchScripts/batch_cori.sh
@@ -14,8 +14,8 @@
 #SBATCH -S 4
 #SBATCH -J <job name>
 #SBATCH -A <allocation ID>
-#SBATCH -e error.txt
-#SBATCH -o output.txt
+#SBATCH -e WarpX.e%j
+#SBATCH -o WarpX.o%j
 
 export OMP_PLACES=threads
 export OMP_PROC_BIND=spread
@@ -40,4 +40,6 @@ export WARPX_THREAD_COUNT=$(( ${CORI_NHYPERTHREADS_MAX} / ${WARPX_NMPI_PER_NODE} ))
 
 # for async_io support: (optional)
 export MPICH_MAX_THREAD_SAFETY=multiple
-srun --cpu_bind=cores -n $(( ${SLURM_JOB_NUM_NODES} * ${WARPX_NMPI_PER_NODE} )) -c ${WARPX_THREAD_COUNT} <path/to/executable> <input file>
+srun --cpu_bind=cores -n $(( ${SLURM_JOB_NUM_NODES} * ${WARPX_NMPI_PER_NODE} )) -c ${WARPX_THREAD_COUNT} \
+  <path/to/executable> <input file> \
+  > output.txt
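With this change, Slurm's per-job log files carry the job ID (%j expands to the numeric Slurm job ID) instead of fixed names, while the application's stdout is captured in output.txt through the explicit redirect. So repeated submissions from one directory no longer clobber each other's Slurm logs, though the redirected output.txt is still shared. A minimal usage sketch, where job ID 123456 is hypothetical:

    $ sbatch batch_cori.sh
    Submitted batch job 123456
    $ ls
    WarpX.e123456  WarpX.o123456  output.txt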
diff --git a/Tools/BatchScripts/batch_cori_haswell.sh b/Tools/BatchScripts/batch_cori_haswell.sh
index 0f3c66b49..bce5bfcaa 100644
--- a/Tools/BatchScripts/batch_cori_haswell.sh
+++ b/Tools/BatchScripts/batch_cori_haswell.sh
@@ -36,4 +36,6 @@ export MPICH_MAX_THREAD_SAFETY=multiple
 
 EXE="<path/to/executable>"
 
-srun --cpu_bind=cores -n $(( ${SLURM_JOB_NUM_NODES} * ${WARPX_NMPI_PER_NODE} )) ${EXE} <input file>
+srun --cpu_bind=cores -n $(( ${SLURM_JOB_NUM_NODES} * ${WARPX_NMPI_PER_NODE} )) \
+  ${EXE} <input file> \
+  > output.txt
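The Haswell script follows the same pattern, with the executable path kept in the EXE variable. Since stdout now goes to output.txt rather than the Slurm output file, the run can be followed from a login node while the job executes; a small sketch, assuming the job was launched from the current directory (the job ID is hypothetical):

    $ tail -f output.txt       # follow the simulation log as it is written
    $ tail -f WarpX.e123456    # watch the Slurm error log for runtime problems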
diff --git a/Tools/BatchScripts/batch_perlmutter.sh b/Tools/BatchScripts/batch_perlmutter.sh
new file mode 100644
index 000000000..e3c47419e
--- /dev/null
+++ b/Tools/BatchScripts/batch_perlmutter.sh
@@ -0,0 +1,45 @@
+#!/bin/bash -l
+
+# Copyright 2021 Axel Huebl, Kevin Gott
+#
+# This file is part of WarpX.
+#
+# License: BSD-3-Clause-LBNL
+
+#SBATCH -t 01:00:00
+#SBATCH -N 4
+#SBATCH -J WarpX
+#SBATCH -A <proj>
+#SBATCH -C gpu
+#SBATCH -c 32
+#SBATCH --ntasks-per-node=4
+#SBATCH --gpus-per-task=1
+#SBATCH --gpu-bind=single:1
+#SBATCH -o WarpX.o%j
+#SBATCH -e WarpX.e%j
+
+# ============
+# -N = nodes
+# -n = tasks (MPI ranks, usually = G)
+# -G = GPUs (full Perlmutter node, 4)
+# -c = CPU per task (128 total threads on CPU, 32 per GPU)
+#
+# --ntasks-per-node= number of tasks (MPI ranks) per node (full node, 4)
+# --gpus-per-task= number of GPUs per task (MPI rank) (full node, 4)
+# --gpus-per-node= number of GPUs per node (full node, 4)
+#
+# --gpu-bind=single:1 sets only one GPU to be visible to each MPI rank
+# (quiets AMReX init warnings)
+#
+# Recommend using --ntasks-per-node=4, --gpus-per-task=1 and --gpu-bind=single:1,
+# as these are fixed per-node values and allow for easy scaling with fewer adjustments.
+#
+# ============
+
+EXE=./warpx
+#EXE=../WarpX/build/bin/warpx.3d.MPI.CUDA.DP.OPMD.QED
+#EXE=./main3d.gnu.TPROF.MPI.CUDA.ex
+INPUTS=inputs_small
+
+srun ${EXE} ${INPUTS} \
+ > output.txt
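Because --ntasks-per-node=4 and --gpus-per-task=1 are fixed per-node values on Perlmutter's 4-GPU nodes, scaling the job only requires changing the node count; the <proj> placeholder must still be replaced with a real allocation. Command-line sbatch options override the #SBATCH directives in the script, so both can be set at submission time. A minimal sketch, where m1234 is a hypothetical NERSC project and the -N override is optional:

    $ sbatch -A m1234 -N 2 batch_perlmutter.sh   # 2 nodes -> 8 MPI ranks, 8 GPUs
    $ squeue -u $USER                            # check the job's queue state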