-rw-r--r-- | Tools/machines/perlmutter-nersc/perlmutter.sbatch | 21
1 file changed, 1 insertion(+), 20 deletions(-)
diff --git a/Tools/machines/perlmutter-nersc/perlmutter.sbatch b/Tools/machines/perlmutter-nersc/perlmutter.sbatch
index 65777f304..93e79f151 100644
--- a/Tools/machines/perlmutter-nersc/perlmutter.sbatch
+++ b/Tools/machines/perlmutter-nersc/perlmutter.sbatch
@@ -11,8 +11,7 @@
 #SBATCH -J WarpX
 # note: <proj> must end on _g
 #SBATCH -A <proj>
-# for m3906_g LBNL/AMP users: for large runs, comment in
-##SBATCH -q early_science
+#SBATCH -q regular
 #SBATCH -C gpu
 #SBATCH -c 32
 #SBATCH --ntasks-per-node=4
@@ -20,24 +19,6 @@
 #SBATCH -o WarpX.o%j
 #SBATCH -e WarpX.e%j
 
-# ============
-# -N =                 nodes
-# -n =                 tasks (MPI ranks, usually = G)
-# -G =                 GPUs (full Perlmutter node, 4)
-# -c =                 CPU per task (128 total threads on CPU, 32 per GPU)
-#
-# --ntasks-per-node=   number of tasks (MPI ranks) per node (full node, 4)
-# --gpus-per-task=     number of GPUs per task (MPI rank) (full node, 4)
-# --gpus-per-node=     number of GPUs per node (full node, 4)
-#
-# --gpu-bind=single:1  sets only one GPU to be visible to each MPI rank
-#                      (quiets AMReX init warnings)
-#
-# Recommend using --ntasks-per-node=4, --gpus-per-task=1 and --gpu-bind=single:1,
-# as they are fixed values and allow for easy scaling with less adjustments.
-#
-# ============
-
 # GPU-aware MPI
 export MPICH_GPU_SUPPORT_ENABLED=1
 
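
The comment block removed above recommends --ntasks-per-node=4, --gpus-per-task=1 and --gpu-bind=single:1 because those values stay fixed and only the node count changes when a job is scaled. The following is a minimal sketch of how those flags fit together in a batch script; the node count, executable name and input-file name are placeholders for illustration and are not part of this commit.

#!/bin/bash
#SBATCH -q regular
#SBATCH -C gpu
#SBATCH -N 2                   # scale the job by changing only the node count (placeholder value)
#SBATCH -c 32                  # 32 of the 128 CPU threads per MPI rank (one rank per GPU)
#SBATCH --ntasks-per-node=4    # one MPI rank per GPU; Perlmutter GPU nodes have 4 GPUs
#SBATCH --gpus-per-task=1
#SBATCH --gpu-bind=single:1    # each rank sees exactly one GPU (quiets AMReX init warnings)

# GPU-aware MPI, as in the script above
export MPICH_GPU_SUPPORT_ENABLED=1

# 2 nodes x 4 ranks/node = 8 MPI ranks in total;
# executable and input names below are placeholders
srun ./warpx inputs > output.txt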