# WarpX profile for NERSC Perlmutter (GPU nodes).
# Source this file (do not execute it) to load modules, dependency paths,
# and compiler hints for building/running WarpX with CUDA on A100 GPUs.

# please set your project account
export proj=""  # change me! GPU projects must end in "..._g"

# remembers the location of this script
export MY_PROFILE="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/$(basename "${BASH_SOURCE[0]}")"
if [ -z "${proj-}" ]; then
    echo "WARNING: The 'proj' variable is not yet set in your $MY_PROFILE file! Please edit its line 2 to continue!"
    # 'return' (not 'exit'): this file is meant to be sourced, and exiting
    # would kill the user's login shell
    return
fi

# required dependencies
module load cmake/3.22.0

# optional: for QED support with detailed tables
export BOOST_ROOT=/global/common/software/spackecp/perlmutter/e4s-22.11/83104/spack/opt/spack/linux-sles15-zen3/gcc-11.2.0/boost-1.80.0-ute7nbx4wmfrw53q7hyh6wyezub5ljry

# optional: for openPMD and PSATD+RZ support
# note: ${proj%_g} strips the trailing "_g" from GPU allocation names to get
# the shared CFS directory name
module load cray-hdf5-parallel/1.12.2.1
export CMAKE_PREFIX_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/c-blosc-1.21.1:$CMAKE_PREFIX_PATH
export CMAKE_PREFIX_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/adios2-2.8.3:$CMAKE_PREFIX_PATH
export CMAKE_PREFIX_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/blaspp-master:$CMAKE_PREFIX_PATH
export CMAKE_PREFIX_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/lapackpp-master:$CMAKE_PREFIX_PATH

export LD_LIBRARY_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/c-blosc-1.21.1/lib64:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/adios2-2.8.3/lib64:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/blaspp-master/lib64:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/lapackpp-master/lib64:$LD_LIBRARY_PATH

# optional: CCache
export PATH=/global/common/software/spackecp/perlmutter/e4s-22.05/78535/spack/opt/spack/cray-sles15-zen3/gcc-11.2.0/ccache-4.5.1-ybl7xefvggn6hov4dsdxxnztji74tolj/bin:$PATH

# optional: for Python bindings or libEnsemble
module load cray-python/3.9.13.1

if [ -d "${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/venvs/warpx" ]
then
    source "${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/venvs/warpx/bin/activate"
fi

# an alias to request an interactive batch node for one hour
#   for parallel execution, start on the batch node: srun <command>
alias getNode="salloc -N 1 --ntasks-per-node=4 -t 1:00:00 -q interactive -C gpu --gpu-bind=single:1 -c 32 -G 4 -A $proj"
# an alias to run a command on a batch node for up to 30min
#   usage: runNode <command>
alias runNode="srun -N 1 --ntasks-per-node=4 -t 0:30:00 -q interactive -C gpu --gpu-bind=single:1 -c 32 -G 4 -A $proj"

# necessary to use CUDA-Aware MPI and run a job
export CRAY_ACCEL_TARGET=nvidia80

# optimize CUDA compilation for A100
export AMREX_CUDA_ARCH=8.0

# optimize CPU microarchitecture for AMD EPYC 3rd Gen (Milan/Zen3)
# note: the cc/CC/ftn wrappers below add those
export CXXFLAGS="-march=znver3"
export CFLAGS="-march=znver3"

# compiler environment hints
export CC=cc
export CXX=CC
export FC=ftn
# 'command -v' is the portable replacement for 'which'
export CUDACXX=$(command -v nvcc)
export CUDAHOSTCXX=CC