author    David Grote <grote1@llnl.gov>  2022-01-20 15:53:23 -0800
committer GitHub <noreply@github.com>    2022-01-20 15:53:23 -0800
commit c7c8a710cb1b99bc72343f4215011f3c0ec4f16e
tree   c3aad20d0b41185ad3d4fd31b8c7797b6a487844
parent e9e79322bde6a2b6cd6efc44508146c62325004d
Implement PML for the outer RZ boundary with PSATD (#2211)
* Initial version of RZ PSATD PML BCs
* Cleaned up some bugs
* Add support for the do_pml_in_domain option
* Cleaned up stuff for building
* Fix PMLPsatdAlgorithm macro
* Removed unneeded variable from SpectralSolverRZ
* Change length-3 arrays to length 2 (for 2D)
* Cleanup around DampPML
* Added more checks of pml[lev]
* Added CI test for RZ PML
* Added code to update the corner guard cells
* Further updates
* Added CI test
* Fixed EOL space
* Updated CI benchmarks, removing round-off fields
* Changes to CI missed in previous commit
* Various fixes for clean up
* More fixes for clean up
* Further cleanup
* Updated benchmark
* Fixed benchmarks file
* Minor cleanup
* Added round-off benchmark values
* Fixed testname in analysis_pml_psatd_rz.py
* Update comment in analysis file
* Put pml_rz code in RZ and PSATD macro blocks
* [pre-commit.ci] auto fixes from pre-commit.com hooks; for more information, see https://pre-commit.ci
* Add geometry.dims input to CI test input file, inputs_rz
* Cleanup to match recent changes

Co-authored-by: Remi Lehe <remi.lehe@normalesup.org>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Diffstat (limited to 'Source/Parallelization')
-rw-r--r--  Source/Parallelization/GuardCellManager.H    |  6
-rw-r--r--  Source/Parallelization/GuardCellManager.cpp  | 13
-rw-r--r--  Source/Parallelization/WarpXComm.cpp         | 79
3 files changed, 78 insertions(+), 20 deletions(-)
diff --git a/Source/Parallelization/GuardCellManager.H b/Source/Parallelization/GuardCellManager.H
index 182b4aa80..05d99de4d 100644
--- a/Source/Parallelization/GuardCellManager.H
+++ b/Source/Parallelization/GuardCellManager.H
@@ -44,6 +44,9 @@ public:
* \param do_electrostatic Whether to run in electrostatic mode i.e. solving the Poisson equation instead of the Maxwell equations.
* \param do_multi_J Whether to use the multi-J PSATD scheme
* \param fft_do_time_averaging Whether to average the E and B field in time (with PSATD) before interpolating them onto the macro-particles
+ * \param do_pml whether pml is turned on (only used by RZ PSATD)
+ * \param do_pml_in_domain whether pml is done in the domain (only used by RZ PSATD)
+ * \param pml_ncell number of cells on the pml layer (only used by RZ PSATD)
* \param ref_ratios mesh refinement ratios between mesh-refinement levels
*/
void Init(
@@ -65,6 +68,9 @@ public:
const int do_electrostatic,
const int do_multi_J,
const bool fft_do_time_averaging,
+ const bool do_pml,
+ const int do_pml_in_domain,
+ const int pml_ncell,
const amrex::Vector<amrex::IntVect>& ref_ratios);
// Guard cells allocated for MultiFabs E and B
diff --git a/Source/Parallelization/GuardCellManager.cpp b/Source/Parallelization/GuardCellManager.cpp
index 2fa0c4c4a..d09e46951 100644
--- a/Source/Parallelization/GuardCellManager.cpp
+++ b/Source/Parallelization/GuardCellManager.cpp
@@ -49,6 +49,9 @@ guardCellManager::Init (
const int do_electrostatic,
const int do_multi_J,
const bool fft_do_time_averaging,
+ const bool do_pml,
+ const int do_pml_in_domain,
+ const int pml_ncell,
const amrex::Vector<amrex::IntVect>& ref_ratios)
{
// When using subcycling, the particles on the fine level perform two pushes
@@ -202,6 +205,16 @@ guardCellManager::Init (
IntVect ngFFT = IntVect(ngFFt_z);
#endif
+#ifdef WARPX_DIM_RZ
+ if (do_pml) {
+ if (!do_pml_in_domain) {
+ ngFFT[0] = std::max(ngFFT[0], pml_ncell);
+ }
+ }
+#else
+ amrex::ignore_unused(do_pml, do_pml_in_domain, pml_ncell);
+#endif
+
// All boxes should have the same number of guard cells, to avoid temporary parallel copies:
// thus we take the maximum of the required number of guard cells over all available fields.
for (int i_dim = 0; i_dim < AMREX_SPACEDIM; i_dim++) {
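For illustration, a minimal stand-alone sketch of the guard-cell widening rule this hunk adds (plain C++ with hypothetical stand-in values, not WarpX code): when the PML sits outside the physical domain in an RZ PSATD run, the radial guard region is widened to at least pml_ncell so the damping layer fits; otherwise pml_ncell is irrelevant, matching the parameters documented in GuardCellManager.H above.

    #include <algorithm>
    #include <cstdio>

    int main ()
    {
        // Hypothetical stand-in values: guard cells required by the spectral
        // stencil in r, and the user-requested PML thickness.
        int ngFFT_r = 8;
        const int pml_ncell = 20;
        const bool do_pml = true;
        const bool do_pml_in_domain = false;

        // Rule from the hunk above: an out-of-domain PML must fit entirely in
        // the radial guard region, so widen it to at least pml_ncell.
        if (do_pml && !do_pml_in_domain) {
            ngFFT_r = std::max(ngFFT_r, pml_ncell);
        }

        std::printf("radial guard cells: %d\n", ngFFT_r); // prints 20
    }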
diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp
index d0f7e352a..9e4a7de88 100644
--- a/Source/Parallelization/WarpXComm.cpp
+++ b/Source/Parallelization/WarpXComm.cpp
@@ -9,6 +9,9 @@
#include "WarpX.H"
#include "BoundaryConditions/PML.H"
+#if (defined WARPX_DIM_RZ) && (defined WARPX_USE_PSATD)
+# include "BoundaryConditions/PML_RZ.H"
+#endif
#include "Filter/BilinearFilter.H"
#include "Utils/CoarsenMR.H"
#include "Utils/IntervalsParser.H"
@@ -548,14 +551,22 @@ WarpX::FillBoundaryE (int lev, PatchType patch_type, IntVect ng)
{
if (patch_type == PatchType::fine)
{
- if (do_pml && pml[lev]->ok())
- {
- pml[lev]->ExchangeE(patch_type,
- { Efield_fp[lev][0].get(),
- Efield_fp[lev][1].get(),
- Efield_fp[lev][2].get() },
- do_pml_in_domain);
- pml[lev]->FillBoundaryE(patch_type);
+ if (do_pml) {
+ if (pml[lev] && pml[lev]->ok())
+ {
+ pml[lev]->ExchangeE(patch_type,
+ { Efield_fp[lev][0].get(),
+ Efield_fp[lev][1].get(),
+ Efield_fp[lev][2].get() },
+ do_pml_in_domain);
+ pml[lev]->FillBoundaryE(patch_type);
+ }
+#if (defined WARPX_DIM_RZ) && (defined WARPX_USE_PSATD)
+ if (pml_rz[lev])
+ {
+ pml_rz[lev]->FillBoundaryE(patch_type);
+ }
+#endif
}
const amrex::Periodicity& period = Geom(lev).periodicity();
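The restructuring above is more than cosmetic: with RZ PSATD the Cartesian PML objects may never be allocated, so pml[lev] can be null even when do_pml is true, and the old condition would have dereferenced it. A self-contained sketch of the short-circuit guard (mock FakePML type and containers, not WarpX's actual members):

    #include <memory>
    #include <vector>

    struct FakePML { bool ok () const { return true; } };

    int main ()
    {
        // Mock setup: level 0 exists but its Cartesian PML was never
        // constructed, as happens when only the RZ PML (pml_rz) is in use.
        std::vector<std::unique_ptr<FakePML>> pml(1);
        const int lev = 0;
        const bool do_pml = true;

        // && short-circuits left to right: pml[lev]->ok() is evaluated only
        // after pml[lev] is known to be non-null, so no null dereference.
        if (do_pml && pml[lev] && pml[lev]->ok()) {
            // exchange fields with the PML and fill its guard cells here
        }
    }

The same guard is applied in the hunks below to FillBoundaryB, FillBoundaryF, FillBoundaryG, and NodalSyncPML.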
@@ -610,15 +621,25 @@ WarpX::FillBoundaryB (int lev, PatchType patch_type, IntVect ng)
{
if (patch_type == PatchType::fine)
{
- if (do_pml && pml[lev]->ok())
+ if (do_pml)
{
- pml[lev]->ExchangeB(patch_type,
- { Bfield_fp[lev][0].get(),
- Bfield_fp[lev][1].get(),
- Bfield_fp[lev][2].get() },
- do_pml_in_domain);
- pml[lev]->FillBoundaryB(patch_type);
+ if (pml[lev] && pml[lev]->ok())
+ {
+ pml[lev]->ExchangeB(patch_type,
+ { Bfield_fp[lev][0].get(),
+ Bfield_fp[lev][1].get(),
+ Bfield_fp[lev][2].get() },
+ do_pml_in_domain);
+ pml[lev]->FillBoundaryB(patch_type);
+ }
+#if (defined WARPX_DIM_RZ) && (defined WARPX_USE_PSATD)
+ if (pml_rz[lev])
+ {
+ pml_rz[lev]->FillBoundaryB(patch_type);
+ }
+#endif
}
+
const amrex::Periodicity& period = Geom(lev).periodicity();
if ( safe_guard_cells ) {
Vector<MultiFab*> mf{Bfield_fp[lev][0].get(),Bfield_fp[lev][1].get(),Bfield_fp[lev][2].get()};
@@ -777,7 +798,7 @@ WarpX::FillBoundaryF (int lev, PatchType patch_type, IntVect ng)
{
if (patch_type == PatchType::fine)
{
- if (do_pml && pml[lev]->ok())
+ if (do_pml && pml[lev] && pml[lev]->ok())
{
if (F_fp[lev]) pml[lev]->ExchangeF(patch_type, F_fp[lev].get(), do_pml_in_domain);
pml[lev]->FillBoundaryF(patch_type);
@@ -792,7 +813,7 @@ WarpX::FillBoundaryF (int lev, PatchType patch_type, IntVect ng)
}
else if (patch_type == PatchType::coarse)
{
- if (do_pml && pml[lev]->ok())
+ if (do_pml && pml[lev] && pml[lev]->ok())
{
if (F_cp[lev]) pml[lev]->ExchangeF(patch_type, F_cp[lev].get(), do_pml_in_domain);
pml[lev]->FillBoundaryF(patch_type);
@@ -821,7 +842,7 @@ void WarpX::FillBoundaryG (int lev, PatchType patch_type, IntVect ng)
{
if (patch_type == PatchType::fine)
{
- if (do_pml && pml[lev]->ok())
+ if (do_pml && pml[lev] && pml[lev]->ok())
{
if (G_fp[lev]) pml[lev]->ExchangeG(patch_type, G_fp[lev].get(), do_pml_in_domain);
pml[lev]->FillBoundaryG(patch_type);
@@ -836,7 +857,7 @@ void WarpX::FillBoundaryG (int lev, PatchType patch_type, IntVect ng)
}
else if (patch_type == PatchType::coarse)
{
- if (do_pml && pml[lev]->ok())
+ if (do_pml && pml[lev] && pml[lev]->ok())
{
if (G_cp[lev]) pml[lev]->ExchangeG(patch_type, G_cp[lev].get(), do_pml_in_domain);
pml[lev]->FillBoundaryG(patch_type);
@@ -1284,7 +1305,7 @@ void WarpX::NodalSyncPML (int lev)
void WarpX::NodalSyncPML (int lev, PatchType patch_type)
{
- if (pml[lev]->ok())
+ if (pml[lev] && pml[lev]->ok())
{
const std::array<amrex::MultiFab*,3>& pml_E = (patch_type == PatchType::fine) ?
pml[lev]->GetE_fp() : pml[lev]->GetE_cp();
@@ -1308,6 +1329,24 @@ void WarpX::NodalSyncPML (int lev, PatchType patch_type)
WarpXCommUtil::OverrideSync(*pml_G, period);
}
}
+
+#if (defined WARPX_DIM_RZ) && (defined WARPX_USE_PSATD)
+ if (pml_rz[lev])
+ {
+ // This is not actually needed with RZ PSATD since the
+ // arrays are always cell centered. Keep for now since
+ // it may be useful if the PML is used with FDTD
+ const std::array<amrex::MultiFab*,2> pml_rz_E = pml_rz[lev]->GetE_fp();
+ const std::array<amrex::MultiFab*,2> pml_rz_B = pml_rz[lev]->GetB_fp();
+
+ // Always synchronize nodal points
+ const amrex::Periodicity& period = Geom(lev).periodicity();
+ pml_rz_E[0]->OverrideSync(period);
+ pml_rz_E[1]->OverrideSync(period);
+ pml_rz_B[0]->OverrideSync(period);
+ pml_rz_B[1]->OverrideSync(period);
+ }
+#endif
}
void WarpX::NodalSync (amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>,3>>& mf_fp,
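For intuition about the OverrideSync calls added to NodalSyncPML: grids that abut one another duplicate the nodal points on their shared faces, and independent updates can leave those copies differing by round-off, so the synchronization forces every copy to the owner's value. A conceptual sketch in plain C++ (two hypothetical 1-D grids, not AMReX's distributed MultiFabs):

    #include <cstdio>

    int main ()
    {
        // Two overlapping 1-D "grids": a[3] and b[0] are copies of the same
        // physical nodal point, left disagreeing by round-off.
        double a[4] = {1.0, 2.0, 3.0, 4.0};
        double b[3] = {4.0 + 1e-15, 5.0, 6.0};

        // OverrideSync semantics: the owning grid's copy (here a) wins,
        // leaving all copies of the shared point bitwise identical.
        b[0] = a[3];

        std::printf("shared node after sync: a = %.17g, b = %.17g\n", a[3], b[0]);
    }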