From d81503dd97a4c8154feaec5a9fe2738bc8492cab Mon Sep 17 00:00:00 2001
From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com>
Date: Mon, 12 Jun 2023 15:40:45 -0700
Subject: Ohm's law solver (hybrid kinetic-fluid extension) (#3665)

* Add "None" as an option for the Maxwell solver
* fixed some of the reasons for failing CI tests
* no longer pass `do_electrostatic` to `GuardCellManager`
* renamed `MaxwellSolverAlgo` to `ElectromagneticSolverAlgo`
* rename `do_electrostatic` to `electrostatic_solver_id`
* rename `maxwell_solver_id` to `electromagnetic_solver_id`
* start of hybrid solver logic
* changes requested during PR review
* remove `do_no_deposit` from tests without field evolution
* added `HybridSolveE.cpp`
* bulk of the hybrid solver implementation
* mostly reproduce 1d cold ion mirror results
* ion Bernstein modes reproduced with this version
* fix bug with reduced diagnostic FieldProbe in 1d
* added hybrid solver installation to PICMI and added example script generating ion-Bernstein modes
* enable the use of `FieldProbe` default parameter values
* use default field-probe values
* steady progress
* add `do_not_push` flag to picmi
* possibly use nodal fields
* added extra multifab for current calculated from curl B
* added `CalculateTotalCurrent` functions
* rewrote implementation to calculate J x curl B on a nodal grid first
* fill boundary for auxiliary MFs used during hybrid E solve
* properly handle nonzero resistivity
* updated hybrid model example
* clean up example scripts
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* fixed invalid memory access for GPU and other code cleanups
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* refinements on the example scripts
* added ion beam instability example
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* added EM modes and ion beam examples to CI test suite
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* started docs section on the hybrid model
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* more progress on documentation
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* added ion Landau damping verification test / example
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* add checksum benchmark for Landau damping example
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* added fields.py wrapper to access total current density in hybrid case
* refactored the charge deposition fix to be performed with the field data rather than individual particles
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* also correct current density at PEC boundary
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* made resistivity a parsed function of `rho`
* work on PEC boundary condition
* corrections pointed out during code review
* fix build fails due to unused variables
* fix issue with GPU builds
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* actually apply rho boundary correction in EM case
* take one sided derivative at PEC boundary when calculating div Pe
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* actually apply rho boundary correction in EM case
* removed specific treatment of E-field on PEC boundary for Ohm's law solver
* first round of CI fixes
* second round of CI fixes
* added description of deposition logic with PEC boundaries to documentation
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* third round of CI fixing
* move J and rho boundary handling to after `SyncRho` and `SyncCurrent` calls
* properly order the application of boundary conditions on rho, for electrostatic simulations
* fourth round of CI fixing
* moved calculation of total current (Ampere's law) to separate function
* add random seed specification to `picmi`
* code clean-up -> renamed hybrid model to hybrid-PIC model
* added magnetic reconnection example
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* code cleanup & benchmark updates
* update PICMI class name for hybrid solver to `HybridPICSolver`
* don't apply J field boundary in
* don't apply J field boundary in `MultiParticleContainer::DepositCurrent`
* apply changes requested during code review
* Apply suggestions from code review (Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>)
* Loosen tolerance on failing CI test
* Removed unused variable
* code cleanup: make use of `MultiParticleContainer::DepositCurrent` in `AddSpaceChargeFieldLabFrame`
* switch to using a rho_fp_temp multifab for old and averaged charge density field, also no longer require particles to move only one cell per step
* use `ablastr::coarsen::sample` namespace in `HybridPICSolveECartesian`
* switched to using `MultiFab::LinComb` instead of self-written GPU kernels to calculate averaged or extrapolated current density
* add verbosity flag for the Ohm solver tests
* deal with fine versus coarse patches
* add theoretical instability growth / damping rates to hybrid-PIC examples
* update ion-Bernstein mode plot in documentation
* move the `ApplyRhofieldBoundary` call to after `SumBoundary`
* use a uniform calculation for the number of cells a given index is from the boundary
* remove unused variable
* limit number of ghost cells updated during PEC BC application
* the number of ghost cells to consider depends on whether the field is nodal or not
* attempt 1 to fix failing CI tests
* attempt 2 to fix failing CI tests and code cleanup
* attempt 3 to fix failing CI tests
* attempt 4 to fix failing CI tests and docs cleanup
* switched to using bibtex citations
* move hybrid solver input parameters documentation to its own section
* clean up ion beam instability analysis script
* Apply suggestions from code review (Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>)
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* Apply suggestions from code review (Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>)
* add inline comments describing the meaning of each argument for the `amrex::MultiFab::LinComb` calls used
* make `HybridPICSolver` a child class of `picmistandard.base._ClassWithInit`
* apply changes requested during code review
* add warning about using hybrid-PIC solver with Esirkepov current deposition
* add Stanier 2020 reference to recommend linear particles with hybrid-PIC
* add call to FillBoundary for `current_fp` - needed for accurate interpolation to nodal grid
* changes requested from code review
* Apply suggestions from code review (Co-authored-by: Remi Lehe)
* include physics accuracy check for ion beam instability; switch CI tests to use direct current deposition
* reset benchmark values after switching to direct current deposition
* update ion beam instability benchmark
* minor changes requested during code review
* remove guard cells for `enE_nodal_mf` as well as corresponding `FillBoundary` call
* refactor: moved hybrid-PIC specific multifabs and `CalculateElectronPressure()` to `HybridPICModel`
* add assert that load balancing is not used with the hybrid-PIC solver since the new multifabs are not yet properly redistributed
* move `CalculateCurrentAmpere` to `HybridPICModel`
* refactor: moved `HybridPICSolveE` to `HybridPICModel` class

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>
Co-authored-by: Remi Lehe
---
 .../HybridPICModel/HybridPICModel.cpp | 338 +++++++++++++++++++++
 1 file changed, 338 insertions(+)
 create mode 100644 Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp
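The new class drives an E-field update built from the quantities handled in the file below: the total current obtained from Ampere's law, the deposited ion current and charge density, a scalar electron pressure, and a parsed resistivity. Written out as a summary of those pieces (a sketch consistent with the code comments, not a verbatim copy of the WarpX documentation), the generalized Ohm's law is approximately

\[
  \mathbf{E} \;=\; \frac{\bigl(\nabla\times\mathbf{B}/\mu_0 \;-\; \mathbf{J}_i\bigr)\times\mathbf{B} \;-\; \nabla p_e}{e\,n_e}
  \;+\; \eta(\rho)\,\mathbf{J},
  \qquad
  \mathbf{J} \;=\; \frac{\nabla\times\mathbf{B}}{\mu_0},
  \qquad
  p_e \;=\; n_0\,T_e\,\bigl(n_e/n_0\bigr)^{\gamma},
\]

with quasineutrality assumed, i.e. n_e = rho_i / e.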
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp
new file mode 100644
index 000000000..9e04045d0
--- /dev/null
+++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp
@@ -0,0 +1,338 @@
+/* Copyright 2023 The WarpX Community
+ *
+ * This file is part of WarpX.
+ *
+ * Authors: Roelof Groenewald (TAE Technologies)
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+
+#include "HybridPICModel.H"
+
+using namespace amrex;
+
+HybridPICModel::HybridPICModel ( int nlevs_max )
+{
+    ReadParameters();
+    AllocateMFs(nlevs_max);
+}
+
+void HybridPICModel::ReadParameters ()
+{
+    ParmParse pp_hybrid("hybrid_pic_model");
+
+    // The B-field update is subcycled to improve stability - the number
+    // of sub steps can be specified by the user (defaults to 50).
+    utils::parser::queryWithParser(pp_hybrid, "substeps", m_substeps);
+
+    // The hybrid model requires an electron temperature, reference density
+    // and exponent to be given. These values will be used to calculate the
+    // electron pressure according to p = n0 * Te * (n/n0)^gamma
+    utils::parser::queryWithParser(pp_hybrid, "gamma", m_gamma);
+    if (!utils::parser::queryWithParser(pp_hybrid, "elec_temp", m_elec_temp)) {
+        Abort("hybrid_pic_model.elec_temp must be specified when using the hybrid solver");
+    }
+    bool n0_ref_given = utils::parser::queryWithParser(pp_hybrid, "n0_ref", m_n0_ref);
+    if (m_gamma != 1.0 && !n0_ref_given) {
+        Abort("hybrid_pic_model.n0_ref should be specified if hybrid_pic_model.gamma != 1");
+    }
+
+    pp_hybrid.query("plasma_resistivity(rho)", m_eta_expression);
+    utils::parser::queryWithParser(pp_hybrid, "n_floor", m_n_floor);
+
+    // convert electron temperature from eV to J
+    m_elec_temp *= PhysConst::q_e;
+}
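The equation of state parsed above, p = n0 * Te * (n/n0)^gamma with Te converted from eV to joules, is simple enough to check in isolation. The following standalone snippet is an illustration only; the helper name and the numbers are made up for the example, and the production evaluation happens in ElectronPressure::get_pressure further down in this file:

    #include <cmath>
    #include <cstdio>

    // Evaluate p_e = n0_ref * Te * (n / n0_ref)^gamma, mirroring the parameters
    // read by HybridPICModel::ReadParameters. Illustrative sketch only.
    double electron_pressure_sketch (double n0_ref, double Te_joule,
                                     double gamma, double n)
    {
        return n0_ref * Te_joule * std::pow(n / n0_ref, gamma);
    }

    int main ()
    {
        const double q_e  = 1.602176634e-19;   // elementary charge [C]
        const double Te_J = 100.0 * q_e;        // electron temperature: 100 eV in joules
        const double n0   = 1.0e19;             // reference density [1/m^3]
        // With gamma = 1 the expression reduces to the isothermal value n * Te.
        std::printf("p_e = %.3e Pa\n",
                    electron_pressure_sketch(n0, Te_J, 1.0, 2.0e19));
        return 0;
    }

For gamma = 1 the reference density cancels out of the expression, which is consistent with ReadParameters only requiring hybrid_pic_model.n0_ref when gamma != 1.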
+
+void HybridPICModel::AllocateMFs (int nlevs_max)
+{
+    electron_pressure_fp.resize(nlevs_max);
+    rho_fp_temp.resize(nlevs_max);
+    current_fp_temp.resize(nlevs_max);
+    current_fp_ampere.resize(nlevs_max);
+}
+
+void HybridPICModel::AllocateLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm,
+                                       const int ncomps, const IntVect& ngJ, const IntVect& ngRho,
+                                       const IntVect& jx_nodal_flag,
+                                       const IntVect& jy_nodal_flag,
+                                       const IntVect& jz_nodal_flag,
+                                       const IntVect& rho_nodal_flag)
+{
+    // set human-readable tag for each MultiFab
+    auto const tag = [lev]( std::string tagname ) {
+        tagname.append("[l=").append(std::to_string(lev)).append("]");
+        return tagname;
+    };
+
+    auto & warpx = WarpX::GetInstance();
+
+    // The "electron_pressure_fp" multifab stores the electron pressure calculated
+    // from the specified equation of state.
+    // The "rho_fp_temp" multifab is used to store the ion charge density
+    // interpolated or extrapolated to appropriate timesteps.
+    // The "current_fp_temp" multifab is used to store the ion current density
+    // interpolated or extrapolated to appropriate timesteps.
+    // The "current_fp_ampere" multifab stores the total current calculated as
+    // the curl of B.
+    warpx.AllocInitMultiFab(electron_pressure_fp[lev], amrex::convert(ba, rho_nodal_flag),
+                            dm, ncomps, ngRho, tag("electron_pressure_fp"), 0.0_rt);
+
+    warpx.AllocInitMultiFab(rho_fp_temp[lev], amrex::convert(ba, rho_nodal_flag),
+                            dm, ncomps, ngRho, tag("rho_fp_temp"), 0.0_rt);
+
+    warpx.AllocInitMultiFab(current_fp_temp[lev][0], amrex::convert(ba, jx_nodal_flag),
+                            dm, ncomps, ngJ, tag("current_fp_temp[x]"), 0.0_rt);
+    warpx.AllocInitMultiFab(current_fp_temp[lev][1], amrex::convert(ba, jy_nodal_flag),
+                            dm, ncomps, ngJ, tag("current_fp_temp[y]"), 0.0_rt);
+    warpx.AllocInitMultiFab(current_fp_temp[lev][2], amrex::convert(ba, jz_nodal_flag),
+                            dm, ncomps, ngJ, tag("current_fp_temp[z]"), 0.0_rt);
+
+    warpx.AllocInitMultiFab(current_fp_ampere[lev][0], amrex::convert(ba, jx_nodal_flag),
+                            dm, ncomps, ngJ, tag("current_fp_ampere[x]"), 0.0_rt);
+    warpx.AllocInitMultiFab(current_fp_ampere[lev][1], amrex::convert(ba, jy_nodal_flag),
+                            dm, ncomps, ngJ, tag("current_fp_ampere[y]"), 0.0_rt);
+    warpx.AllocInitMultiFab(current_fp_ampere[lev][2], amrex::convert(ba, jz_nodal_flag),
+                            dm, ncomps, ngJ, tag("current_fp_ampere[z]"), 0.0_rt);
+}
+
+void HybridPICModel::ClearLevel (int lev)
+{
+    electron_pressure_fp[lev].reset();
+    rho_fp_temp[lev].reset();
+    for (int i = 0; i < 3; ++i) {
+        current_fp_temp[lev][i].reset();
+        current_fp_ampere[lev][i].reset();
+    }
+}
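Each of the MultiFabs allocated above gets its staggering from amrex::convert and the nodal flags handed in by WarpX. A minimal, self-contained AMReX sketch of that mechanism, with the domain size and flags chosen arbitrarily for illustration:

    #include <AMReX.H>
    #include <AMReX_BoxArray.H>
    #include <AMReX_IntVect.H>
    #include <AMReX_Print.H>

    int main (int argc, char* argv[])
    {
        amrex::Initialize(argc, argv);
        {
            // A small cell-centered BoxArray standing in for one level's grids.
            const amrex::Box domain(amrex::IntVect(0), amrex::IntVect(31));
            amrex::BoxArray ba(domain);

            // Converting with a fully nodal flag produces a node-centered BoxArray;
            // the *_nodal_flag arguments of AllocateLevelMFs play exactly this role.
            const amrex::BoxArray ba_nodal =
                amrex::convert(ba, amrex::IntVect::TheNodeVector());

            amrex::Print() << "cell-centered: " << ba.ixType()
                           << "  nodal: " << ba_nodal.ixType() << "\n";
        }
        amrex::Finalize();
        return 0;
    }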
+
+void HybridPICModel::InitData ()
+{
+    m_resistivity_parser = std::make_unique<amrex::Parser>(
+        utils::parser::makeParser(m_eta_expression, {"rho"}));
+    m_eta = m_resistivity_parser->compile<1>();
+
+    auto & warpx = WarpX::GetInstance();
+
+    // Get the grid staggering of the fields involved in calculating E
+    amrex::IntVect Jx_stag = warpx.getcurrent_fp(0,0).ixType().toIntVect();
+    amrex::IntVect Jy_stag = warpx.getcurrent_fp(0,1).ixType().toIntVect();
+    amrex::IntVect Jz_stag = warpx.getcurrent_fp(0,2).ixType().toIntVect();
+    amrex::IntVect Bx_stag = warpx.getBfield_fp(0,0).ixType().toIntVect();
+    amrex::IntVect By_stag = warpx.getBfield_fp(0,1).ixType().toIntVect();
+    amrex::IntVect Bz_stag = warpx.getBfield_fp(0,2).ixType().toIntVect();
+    amrex::IntVect Ex_stag = warpx.getEfield_fp(0,0).ixType().toIntVect();
+    amrex::IntVect Ey_stag = warpx.getEfield_fp(0,1).ixType().toIntVect();
+    amrex::IntVect Ez_stag = warpx.getEfield_fp(0,2).ixType().toIntVect();
+
+    // copy data to device
+    for ( int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
+        Jx_IndexType[idim] = Jx_stag[idim];
+        Jy_IndexType[idim] = Jy_stag[idim];
+        Jz_IndexType[idim] = Jz_stag[idim];
+        Bx_IndexType[idim] = Bx_stag[idim];
+        By_IndexType[idim] = By_stag[idim];
+        Bz_IndexType[idim] = Bz_stag[idim];
+        Ex_IndexType[idim] = Ex_stag[idim];
+        Ey_IndexType[idim] = Ey_stag[idim];
+        Ez_IndexType[idim] = Ez_stag[idim];
+    }
+
+    // Below we set all the unused dimensions to have nodal values for J, B & E
+    // since these values will be interpolated onto a nodal grid - if this is
+    // not done the Interp function returns nonsense values.
+#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) || defined(WARPX_DIM_1D_Z)
+    Jx_IndexType[2] = 1;
+    Jy_IndexType[2] = 1;
+    Jz_IndexType[2] = 1;
+    Bx_IndexType[2] = 1;
+    By_IndexType[2] = 1;
+    Bz_IndexType[2] = 1;
+    Ex_IndexType[2] = 1;
+    Ey_IndexType[2] = 1;
+    Ez_IndexType[2] = 1;
+#endif
+#if defined(WARPX_DIM_1D_Z)
+    Jx_IndexType[1] = 1;
+    Jy_IndexType[1] = 1;
+    Jz_IndexType[1] = 1;
+    Bx_IndexType[1] = 1;
+    By_IndexType[1] = 1;
+    Bz_IndexType[1] = 1;
+    Ex_IndexType[1] = 1;
+    Ey_IndexType[1] = 1;
+    Ez_IndexType[1] = 1;
+#endif
+}
+
+void HybridPICModel::CalculateCurrentAmpere (
+    amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3>> const& Bfield,
+    amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3>> const& edge_lengths)
+{
+    auto& warpx = WarpX::GetInstance();
+    for (int lev = 0; lev <= warpx.finestLevel(); ++lev)
+    {
+        CalculateCurrentAmpere(Bfield[lev], edge_lengths[lev], lev);
+    }
+}
+
+void HybridPICModel::CalculateCurrentAmpere (
+    std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Bfield,
+    std::array< std::unique_ptr<amrex::MultiFab>, 3> const& edge_lengths,
+    const int lev)
+{
+    WARPX_PROFILE("WarpX::CalculateCurrentAmpere()");
+
+    auto& warpx = WarpX::GetInstance();
+    warpx.get_pointer_fdtd_solver_fp(lev)->CalculateCurrentAmpere(
+        current_fp_ampere[lev], Bfield, edge_lengths, lev
+    );
+
+    // we shouldn't apply the boundary condition to J since J = J_i - J_e but
+    // the boundary correction was already applied to J_i and the B-field
+    // boundary ensures that J itself complies with the boundary conditions, right?
+    // ApplyJfieldBoundary(lev, Jfield[0].get(), Jfield[1].get(), Jfield[2].get());
+    for (int i=0; i<3; i++) current_fp_ampere[lev][i]->FillBoundary(warpx.Geom(lev).periodicity());
+}
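CalculateCurrentAmpere above hands the stencil work to the FiniteDifferenceSolver, which is not part of this file. What that call computes is the total current as the curl of B over mu_0 (Ampere's law with the displacement current dropped). A plain centered-difference sketch of one component, ignoring the Yee staggering and guard-cell handling the real solver deals with (function name and sample values are invented for the example):

    #include <cstdio>

    // Illustration only: J_z = (dBy/dx - dBx/dy) / mu_0 on a uniform grid.
    // Neighbor values are passed in directly to keep the sketch self-contained.
    inline double total_current_z (double By_xplus, double By_xminus,
                                   double Bx_yplus, double Bx_yminus,
                                   double dx, double dy)
    {
        constexpr double mu0 = 1.25663706212e-6;   // vacuum permeability [T m/A]
        const double dBy_dx = (By_xplus - By_xminus) / (2.0 * dx);
        const double dBx_dy = (Bx_yplus - Bx_yminus) / (2.0 * dy);
        return (dBy_dx - dBx_dy) / mu0;
    }

    int main ()
    {
        // Example: By grows with x, Bx is uniform, grid spacing 0.5 m.
        std::printf("J_z = %.3e A/m^2\n",
                    total_current_z(1.0e-3, 0.0, 0.0, 0.0, 0.5, 0.5));
        return 0;
    }

The FillBoundary call at the end of CalculateCurrentAmpere then keeps the guard cells of current_fp_ampere consistent across boxes before this total current is combined with the ion current in the E-field solve.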
+
+void HybridPICModel::HybridPICSolveE (
+    amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3>> & Efield,
+    amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3>> const& Jfield,
+    amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3>> const& Bfield,
+    amrex::Vector<std::unique_ptr<amrex::MultiFab>> const& rhofield,
+    amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3>> const& edge_lengths,
+    DtType a_dt_type)
+{
+    auto& warpx = WarpX::GetInstance();
+    for (int lev = 0; lev <= warpx.finestLevel(); ++lev)
+    {
+        HybridPICSolveE(
+            Efield[lev], Jfield[lev], Bfield[lev], rhofield[lev],
+            edge_lengths[lev], lev, a_dt_type
+        );
+    }
+}
+
+void HybridPICModel::HybridPICSolveE (
+    std::array< std::unique_ptr<amrex::MultiFab>, 3> & Efield,
+    std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Jfield,
+    std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Bfield,
+    std::unique_ptr<amrex::MultiFab> const& rhofield,
+    std::array< std::unique_ptr<amrex::MultiFab>, 3> const& edge_lengths,
+    const int lev, DtType a_dt_type)
+{
+    WARPX_PROFILE("WarpX::HybridPICSolveE()");
+
+    HybridPICSolveE(
+        Efield, Jfield, Bfield, rhofield, edge_lengths, lev,
+        PatchType::fine, a_dt_type
+    );
+    if (lev > 0)
+    {
+        amrex::Abort(Utils::TextMsg::Err(
+            "HybridPICSolveE: Only one level implemented for hybrid-PIC solver."));
+    }
+}
+
+void HybridPICModel::HybridPICSolveE (
+    std::array< std::unique_ptr<amrex::MultiFab>, 3> & Efield,
+    std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Jfield,
+    std::array< std::unique_ptr<amrex::MultiFab>, 3> const& Bfield,
+    std::unique_ptr<amrex::MultiFab> const& rhofield,
+    std::array< std::unique_ptr<amrex::MultiFab>, 3> const& edge_lengths,
+    const int lev, PatchType patch_type, DtType a_dt_type)
+{
+    auto& warpx = WarpX::GetInstance();
+
+    // Solve E field in regular cells
+    // The first half step uses t=n quantities, the second half t=n+1/2
+    // quantities and the full step uses t=n+1 quantities
+    if (a_dt_type == DtType::FirstHalf) {
+        warpx.get_pointer_fdtd_solver_fp(lev)->HybridPICSolveE(
+            Efield, current_fp_ampere[lev],
+            current_fp_temp[lev], Bfield,
+            rho_fp_temp[lev],
+            electron_pressure_fp[lev],
+            edge_lengths, lev, this, a_dt_type
+        );
+    }
+    else if (a_dt_type == DtType::SecondHalf) {
+        warpx.get_pointer_fdtd_solver_fp(lev)->HybridPICSolveE(
+            Efield, current_fp_ampere[lev],
+            Jfield, Bfield,
+            rho_fp_temp[lev],
+            electron_pressure_fp[lev],
+            edge_lengths, lev, this, a_dt_type
+        );
+    }
+    else {
+        warpx.get_pointer_fdtd_solver_fp(lev)->HybridPICSolveE(
+            Efield, current_fp_ampere[lev],
+            current_fp_temp[lev], Bfield,
+            rhofield,
+            electron_pressure_fp[lev],
+            edge_lengths, lev, this, a_dt_type
+        );
+    }
+
+    warpx.ApplyEfieldBoundary(lev, patch_type);
+}
+
+void HybridPICModel::CalculateElectronPressure(DtType a_dt_type)
+{
+    auto& warpx = WarpX::GetInstance();
+    for (int lev = 0; lev <= warpx.finestLevel(); ++lev)
+    {
+        CalculateElectronPressure(lev, a_dt_type);
+    }
+}
+
+void HybridPICModel::CalculateElectronPressure(const int lev, DtType a_dt_type)
+{
+    WARPX_PROFILE("WarpX::CalculateElectronPressure()");
+
+    auto& warpx = WarpX::GetInstance();
+    // The full step uses rho^{n+1}, otherwise use the old or averaged
+    // charge density.
+    if (a_dt_type == DtType::Full) {
+        FillElectronPressureMF(
+            electron_pressure_fp[lev], warpx.get_pointer_rho_fp(lev)
+        );
+    } else {
+        FillElectronPressureMF(
+            electron_pressure_fp[lev], rho_fp_temp[lev].get()
+        );
+    }
+    warpx.ApplyElectronPressureBoundary(lev, PatchType::fine);
+    electron_pressure_fp[lev]->FillBoundary(warpx.Geom(lev).periodicity());
+}
+
+
+void HybridPICModel::FillElectronPressureMF (
+    std::unique_ptr<amrex::MultiFab> const& Pe_field,
+    amrex::MultiFab* const& rho_field )
+{
+    const auto n0_ref = m_n0_ref;
+    const auto elec_temp = m_elec_temp;
+    const auto gamma = m_gamma;
+
+    // Loop through the grids, and over the tiles within each grid
+#ifdef AMREX_USE_OMP
+#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
+#endif
+    for ( MFIter mfi(*Pe_field, TilingIfNotGPU()); mfi.isValid(); ++mfi )
+    {
+        // Extract field data for this grid/tile
+        Array4<Real const> const& rho = rho_field->const_array(mfi);
+        Array4<Real> const& Pe = Pe_field->array(mfi);
+
+        // Extract tileboxes for which to loop
+        const Box& tilebox = mfi.tilebox();
+
+        ParallelFor(tilebox, [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+            Pe(i, j, k) = ElectronPressure::get_pressure(
+                n0_ref, elec_temp, gamma, rho(i, j, k)
+            );
+        });
+    }
+}
--
cgit v1.2.3
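The three DtType branches in HybridPICSolveE select which charge and current buffers enter the Ohm's law solve: the code comments state that the first half step uses t = n quantities, the second half step t = n + 1/2, and the full step t = n + 1. Combined with the "averaged or extrapolated" MultiFab::LinComb updates mentioned in the commit message, a natural reading of the temporaries is the following; the exact coefficients are set where rho_fp_temp and current_fp_temp are filled, outside this file, so treat these as assumed typical choices rather than verified ones:

\[
  \rho^{\,n+1/2} \approx \tfrac{1}{2}\bigl(\rho^{\,n} + \rho^{\,n+1}\bigr),
  \qquad
  \mathbf{J}_i^{\,n+1} \approx 2\,\mathbf{J}_i^{\,n+1/2} - \mathbf{J}_i^{\,n},
\]

so that DtType::FirstHalf is evaluated with the stored (J_i^n, rho^n), DtType::SecondHalf with the freshly deposited J_i^{n+1/2} and the averaged charge density, and DtType::Full with the extrapolated ion current and the new rho_fp.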