author | 2021-06-28 16:09:04 -0700
---|---
committer | 2021-06-28 16:09:04 -0700
commit | 16d1ca457abaa8d057018b69adaa1c3b54d6f995 (patch)
tree | 29618ee601b824210035e022c1c38a76bed1c0be /Source/Particles/WarpXParticleContainer.cpp
parent | a0ee8d81410833fe6480d3303eaa889708659bf7 (diff)
download | WarpX-16d1ca457abaa8d057018b69adaa1c3b54d6f995.tar.gz WarpX-16d1ca457abaa8d057018b69adaa1c3b54d6f995.tar.zst WarpX-16d1ca457abaa8d057018b69adaa1c3b54d6f995.zip
Multi-J scheme (#1828)
* Introduce new option skip_deposition
* Properly implement the option to skip deposition
* Skip deposition for electrostatic solver
* Correct typo
* Add Index Enumerator and Equations for F/G Without Averaging
* Define new functions for current deposition and charge deposition
* Disable interpolation between levels
* Correct compilation error in RZ mode
* Add argument for relative time
* Add Index Enumerator and Equations for F/G With Averaging
* [skip ci] Add new OneStep function
* Fix compilation errors
* Correct more compilation errors
* [skip ci] Fix compilation
* Split the PSATD push into separate functions
* Add guards for rho field
* [skip ci] Use new functions in OneStep
* [skip ci] Separate the inverse transform of E_avg, B_avg
* Add deposition of rho
* [skip ci] Prevent deposition of rho if unallocated
* Fix error in deposition function
* Add subcycling of current deposition
* [skip ci] Add inverse transform of averaged fields
* [skip ci] Move component of rho
* Add function to copy J
* Temporarily deactivate contribution from F
* [skip ci] Implement call to linear in J
* Add psatd time averaging for multiJ
* [skip ci] Fix implementation of averaging
* [skip ci] Implement divE cleaning
* Fix Bug for RZ Builds
* Fix Bug for RZ Builds
* Fix Bug in Init of PML Spectral Solvers
* Cleaning
* Cleaning
* Add div(B) Cleaning (G Equation)
* Multi-J Not Implemented with Galilean PSATD or PMLs
* Add 2D CI Test Using Multi-J Scheme
* Add More Inline Comments
* Add More Inline Comments & Doxygen
* Add Doxygen for Constructor of SpectralSolver
* More Doxygen in Class SpectralSolver
* Add Doxygen for New Functions in WarpXPushFieldsEM.cpp
* Add Doxygen for New Functions in WarpX/MultiParticleContainer
* do_dive/b_cleaning Must Be True With linear_in_J Option
* Cast multij_n_depose to Real in Divisions
* New Input Syntax
* Add const where Possible, Fix Warnings
* Docs for New Input Syntax, Fix Abort Messages
* Consistent Use of Idx, IdxAvg, IdxLin
* Improve Documentation of psatd.J_linear_in_time
* Use const Type Qualifier whenever Possible
* Simplify Initialization of Pointer ion_lev
* Improve Doxygen
* Update documentation
* Add Note on NCI to Docs
* Make warpx.do_multi_J_n_depositions Not Optional
* Simplify Logic in getRequiredNumberOfFields
* Use More const Type Qualifiers
Co-authored-by: Edoardo Zoni <ezoni@lbl.gov>
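For context on the multi-J scheme described by the commit messages above: the current (and optionally the charge density) is deposited several times per PIC step at intermediate times, so the PSATD field push can treat J as varying (e.g. linearly, via psatd.J_linear_in_time) across the step. The sketch below only illustrates the sub-step time arithmetic suggested by warpx.do_multi_J_n_depositions and by the relative_t/dt ratio forwarded in the diff further down; the offset convention, variable names, and numerical values are assumptions for illustration, not WarpX's actual OneStep implementation.

```cpp
// Schematic sketch (not WarpX source): how a multi-J step might split one
// PIC time step of size dt into n_depositions current/charge depositions,
// each tagged with a "relative time" like the one passed to the new
// DepositCurrent(J, dt, relative_t) overload in the diff below.
// The offset convention (depositing at the end of each sub-interval,
// measured from the end of the full step) is an assumption.
#include <iostream>
#include <vector>

int main()
{
    const double dt = 1.0e-15;       // full PIC time step [s] (example value)
    const int n_depositions = 4;     // cf. warpx.do_multi_J_n_depositions

    // Cast to floating point before dividing, mirroring the commit note
    // "Cast multij_n_depose to Real in Divisions" (avoids integer division).
    const double sub_dt = dt / static_cast<double>(n_depositions);

    std::vector<double> relative_times;
    for (int i_deposit = 0; i_deposit < n_depositions; ++i_deposit)
    {
        // Hypothetical convention: times measured relative to the end of
        // the step, so the last deposition happens at relative_t = 0.
        const double relative_t = -(n_depositions - 1 - i_deposit) * sub_dt;
        relative_times.push_back(relative_t);
    }

    int i = 0;
    for (const double relative_t : relative_times)
    {
        std::cout << "deposition " << i++ << ": relative_t/dt = "
                  << relative_t / dt << '\n';
    }
    return 0;
}
```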
Diffstat (limited to 'Source/Particles/WarpXParticleContainer.cpp')
-rw-r--r-- | Source/Particles/WarpXParticleContainer.cpp | 102
1 file changed, 74 insertions, 28 deletions
diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp
index abb2a224c..564ffa706 100644
--- a/Source/Particles/WarpXParticleContainer.cpp
+++ b/Source/Particles/WarpXParticleContainer.cpp
@@ -499,6 +499,47 @@ WarpXParticleContainer::DepositCurrent (WarpXParIter& pti,
 #endif
 }

+void
+WarpXParticleContainer::DepositCurrent (
+    amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3 > >& J,
+    const amrex::Real dt, const amrex::Real relative_t)
+{
+    // Loop over the refinement levels
+    int const finest_level = J.size() - 1;
+    for (int lev = 0; lev <= finest_level; ++lev)
+    {
+        // Loop over particle tiles and deposit current on each level
+#ifdef AMREX_USE_OMP
+#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
+        {
+        int thread_num = omp_get_thread_num();
+#else
+        int thread_num = 0;
+#endif
+        for (WarpXParIter pti(*this, lev); pti.isValid(); ++pti)
+        {
+            const long np = pti.numParticles();
+            const auto & wp = pti.GetAttribs(PIdx::w);
+            const auto & uxp = pti.GetAttribs(PIdx::ux);
+            const auto & uyp = pti.GetAttribs(PIdx::uy);
+            const auto & uzp = pti.GetAttribs(PIdx::uz);
+
+            int* AMREX_RESTRICT ion_lev = nullptr;
+            if (do_field_ionization)
+            {
+                ion_lev = pti.GetiAttribs(particle_icomps["ionization_level"]).dataPtr();
+            }
+
+            DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev,
+                           J[lev][0].get(), J[lev][1].get(), J[lev][2].get(),
+                           0, np, thread_num, lev, lev, dt, relative_t/dt);
+        }
+#ifdef AMREX_USE_OMP
+        }
+#endif
+    }
+}
+
 /* \brief Charge Deposition for thread thread_num
  * \param pti : Particle iterator
  * \param wp : Array of particle weights
@@ -666,18 +707,21 @@ WarpXParticleContainer::DepositCharge (WarpXParIter& pti, RealVector& wp,

 void
 WarpXParticleContainer::DepositCharge (amrex::Vector<std::unique_ptr<amrex::MultiFab> >& rho,
-                                       bool local, bool reset,
-                                       bool do_rz_volume_scaling)
+                                       const bool local, const bool reset,
+                                       const bool do_rz_volume_scaling,
+                                       const bool interpolate_across_levels,
+                                       const int icomp)
 {
 #ifdef WARPX_DIM_RZ
     (void)do_rz_volume_scaling;
 #endif
     // Loop over the refinement levels
     int const finest_level = rho.size() - 1;
-    for (int lev = 0; lev <= finest_level; ++lev) {
-
-        // Reset the `rho` array if `reset` is True
-        if (reset) rho[lev]->setVal(0.0, rho[lev]->nGrowVect());
+    for (int lev = 0; lev <= finest_level; ++lev)
+    {
+        // Reset the rho array if reset is True
+        int const nc = WarpX::ncomps;
+        if (reset) rho[lev]->setVal(0., icomp*nc, nc, rho[lev]->nGrowVect());

         // Loop over particle tiles and deposit charge on each level
 #ifdef AMREX_USE_OMP
@@ -692,21 +736,21 @@ WarpXParticleContainer::DepositCharge (amrex::Vector<std::unique_ptr<amrex::Mult
             const long np = pti.numParticles();
             auto& wp = pti.GetAttribs(PIdx::w);

-            int* AMREX_RESTRICT ion_lev;
-            if (do_field_ionization){
+            int* AMREX_RESTRICT ion_lev = nullptr;
+            if (do_field_ionization)
+            {
                 ion_lev = pti.GetiAttribs(particle_icomps["ionization_level"]).dataPtr();
-            } else {
-                ion_lev = nullptr;
             }

-            DepositCharge(pti, wp, ion_lev, rho[lev].get(), 0, 0, np, thread_num, lev, lev);
+            DepositCharge(pti, wp, ion_lev, rho[lev].get(), icomp, 0, np, thread_num, lev, lev);
         }
 #ifdef AMREX_USE_OMP
         }
 #endif

 #ifdef WARPX_DIM_RZ
-        if (do_rz_volume_scaling) {
+        if (do_rz_volume_scaling)
+        {
             WarpX::GetInstance().ApplyInverseVolumeScalingToChargeDensity(rho[lev].get(), lev);
         }
 #else
@@ -714,22 +758,24 @@ WarpXParticleContainer::DepositCharge (amrex::Vector<std::unique_ptr<amrex::Mult
 #endif

         // Exchange guard cells
-        if (!local) rho[lev]->SumBoundary( m_gdb->Geom(lev).periodicity() );
+        if (local == false) rho[lev]->SumBoundary(m_gdb->Geom(lev).periodicity());
     }

     // Now that the charge has been deposited at each level,
     // we average down from fine to crse
-    for (int lev = finest_level - 1; lev >= 0; --lev) {
-        const DistributionMapping& fine_dm = rho[lev+1]->DistributionMap();
-        BoxArray coarsened_fine_BA = rho[lev+1]->boxArray();
-        coarsened_fine_BA.coarsen(m_gdb->refRatio(lev));
-        MultiFab coarsened_fine_data(coarsened_fine_BA, fine_dm, rho[lev+1]->nComp(), 0);
-        coarsened_fine_data.setVal(0.0);
-
-        int const refinement_ratio = 2;
-
-        CoarsenMR::Coarsen( coarsened_fine_data, *rho[lev+1], IntVect(refinement_ratio) );
-        rho[lev]->ParallelAdd( coarsened_fine_data, m_gdb->Geom(lev).periodicity() );
+    if (interpolate_across_levels)
+    {
+        for (int lev = finest_level - 1; lev >= 0; --lev)
+        {
+            const DistributionMapping& fine_dm = rho[lev+1]->DistributionMap();
+            BoxArray coarsened_fine_BA = rho[lev+1]->boxArray();
+            coarsened_fine_BA.coarsen(m_gdb->refRatio(lev));
+            MultiFab coarsened_fine_data(coarsened_fine_BA, fine_dm, rho[lev+1]->nComp(), 0);
+            coarsened_fine_data.setVal(0.0);
+            const int refinement_ratio = 2;
+            CoarsenMR::Coarsen(coarsened_fine_data, *rho[lev+1], IntVect(refinement_ratio));
+            rho[lev]->ParallelAdd(coarsened_fine_data, m_gdb->Geom(lev).periodicity());
+        }
     }
 }

@@ -789,7 +835,7 @@ WarpXParticleContainer::GetChargeDensity (int lev, bool local)
     WarpX::GetInstance().ApplyInverseVolumeScalingToChargeDensity(rho.get(), lev);
 #endif

-    if (!local) rho->SumBoundary(gm.periodicity());
+    if (local == false) rho->SumBoundary(gm.periodicity());

     return rho;
 }
@@ -814,7 +860,7 @@ Real WarpXParticleContainer::sumParticleCharge(bool local) {
         }
     }

-    if (!local) ParallelDescriptor::ReduceRealSum(total_charge);
+    if (local == false) ParallelDescriptor::ReduceRealSum(total_charge);
     total_charge *= this->charge;
     return total_charge;
 }
@@ -890,7 +936,7 @@ std::array<Real, 3> WarpXParticleContainer::meanParticleVelocity(bool local) {
         }
     }

-    if (!local) {
+    if (local == false) {
         ParallelDescriptor::ReduceRealSum(vx_total);
         ParallelDescriptor::ReduceRealSum(vy_total);
         ParallelDescriptor::ReduceRealSum(vz_total);
@@ -929,7 +975,7 @@ Real WarpXParticleContainer::maxParticleVelocity(bool local) {
         }
     }

-    if (!local) ParallelAllReduce::Max(max_v, ParallelDescriptor::Communicator());
+    if (local == false) ParallelAllReduce::Max(max_v, ParallelDescriptor::Communicator());
     return max_v;
 }
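A note on the new icomp argument to DepositCharge shown above: the reset now zeroes only the component range [icomp*nc, icomp*nc + nc) of rho, where nc = WarpX::ncomps, instead of the whole MultiFab, and the deposition then writes into that same range. The stand-alone sketch below mimics that component-offset logic with a plain std::vector-backed stand-in (PseudoField) instead of amrex::MultiFab; the type, its setVal analogue, and the two-slot layout (e.g. old/new rho for the multi-J scheme) are illustrative assumptions, not WarpX or AMReX API.

```cpp
// Schematic illustration (not AMReX): resetting only one "slot" of a
// multi-component charge-density array, as the icomp*nc / nc arguments
// in the DepositCharge hunk above suggest.
#include <cassert>
#include <iostream>
#include <vector>

struct PseudoField
{
    int ncomp;                    // total number of components
    std::vector<double> data;     // one value per component (single cell)

    explicit PseudoField (int nc) : ncomp(nc), data(nc, 1.0) {}

    // Analogue of MultiFab::setVal(val, comp, num_comps): set a contiguous
    // range of components to val, leaving the other components untouched.
    void setVal (double val, int comp, int num_comps)
    {
        assert(comp >= 0 && comp + num_comps <= ncomp);
        for (int c = comp; c < comp + num_comps; ++c) { data[c] = val; }
    }
};

int main()
{
    const int nc = 1;        // stand-in for WarpX::ncomps
    const int n_slots = 2;   // e.g. rho at two times for the multi-J scheme
    PseudoField rho(nc * n_slots);

    const int icomp = 1;     // reset only the second slot
    rho.setVal(0.0, icomp*nc, nc);

    for (int c = 0; c < rho.ncomp; ++c)
    {
        std::cout << "component " << c << " = " << rho.data[c] << '\n';
    }
    return 0;
}
```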