diff options
author | 2021-06-28 16:09:04 -0700 | |
---|---|---|
committer | 2021-06-28 16:09:04 -0700 | |
commit | 16d1ca457abaa8d057018b69adaa1c3b54d6f995 (patch) | |
tree | 29618ee601b824210035e022c1c38a76bed1c0be /Source/Evolve/WarpXEvolve.cpp | |
parent | a0ee8d81410833fe6480d3303eaa889708659bf7 (diff) | |
download | WarpX-16d1ca457abaa8d057018b69adaa1c3b54d6f995.tar.gz WarpX-16d1ca457abaa8d057018b69adaa1c3b54d6f995.tar.zst WarpX-16d1ca457abaa8d057018b69adaa1c3b54d6f995.zip |
Multi-J scheme (#1828)
* Introduce new option skip_deposition
* Properly implement the option to skip deposition
* Skip deposition for electrostatic solver
* Correct typo
* Add Index Enumerator and Equations for F/G Without Averaging
* Define new functions for current deposition and charge deposition
* Disable interpolation between levels
* Correct compilation error in RZ mode
* Add argument for relative time
* Add Index Enumerator and Equations for F/G With Averaging
* [skip ci] Add new OneStep function
* Fix compilation errors
* Correct more compilation errors
* [skip ci] Fix compilation
* Split the PSATD push into separate functions
* Add guards for rho field
* [skip ci] Use new functions in OneStep
* [skip ci] Separate the inverse transform of E_avg, B_avg
* Add deposition of rho
* [skip ci] Prevent deposition of rho if unallocated
* Fix error in deposition function
* Add subcycling of current deposition
* [skip ci] Add inverse transform of averaged fields
* [skip ci] Move component of rho
* Add function to copy J
* Temporarily deactivate contribution from F
* [skip ci] Implement call to linear in J
* Add psatd time averaging for multiJ
* [skip ci] Fix implementation of averaging
* [skip ci] Implement divE cleaning
* Fix Bug for RZ Builds
* Fix Bug for RZ Builds
* Fix Bug in Init of PML Spectral Solvers
* Cleaning
* Cleaning
* Add div(B) Cleaning (G Equation)
* Multi-J Not Implemented with Galilean PSATD or PMLs
* Add 2D CI Test Using Multi-J Scheme
* Add More Inline Comments
* Add More Inline Comments & Doxygen
* Add Doxygen for Constructor of SpectralSolver
* More Doxygen in Class SpectralSolver
* Add Doxygen for New Functions in WarpXPushFieldsEM.cpp
* Add Doxygen for New Functions in WarpX/MultiParticleContainer
* do_dive/b_cleaning Must Be True With linear_in_J Option
* Cast multij_n_depose to Real in Divisions
* New Input Syntax
* Add const where Possible, Fix Warnings
* Docs for New Input Syntax, Fix Abort Messages
* Consistent Use of Idx, IdxAvg, IdxLin
* Improve Documentation of psatd.J_linear_in_time
* Use const Type Qualifier whenever Possible
* Simplify Initialization of Pointer ion_lev
* Improve Doxygen
* Update documentation
* Add Note on NCI to Docs
* Make warpx.do_multi_J_n_depositions Not Optional
* Simplify Logic in getRequiredNumberOfFields
* Use More const Type Qualifiers
Co-authored-by: Edoardo Zoni <ezoni@lbl.gov>
Diffstat (limited to 'Source/Evolve/WarpXEvolve.cpp')
-rw-r--r-- | Source/Evolve/WarpXEvolve.cpp | 163 |
1 file changed, 149 insertions, 14 deletions
diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index b441efbb9..23a6470b3 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -157,20 +157,34 @@ WarpX::Evolve (int numsteps) // Main PIC operation: // gather fields, push particles, deposit sources, update fields - if ( do_electrostatic != ElectrostaticSolverAlgo::None ) { - // Special case: electrostatic solver. - // In this case, we only gather fields and push particles - // The deposition and calculation of fields is done further below - bool const skip_deposition=true; + + // Electrostatic case: only gather fields and push particles, + // deposition and calculation of fields done further below + if (do_electrostatic != ElectrostaticSolverAlgo::None) + { + const bool skip_deposition = true; PushParticlesandDepose(cur_time, skip_deposition); - } else if (do_subcycling == 0 || finest_level == 0) { + } + // Electromagnetic case: multi-J algorithm + else if (do_multi_J) + { + OneStep_multiJ(cur_time); + } + // Electromagnetic case: no subcycling or no mesh refinement + else if (do_subcycling == 0 || finest_level == 0) + { OneStep_nosub(cur_time); - // E : guard cells are up-to-date - // B : guard cells are NOT up-to-date - // F : guard cells are NOT up-to-date - } else if (do_subcycling == 1 && finest_level == 1) { + // E: guard cells are up-to-date + // B: guard cells are NOT up-to-date + // F: guard cells are NOT up-to-date + } + // Electromagnetic case: subcycling with one level of mesh refinement + else if (do_subcycling == 1 && finest_level == 1) + { OneStep_sub1(cur_time); - } else { + } + else + { amrex::Print() << "Error: do_subcycling = " << do_subcycling << std::endl; amrex::Abort("Unsupported do_subcycling type"); } @@ -372,7 +386,7 @@ WarpX::OneStep_nosub (Real cur_time) WarpX::Hybrid_QED_Push(dt); FillBoundaryE(guard_cells.ng_alloc_EB); } - PushPSATD(dt[0]); + PushPSATD(); FillBoundaryE(guard_cells.ng_alloc_EB); 
FillBoundaryB(guard_cells.ng_alloc_EB); @@ -435,6 +449,129 @@ WarpX::OneStep_nosub (Real cur_time) if (warpx_py_afterEsolve) warpx_py_afterEsolve(); } +void +WarpX::OneStep_multiJ (const amrex::Real cur_time) +{ +#ifdef WARPX_DIM_RZ + amrex::Abort("multi-J algorithm not implemented for RZ geometry"); +#else +#ifdef WARPX_USE_PSATD + if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) + { + // Push particle from x^{n} to x^{n+1} + // from p^{n-1/2} to p^{n+1/2} + const bool skip_deposition = true; + PushParticlesandDepose(cur_time, skip_deposition); + + // Initialize multi-J loop: + + // 1) Prepare E,B,F,G fields in spectral space + PSATDForwardTransformEB(); + PSATDForwardTransformF(); + PSATDForwardTransformG(); + + // 2) Set the averaged fields to zero + if (WarpX::fft_do_time_averaging) PSATDEraseAverageFields(); + + // 3) Deposit rho (in rho_new, since it will be moved during the loop) + if (WarpX::update_with_rho) + { + // Deposit rho at relative time -dt in component 1 (rho_new) + // (dt[0] denotes the time step on mesh refinement level 0) + mypc->DepositCharge(rho_fp, -dt[0], 1); + // Filter, exchange boundary, and interpolate across levels + SyncRho(); + // Forward FFT of rho_new + PSATDForwardTransformRho(1); + } + + // 4) Deposit J if needed + if (WarpX::J_linear_in_time) + { + // Deposit J at relative time -dt with time step dt + // (dt[0] denotes the time step on mesh refinement level 0) + mypc->DepositCurrent(current_fp, dt[0], -dt[0]); + // Filter, exchange boundary, and interpolate across levels + SyncCurrent(); + // Forward FFT of J + PSATDForwardTransformJ(); + } + + // Number of depositions for multi-J scheme + const int n_depose = WarpX::do_multi_J_n_depositions; + // Time sub-step for each multi-J deposition + const amrex::Real sub_dt = dt[0] / static_cast<amrex::Real>(n_depose); + // Whether to perform multi-J depositions on a time interval that spans + // one or two full time steps (from n*dt to (n+1)*dt, or from n*dt to (n+2)*dt) + 
const int n_loop = (WarpX::fft_do_time_averaging) ? 2*n_depose : n_depose; + + // Loop over multi-J depositions + for (int i_depose = 0; i_depose < n_loop; i_depose++) + { + // Move rho deposited previously, from new to old + PSATDMoveRhoNewToRhoOld(); + + // Move J deposited previously, from new to old + // (when using assumption of J linear in time) + if (WarpX::J_linear_in_time) PSATDMoveJNewToJOld(); + + const amrex::Real t_depose = (WarpX::J_linear_in_time) ? + (i_depose-n_depose+1)*sub_dt : (i_depose-n_depose+0.5)*sub_dt; + + // Deposit new J at relative time t_depose with time step dt + // (dt[0] denotes the time step on mesh refinement level 0) + mypc->DepositCurrent(current_fp, dt[0], t_depose); + // Filter, exchange boundary, and interpolate across levels + SyncCurrent(); + // Forward FFT of J + PSATDForwardTransformJ(); + + // Deposit new rho + if (WarpX::update_with_rho) + { + // Deposit rho at relative time (i_depose-n_depose+1)*sub_dt in component 1 (rho_new) + mypc->DepositCharge(rho_fp, (i_depose-n_depose+1)*sub_dt, 1); + // Filter, exchange boundary, and interpolate across levels + SyncRho(); + // Forward FFT of rho_new + PSATDForwardTransformRho(1); + } + + // Advance E,B,F,G fields in time and update the average fields + PSATDPushSpectralFields(); + + // Transform non-average fields E,B,F,G after n_depose pushes + // (the relative time reached here coincides with an integer full time step) + if (i_depose == n_depose-1) + { + PSATDBackwardTransformEB(); + PSATDBackwardTransformF(); + PSATDBackwardTransformG(); + } + } + + // Transform fields back to real space and exchange guard cells + if (WarpX::fft_do_time_averaging) + { + // We summed the integral of the field over 2*dt + PSATDScaleAverageFields(1._rt / (2._rt*dt[0])); + PSATDBackwardTransformEBavg(); + } + FillBoundaryE(guard_cells.ng_alloc_EB); + FillBoundaryB(guard_cells.ng_alloc_EB); + FillBoundaryF(guard_cells.ng_alloc_F); + FillBoundaryG(guard_cells.ng_alloc_G); + } + else + { + 
amrex::Abort("multi-J algorithm not implemented for FDTD"); + } +#else + amrex::Abort("multi-J algorithm not implemented for FDTD"); +#endif // WARPX_USE_PSATD +#endif // not WARPX_DIM_RZ +} + /* /brief Perform one PIC iteration, with subcycling * i.e. The fine patch uses a smaller timestep (and steps more often) * than the coarse patch, for the field advance and particle pusher. @@ -450,8 +587,6 @@ WarpX::OneStep_nosub (Real cur_time) * steps of the fine grid. * */ - - void WarpX::OneStep_sub1 (Real curtime) { |