author    2021-06-28 16:09:04 -0700
committer 2021-06-28 16:09:04 -0700
commit    16d1ca457abaa8d057018b69adaa1c3b54d6f995 (patch)
tree      29618ee601b824210035e022c1c38a76bed1c0be
parent    a0ee8d81410833fe6480d3303eaa889708659bf7 (diff)
Multi-J scheme (#1828)
* Introduce new option skip_deposition
* Properly implement the option to skip deposition
* Skip deposition for electrostatic solver
* Correct typo
* Add Index Enumerator and Equations for F/G Without Averaging
* Define new functions for current deposition and charge deposition
* Disable interpolation between levels
* Correct compilation error in RZ mode
* Add argument for relative time
* Add Index Enumerator and Equations for F/G With Averaging
* [skip ci] Add new OneStep function
* Fix compilation errors
* Correct more compilation errors
* [skip ci] Fix compilation
* Split the PSATD push into separate functions
* Add guards for rho field
* [skip ci] Use new functions in OneStep
* [skip ci] Separate the inverse transform of E_avg, B_avg
* Add deposition of rho
* [skip ci] Prevent deposition of rho if unallocated
* Fix error in deposition function
* Add subcycling of current deposition
* [skip ci] Add inverse transform of averaged fields
* [skip ci] Move component of rho
* Add function to copy J
* Temporarily deactivate contribution from F
* [skip ci] Implement call to linear in J
* Add psatd time averaging for multiJ
* [skip ci] Fix implementation of averaging
* [skip ci] Implement divE cleaning
* Fix Bug for RZ Builds
* Fix Bug for RZ Builds
* Fix Bug in Init of PML Spectral Solvers
* Cleaning
* Cleaning
* Add div(B) Cleaning (G Equation)
* Multi-J Not Implemented with Galilean PSATD or PMLs
* Add 2D CI Test Using Multi-J Scheme
* Add More Inline Comments
* Add More Inline Comments & Doxygen
* Add Doxygen for Constructor of SpectralSolver
* More Doxygen in Class SpectralSolver
* Add Doxygen for New Functions in WarpXPushFieldsEM.cpp
* Add Doxygen for New Functions in WarpX/MultiParticleContainer
* do_dive/b_cleaning Must Be True With linear_in_J Option
* Cast multij_n_depose to Real in Divisions
* New Input Syntax
* Add const where Possible, Fix Warnings
* Docs for New Input Syntax, Fix Abort Messages
* Consistent Use of Idx, IdxAvg, IdxLin
* Improve Documentation of psatd.J_linear_in_time
* Use const Type Qualifier whenever Possible
* Simplify Initialization of Pointer ion_lev
* Improve Doxygen
* Update documentation
* Add Note on NCI to Docs
* Make warpx.do_multi_J_n_depositions Not Optional
* Simplify Logic in getRequiredNumberOfFields
* Use More const Type Qualifiers
Co-authored-by: Edoardo Zoni <ezoni@lbl.gov>
22 files changed, 1342 insertions, 221 deletions
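Before the full diff, two pieces of context distilled from the patch itself.

First, the new runtime options combine as in the following minimal snippet, assembled from the Docs/source/usage/parameters.rst additions and the new Examples/Tests/multi_J/inputs_2d test below. It is an illustrative sketch rather than a complete input file: the value of 2 sub-steps is taken from the CI test, and warpx.do_multi_J_n_depositions has no default, so it must always be set explicitly when the scheme is enabled.

# Multi-J scheme: deposit J (and rho) and push the PSATD equations
# several times per time step; unlike sub-cycling, field gathering
# still happens only once per step.
# do_multi_J_n_depositions has no default and is mandatory here.
warpx.do_multi_J = 1
warpx.do_multi_J_n_depositions = 2

# Use two currents deposited at the beginning and at the end of the
# time interval instead of one current deposited at half time.
psatd.J_linear_in_time = 1

# Required in combination with psatd.J_linear_in_time = 1.
psatd.update_with_rho = 1
warpx.do_dive_cleaning = 1
warpx.do_divb_cleaning = 1

# Recommended for simulations with strong NCI.
psatd.do_time_averaging = 1

Second, the two new spectral coefficients tabulated by PsatdAlgorithm::InitializeSpectralCoefficientsAvgLin (see the PsatdAlgorithm.cpp hunk below) can be written out by transcribing that code, where C = cos(ω Δt) and S_ck = sin(ω Δt)/ω are the coefficients already computed by InitializeSpectralCoefficients, ω = c|k| is built from the modified k vectors, and Δt is the time step passed to the algorithm:

X_5 = \frac{c^2}{\varepsilon_0}\left(\frac{S_{ck}}{\omega^2} - \frac{1-C}{\omega^4\,\Delta t} - \frac{\Delta t}{2\,\omega^2}\right),
\qquad
X_6 = \frac{c^2}{\varepsilon_0}\left(\frac{1-C}{\omega^4\,\Delta t} - \frac{\Delta t}{2\,\omega^2}\right),

with the limits X_5 \to -\,c^2\,\Delta t^3/(8\,\varepsilon_0) and X_6 \to -\,c^2\,\Delta t^3/(24\,\varepsilon_0) as \omega \to 0. In the time-averaged update with J linear in time, X_5 weights rho_old (and, divided by c^2, the old current) while X_6 weights rho_new (and the new current) when accumulating the averaged E and B fields.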
diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 4d9dfac2b..80e012c5d 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -1315,6 +1315,14 @@ Numerics and algorithms ``amr.max_level = 1``. More information can be found at https://ieeexplore.ieee.org/document/8659392. +* ``warpx.do_multi_J`` (`0` or `1`; default: `0`) + Whether to use the multi-J algorithm, where current deposition and field update are performed multiple times within each time step. The number of sub-steps is determined by the input parameter ``warpx.do_multi_J_n_depositions``. Unlike sub-cycling, field gathering is performed only once per time step, as in regular PIC cycles. For simulations with strong numerical Cherenkov instability (NCI), it is recommended to use the multi-J algorithm in combination with ``psatd.do_time_averaging = 1``. + +* ``warpx.do_multi_J_n_depositions`` (integer) + Number of sub-steps to use with the multi-J algorithm, when ``warpx.do_multi_J = 1``. + Note that this input parameter is not optional and must always be set in all input files where ``warpx.do_multi_J = 1``. No default value is provided automatically. + + * ``psatd.nox``, ``psatd.noy``, ``pstad.noz`` (`integer`) optional (default `16` for all) The order of accuracy of the spatial derivatives, when using the code compiled with a PSATD solver. If ``psatd.periodic_single_box_fft`` is used, these can be set to ``inf`` for infinite-order PSATD. @@ -1450,6 +1458,9 @@ Numerics and algorithms * ``psatd.do_time_averaging`` (`0` or `1`; default: 0) Whether to use an averaged Galilean PSATD algorithm or standard Galilean PSATD. +* ``psatd.J_linear_in_time`` (`0` or `1`; default: `0`) + Whether to perform linear interpolation of two distinct currents deposited at the beginning and the end of the time step (``psatd.J_linear_in_time = 1``), instead of using one single current deposited at half time (``psatd.J_linear_in_time = 0``), for the field update in Fourier space. Currently requires ``psatd.update_with_rho = 1``, ``warpx.do_dive_cleaning = 1``, and ``warpx.do_divb_cleaning = 1``. + * ``warpx.override_sync_intervals`` (`string`) optional (default `1`) Using the `Intervals parser`_ syntax, this string defines the timesteps at which synchronization of sources (`rho` and `J`) and fields (`E` and `B`) on grid nodes at box diff --git a/Examples/Tests/multi_J/inputs_2d b/Examples/Tests/multi_J/inputs_2d new file mode 100644 index 000000000..2bf505f97 --- /dev/null +++ b/Examples/Tests/multi_J/inputs_2d @@ -0,0 +1,147 @@ +# Iterations +max_step = 200 + +# Domain decomposition +amr.n_cell = 256 256 +warpx.numprocs = 1 2 + +# Mesh refinement and geometry +amr.max_level = 0 +geometry.coord_sys = 0 +geometry.is_periodic = 1 0 +geometry.prob_lo = -200e-6 -220e-6 +geometry.prob_hi = 200e-6 10e-6 + +# Algorithms +algo.current_deposition = direct +algo.field_gathering = energy-conserving +algo.maxwell_solver = psatd +algo.particle_pusher = vay +algo.particle_shape = 3 + +# Numerics +warpx.cfl = 3.19 +warpx.do_nodal = 1 +warpx.do_pml = 0 +warpx.use_filter = 1 +warpx.verbose = 1 + +# Boosted frame +warpx.boost_direction = z +warpx.gamma_boost = 2.870114028490 + +# Moving window +warpx.do_moving_window = 1 +warpx.moving_window_dir = z +warpx.moving_window_v = 1. 
+ +# Spectral solver +psatd.do_time_averaging = 1 +psatd.J_linear_in_time = 1 +psatd.update_with_rho = 1 + +# Multi-J scheme +warpx.do_multi_J = 1 +warpx.do_multi_J_n_depositions = 2 +warpx.do_dive_cleaning = 1 +warpx.do_divb_cleaning = 1 + +# Particles +particles.species_names = driver driver_back plasma_e plasma_p +particles.use_fdtd_nci_corr = 0 +particles.rigid_injected_species = driver driver_back + +# Driver (electrons) +driver.species_type = electron +driver.injection_style = "gaussian_beam" +driver.x_rms = 5e-6 +driver.y_rms = 5e-6 +driver.z_rms = 20.1e-6 +driver.x_m = 0. +driver.y_m = 0. +driver.z_m = -80e-6 +driver.npart = 100000 +driver.q_tot = -1e-10 +driver.momentum_distribution_type = "gaussian" +driver.ux_m = 0. +driver.uy_m = 0. +driver.uz_m = 2e9 +driver.ux_th = 0. +driver.uy_th = 0. +driver.uz_th = 0. +driver.zinject_plane = 2e-3 +driver.rigid_advance = true +driver.projected = true +driver.focused = false +driver.initialize_self_fields = 0 +driver.do_symmetrize = 1 + +# Driver (positrons) +driver_back.species_type = positron +driver_back.injection_style = "gaussian_beam" +driver_back.x_rms = 5e-6 +driver_back.y_rms = 5e-6 +driver_back.z_rms = 20.1e-6 +driver_back.x_m = 0. +driver_back.y_m = 0. +driver_back.z_m = -80e-6 +driver_back.npart = 100000 +driver_back.q_tot = 1e-10 +driver_back.momentum_distribution_type = "gaussian" +driver_back.ux_m = 0. +driver_back.uy_m = 0. +driver_back.uz_m = 2e9 +driver_back.ux_th = 0. +driver_back.uy_th = 0. +driver_back.uz_th = 0. +driver_back.zinject_plane = 2e-3 +driver_back.rigid_advance = true +driver_back.projected = true +driver_back.focused = false +driver_back.initialize_self_fields = 0 +driver_back.do_symmetrize = 1 +driver_back.do_backward_propagation = true + +# Electrons +plasma_e.species_type = electron +plasma_e.injection_style = "NUniformPerCell" +plasma_e.zmin = 0. +plasma_e.zmax = 0.05 +plasma_e.xmin = -180e-6 +plasma_e.xmax = 180e-6 +plasma_e.ymin = -180e-6 +plasma_e.ymax = 180e-6 +plasma_e.profile = constant +plasma_e.density = 1e23 +plasma_e.num_particles_per_cell_each_dim = 1 1 1 +plasma_e.momentum_distribution_type = "constant" +plasma_e.ux = 0. +plasma_e.uy = 0. +plasma_e.uz = 0. +plasma_e.do_continuous_injection = 1 + +# Hydrogen +plasma_p.species_type = hydrogen +plasma_p.injection_style = "NUniformPerCell" +plasma_p.zmin = 0. +plasma_p.zmax = 0.05 +plasma_p.xmin = -180e-6 +plasma_p.xmax = 180e-6 +plasma_p.ymin = -180e-6 +plasma_p.ymax = 180e-6 +plasma_p.profile = constant +plasma_p.density = 1e23 +plasma_p.num_particles_per_cell_each_dim = 1 1 1 +plasma_p.momentum_distribution_type = "constant" +plasma_p.ux = 0. +plasma_p.uy = 0. +plasma_p.uz = 0. 
+plasma_p.do_continuous_injection = 1 + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 200 +diag1.diag_type = Full +diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho rho_driver rho_driver_back rho_plasma_e rho_plasma_p +diag1.write_species = 1 +diag1.species = driver plasma_e plasma_p diff --git a/Regression/Checksum/benchmarks_json/multi_J_2d_psatd.json b/Regression/Checksum/benchmarks_json/multi_J_2d_psatd.json new file mode 100644 index 000000000..7788a9d85 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/multi_J_2d_psatd.json @@ -0,0 +1,48 @@ +{ + "driver": { + "particle_cpu": 0.0, + "particle_id": 5000050000.0, + "particle_momentum_x": 4.2468004298680323e-16, + "particle_momentum_y": 0.0, + "particle_momentum_z": 9.82279023786301e-09, + "particle_position_x": 0.3979349343731913, + "particle_position_y": 55.175199387968405, + "particle_weight": 124830181489215.27 + }, + "lev=0": { + "Bx": 0.0, + "By": 938797.6239373332, + "Bz": 0.0, + "Ex": 275592022698142.3, + "Ey": 0.0, + "Ez": 93204573102613.78, + "jx": 9857319741191368.0, + "jy": 0.0, + "jz": 6.4634613927785736e+16, + "rho": 221319690.13710034, + "rho_driver": 2562225.1199331162, + "rho_driver_back": 0.0, + "rho_plasma_e": 2698164888.5847692, + "rho_plasma_p": 2699287913.2703176 + }, + "plasma_e": { + "particle_cpu": 58858.0, + "particle_id": 7388900221.0, + "particle_momentum_x": 7.296651628257412e-19, + "particle_momentum_y": 0.0, + "particle_momentum_z": 4.494602499033915e-17, + "particle_position_x": 5.338124877733829, + "particle_position_y": 26.54016202205885, + "particle_weight": 1.3186130303279046e+17 + }, + "plasma_p": { + "particle_cpu": 58880.0, + "particle_id": 7417282880.0, + "particle_momentum_x": 1.4768074009968963e-18, + "particle_momentum_y": 0.0, + "particle_momentum_z": 7.942647962730444e-14, + "particle_position_x": 5.290048066122284, + "particle_position_y": 26.56734131597245, + "particle_weight": 1.3191059027779915e+17 + } +}
\ No newline at end of file diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 76917e05c..9836a70e4 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -1937,6 +1937,24 @@ particleTypes = electrons ions analysisRoutine = Examples/Tests/galilean/analysis_3d.py tolerance = 1e-4 +[multi_J_2d_psatd] +buildDir = . +inputFile = Examples/Tests/multi_J/inputs_2d +runtime_params = +dim = 2 +addToCompileString = USE_PSATD=TRUE +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = driver driver_back plasma_e plasma_p +analysisRoutine = Examples/analysis_default_regression.py +tolerance = 1e-14 + [ElectrostaticSphere] buildDir = . inputFile = Examples/Tests/ElectrostaticSphere/inputs_3d diff --git a/Source/BoundaryConditions/PML.H b/Source/BoundaryConditions/PML.H index 58343f0f3..ced9d0fac 100644 --- a/Source/BoundaryConditions/PML.H +++ b/Source/BoundaryConditions/PML.H @@ -115,6 +115,7 @@ public: int ncell, int delta, amrex::IntVect ref_ratio, amrex::Real dt, int nox_fft, int noy_fft, int noz_fft, bool do_nodal, int do_moving_window, int pml_has_particles, int do_pml_in_domain, + const bool J_linear_in_time, const bool do_pml_dive_cleaning, const bool do_pml_divb_cleaning, const amrex::IntVect do_pml_Lo = amrex::IntVect::TheUnitVector(), const amrex::IntVect do_pml_Hi = amrex::IntVect::TheUnitVector()); diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index c1c4d55a0..3cdb7b9db 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -452,6 +452,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& /*g int ncell, int delta, amrex::IntVect ref_ratio, Real dt, int nox_fft, int noy_fft, int noz_fft, bool do_nodal, int do_moving_window, int /*pml_has_particles*/, int do_pml_in_domain, + const bool J_linear_in_time, const bool do_pml_dive_cleaning, const bool do_pml_divb_cleaning, const amrex::IntVect do_pml_Lo, const amrex::IntVect do_pml_Hi) : m_dive_cleaning(do_pml_dive_cleaning), @@ -603,7 +604,8 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& /*g realspace_ba.enclosedCells().grow(nge); // cell-centered + guard cells spectral_solver_fp = std::make_unique<SpectralSolver>(lev, realspace_ba, dm, nox_fft, noy_fft, noz_fft, do_nodal, v_galilean_zero, v_comoving_zero, dx, dt, in_pml, - periodic_single_box, update_with_rho, fft_do_time_averaging, m_dive_cleaning, m_divb_cleaning); + periodic_single_box, update_with_rho, fft_do_time_averaging, + J_linear_in_time, m_dive_cleaning, m_divb_cleaning); #endif } @@ -710,7 +712,8 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& /*g realspace_cba.enclosedCells().grow(nge); // cell-centered + guard cells spectral_solver_cp = std::make_unique<SpectralSolver>(lev, realspace_cba, cdm, nox_fft, noy_fft, noz_fft, do_nodal, v_galilean_zero, v_comoving_zero, cdx, dt, in_pml, - periodic_single_box, update_with_rho, fft_do_time_averaging, m_dive_cleaning, m_divb_cleaning); + periodic_single_box, update_with_rho, fft_do_time_averaging, + J_linear_in_time, m_dive_cleaning, m_divb_cleaning); #endif } } diff --git a/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp index 74972d906..e8d7828cb 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp @@ -53,12 +53,12 @@ 
RhoFunctor::operator() ( amrex::MultiFab& mf_dst, const int dcomp, const int /*i warpx.ApplyFilterandSumBoundaryRho(m_lev, m_lev, *rho, 0, rho->nComp()); #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_PSATD) - using Idx = SpectralAvgFieldIndex; + using IdxAvg = SpectralFieldIndexTimeAveraging; if (WarpX::use_kspace_filter) { auto & solver = warpx.get_spectral_solver_fp(m_lev); - solver.ForwardTransform(m_lev, *rho, Idx::rho_new); - solver.ApplyFilter(Idx::rho_new); - solver.BackwardTransform(m_lev, *rho, Idx::rho_new); + solver.ForwardTransform(m_lev, *rho, IdxAvg::rho_new); + solver.ApplyFilter(IdxAvg::rho_new); + solver.BackwardTransform(m_lev, *rho, IdxAvg::rho_new); } #endif diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index b441efbb9..23a6470b3 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -157,20 +157,34 @@ WarpX::Evolve (int numsteps) // Main PIC operation: // gather fields, push particles, deposit sources, update fields - if ( do_electrostatic != ElectrostaticSolverAlgo::None ) { - // Special case: electrostatic solver. - // In this case, we only gather fields and push particles - // The deposition and calculation of fields is done further below - bool const skip_deposition=true; + + // Electrostatic case: only gather fields and push particles, + // deposition and calculation of fields done further below + if (do_electrostatic != ElectrostaticSolverAlgo::None) + { + const bool skip_deposition = true; PushParticlesandDepose(cur_time, skip_deposition); - } else if (do_subcycling == 0 || finest_level == 0) { + } + // Electromagnetic case: multi-J algorithm + else if (do_multi_J) + { + OneStep_multiJ(cur_time); + } + // Electromagnetic case: no subcycling or no mesh refinement + else if (do_subcycling == 0 || finest_level == 0) + { OneStep_nosub(cur_time); - // E : guard cells are up-to-date - // B : guard cells are NOT up-to-date - // F : guard cells are NOT up-to-date - } else if (do_subcycling == 1 && finest_level == 1) { + // E: guard cells are up-to-date + // B: guard cells are NOT up-to-date + // F: guard cells are NOT up-to-date + } + // Electromagnetic case: subcycling with one level of mesh refinement + else if (do_subcycling == 1 && finest_level == 1) + { OneStep_sub1(cur_time); - } else { + } + else + { amrex::Print() << "Error: do_subcycling = " << do_subcycling << std::endl; amrex::Abort("Unsupported do_subcycling type"); } @@ -372,7 +386,7 @@ WarpX::OneStep_nosub (Real cur_time) WarpX::Hybrid_QED_Push(dt); FillBoundaryE(guard_cells.ng_alloc_EB); } - PushPSATD(dt[0]); + PushPSATD(); FillBoundaryE(guard_cells.ng_alloc_EB); FillBoundaryB(guard_cells.ng_alloc_EB); @@ -435,6 +449,129 @@ WarpX::OneStep_nosub (Real cur_time) if (warpx_py_afterEsolve) warpx_py_afterEsolve(); } +void +WarpX::OneStep_multiJ (const amrex::Real cur_time) +{ +#ifdef WARPX_DIM_RZ + amrex::Abort("multi-J algorithm not implemented for RZ geometry"); +#else +#ifdef WARPX_USE_PSATD + if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) + { + // Push particle from x^{n} to x^{n+1} + // from p^{n-1/2} to p^{n+1/2} + const bool skip_deposition = true; + PushParticlesandDepose(cur_time, skip_deposition); + + // Initialize multi-J loop: + + // 1) Prepare E,B,F,G fields in spectral space + PSATDForwardTransformEB(); + PSATDForwardTransformF(); + PSATDForwardTransformG(); + + // 2) Set the averaged fields to zero + if (WarpX::fft_do_time_averaging) PSATDEraseAverageFields(); + + // 3) Deposit rho (in rho_new, since it will be moved during 
the loop) + if (WarpX::update_with_rho) + { + // Deposit rho at relative time -dt in component 1 (rho_new) + // (dt[0] denotes the time step on mesh refinement level 0) + mypc->DepositCharge(rho_fp, -dt[0], 1); + // Filter, exchange boundary, and interpolate across levels + SyncRho(); + // Forward FFT of rho_new + PSATDForwardTransformRho(1); + } + + // 4) Deposit J if needed + if (WarpX::J_linear_in_time) + { + // Deposit J at relative time -dt with time step dt + // (dt[0] denotes the time step on mesh refinement level 0) + mypc->DepositCurrent(current_fp, dt[0], -dt[0]); + // Filter, exchange boundary, and interpolate across levels + SyncCurrent(); + // Forward FFT of J + PSATDForwardTransformJ(); + } + + // Number of depositions for multi-J scheme + const int n_depose = WarpX::do_multi_J_n_depositions; + // Time sub-step for each multi-J deposition + const amrex::Real sub_dt = dt[0] / static_cast<amrex::Real>(n_depose); + // Whether to perform multi-J depositions on a time interval that spans + // one or two full time steps (from n*dt to (n+1)*dt, or from n*dt to (n+2)*dt) + const int n_loop = (WarpX::fft_do_time_averaging) ? 2*n_depose : n_depose; + + // Loop over multi-J depositions + for (int i_depose = 0; i_depose < n_loop; i_depose++) + { + // Move rho deposited previously, from new to old + PSATDMoveRhoNewToRhoOld(); + + // Move J deposited previously, from new to old + // (when using assumption of J linear in time) + if (WarpX::J_linear_in_time) PSATDMoveJNewToJOld(); + + const amrex::Real t_depose = (WarpX::J_linear_in_time) ? + (i_depose-n_depose+1)*sub_dt : (i_depose-n_depose+0.5)*sub_dt; + + // Deposit new J at relative time t_depose with time step dt + // (dt[0] denotes the time step on mesh refinement level 0) + mypc->DepositCurrent(current_fp, dt[0], t_depose); + // Filter, exchange boundary, and interpolate across levels + SyncCurrent(); + // Forward FFT of J + PSATDForwardTransformJ(); + + // Deposit new rho + if (WarpX::update_with_rho) + { + // Deposit rho at relative time (i_depose-n_depose+1)*sub_dt in component 1 (rho_new) + mypc->DepositCharge(rho_fp, (i_depose-n_depose+1)*sub_dt, 1); + // Filter, exchange boundary, and interpolate across levels + SyncRho(); + // Forward FFT of rho_new + PSATDForwardTransformRho(1); + } + + // Advance E,B,F,G fields in time and update the average fields + PSATDPushSpectralFields(); + + // Transform non-average fields E,B,F,G after n_depose pushes + // (the relative time reached here coincides with an integer full time step) + if (i_depose == n_depose-1) + { + PSATDBackwardTransformEB(); + PSATDBackwardTransformF(); + PSATDBackwardTransformG(); + } + } + + // Transform fields back to real space and exchange guard cells + if (WarpX::fft_do_time_averaging) + { + // We summed the integral of the field over 2*dt + PSATDScaleAverageFields(1._rt / (2._rt*dt[0])); + PSATDBackwardTransformEBavg(); + } + FillBoundaryE(guard_cells.ng_alloc_EB); + FillBoundaryB(guard_cells.ng_alloc_EB); + FillBoundaryF(guard_cells.ng_alloc_F); + FillBoundaryG(guard_cells.ng_alloc_G); + } + else + { + amrex::Abort("multi-J algorithm not implemented for FDTD"); + } +#else + amrex::Abort("multi-J algorithm not implemented for FDTD"); +#endif // WARPX_USE_PSATD +#endif // not WARPX_DIM_RZ +} + /* /brief Perform one PIC iteration, with subcycling * i.e. The fine patch uses a smaller timestep (and steps more often) * than the coarse patch, for the field advance and particle pusher. @@ -450,8 +587,6 @@ WarpX::OneStep_nosub (Real cur_time) * steps of the fine grid. 
* */ - - void WarpX::OneStep_sub1 (Real curtime) { diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.H index 7b917284b..b7f349bb0 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.H +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.H @@ -41,6 +41,8 @@ class PsatdAlgorithm : public SpectralBaseAlgorithm * \param[in] dt time step of the simulation * \param[in] update_with_rho whether the update equation for E uses rho or not * \param[in] time_averaging whether to use time averaging for large time steps + * \param[in] J_linear_in_time whether to use two currents computed at the beginning and the end + * of the time interval (instead of using one current computed at half time) */ PsatdAlgorithm ( const SpectralKSpace& spectral_kspace, @@ -52,7 +54,8 @@ class PsatdAlgorithm : public SpectralBaseAlgorithm const amrex::Array<amrex::Real,3>& v_galilean, const amrex::Real dt, const bool update_with_rho, - const bool time_averaging); + const bool time_averaging, + const bool J_linear_in_time); /** * \brief Updates the E and B fields in spectral space, according to the relevant PSATD equations @@ -66,12 +69,22 @@ class PsatdAlgorithm : public SpectralBaseAlgorithm */ virtual int getRequiredNumberOfFields () const override final { - if (m_time_averaging) { - return SpectralAvgFieldIndex::n_fields; - } else { - return SpectralFieldIndex::n_fields; + if (m_J_linear_in_time) + { + return SpectralFieldIndexJLinearInTime::n_fields; } - } + else + { + if (m_time_averaging) + { + return SpectralFieldIndexTimeAveraging::n_fields; + } + else + { + return SpectralFieldIndex::n_fields; + } + } + }; /** * \brief Initializes the coefficients used in \c pushSpectralFields to update the E and B fields @@ -86,6 +99,19 @@ class PsatdAlgorithm : public SpectralBaseAlgorithm const amrex::Real dt); /** + * \brief Initialize additional coefficients used in \c pushSpectralFields to update E,B, + * required only when using time averaging with the assumption that J is linear in time + * + * \param[in] spectral_kspace spectral space + * \param[in] dm distribution mapping + * \param[in] dt time step of the simulation + */ + void InitializeSpectralCoefficientsAvgLin ( + const SpectralKSpace& spectral_kspace, + const amrex::DistributionMapping& dm, + const amrex::Real dt); + + /** * \brief Initializes additional coefficients used in \c pushSpectralFields to update the E and B fields, * required only when using time averaging with large time steps * @@ -138,6 +164,8 @@ class PsatdAlgorithm : public SpectralBaseAlgorithm SpectralRealCoefficients C_coef, S_ck_coef; SpectralComplexCoefficients T2_coef, X1_coef, X2_coef, X3_coef, X4_coef; + SpectralComplexCoefficients X5_coef, X6_coef; + // These real and complex coefficients are allocated only with averaged Galilean PSATD SpectralComplexCoefficients Psi1_coef, Psi2_coef, Y1_coef, Y2_coef, Y3_coef, Y4_coef; @@ -153,6 +181,7 @@ class PsatdAlgorithm : public SpectralBaseAlgorithm amrex::Real m_dt; bool m_update_with_rho; bool m_time_averaging; + bool m_J_linear_in_time; bool m_is_galilean; }; #endif // WARPX_USE_PSATD diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.cpp index 8a5b791ab..b454d79ba 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.cpp +++ 
b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.cpp @@ -36,7 +36,8 @@ PsatdAlgorithm::PsatdAlgorithm( const amrex::Array<amrex::Real,3>& v_galilean, const amrex::Real dt, const bool update_with_rho, - const bool time_averaging) + const bool time_averaging, + const bool J_linear_in_time) // Initializer list : SpectralBaseAlgorithm(spectral_kspace, dm, norder_x, norder_y, norder_z, nodal), // Initialize the centered finite-order modified k vectors: @@ -52,7 +53,8 @@ PsatdAlgorithm::PsatdAlgorithm( m_v_galilean(v_galilean), m_dt(dt), m_update_with_rho(update_with_rho), - m_time_averaging(time_averaging) + m_time_averaging(time_averaging), + m_J_linear_in_time(J_linear_in_time) { const amrex::BoxArray& ba = spectral_kspace.spectralspace_ba; @@ -65,6 +67,7 @@ PsatdAlgorithm::PsatdAlgorithm( X2_coef = SpectralComplexCoefficients(ba, dm, 1, 0); X3_coef = SpectralComplexCoefficients(ba, dm, 1, 0); + // Allocate these coefficients only with Galilean PSATD if (m_is_galilean) { X4_coef = SpectralComplexCoefficients(ba, dm, 1, 0); @@ -73,8 +76,8 @@ PsatdAlgorithm::PsatdAlgorithm( InitializeSpectralCoefficients(spectral_kspace, dm, dt); - // Allocate these coefficients only with averaged Galilean PSATD - if (time_averaging) + // Allocate these coefficients only with time averaging + if (time_averaging && !J_linear_in_time) { Psi1_coef = SpectralComplexCoefficients(ba, dm, 1, 0); Psi2_coef = SpectralComplexCoefficients(ba, dm, 1, 0); @@ -82,9 +85,16 @@ PsatdAlgorithm::PsatdAlgorithm( Y3_coef = SpectralComplexCoefficients(ba, dm, 1, 0); Y2_coef = SpectralComplexCoefficients(ba, dm, 1, 0); Y4_coef = SpectralComplexCoefficients(ba, dm, 1, 0); - InitializeSpectralCoefficientsAveraging(spectral_kspace, dm, dt); } + // Allocate these coefficients only with time averaging + // and with the assumption that J is linear in time + else if (time_averaging && J_linear_in_time) + { + X5_coef = SpectralComplexCoefficients(ba, dm, 1, 0); + X6_coef = SpectralComplexCoefficients(ba, dm, 1, 0); + InitializeSpectralCoefficientsAvgLin(spectral_kspace, dm, dt); + } } void @@ -92,8 +102,11 @@ PsatdAlgorithm::pushSpectralFields (SpectralFieldData& f) const { const bool update_with_rho = m_update_with_rho; const bool time_averaging = m_time_averaging; + const bool J_linear_in_time = m_J_linear_in_time; const bool is_galilean = m_is_galilean; + const amrex::Real dt = m_dt; + // Loop over boxes for (amrex::MFIter mfi(f.fields); mfi.isValid(); ++mfi) { @@ -125,7 +138,7 @@ PsatdAlgorithm::pushSpectralFields (SpectralFieldData& f) const amrex::Array4<const Complex> Y3_arr; amrex::Array4<const Complex> Y4_arr; - if (time_averaging) + if (time_averaging && !J_linear_in_time) { Psi1_arr = Psi1_coef[mfi].array(); Psi2_arr = Psi2_coef[mfi].array(); @@ -135,6 +148,14 @@ PsatdAlgorithm::pushSpectralFields (SpectralFieldData& f) const Y4_arr = Y4_coef[mfi].array(); } + Array4<const Complex> X5_arr; + Array4<const Complex> X6_arr; + if (time_averaging && J_linear_in_time) + { + X5_arr = X5_coef[mfi].array(); + X6_arr = X6_coef[mfi].array(); + } + // Extract pointers for the k vectors const amrex::Real* modified_kx_arr = modified_kx_vec[mfi].dataPtr(); #if (AMREX_SPACEDIM == 3) @@ -146,7 +167,8 @@ PsatdAlgorithm::pushSpectralFields (SpectralFieldData& f) const ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept { using Idx = SpectralFieldIndex; - using AvgIdx = SpectralAvgFieldIndex; + using IdxAvg = SpectralFieldIndexTimeAveraging; + using IdxLin = SpectralFieldIndexJLinearInTime; // Record old values of 
the fields to be updated const Complex Ex_old = fields(i,j,k,Idx::Ex); @@ -173,7 +195,9 @@ PsatdAlgorithm::pushSpectralFields (SpectralFieldData& f) const const amrex::Real kz = modified_kz_arr[j]; #endif // Physical constants and imaginary unit - const amrex::Real c2 = std::pow(PhysConst::c, 2); + constexpr Real c2 = PhysConst::c * PhysConst::c; + constexpr Real ep0 = PhysConst::ep0; + constexpr Real inv_ep0 = 1._rt / PhysConst::ep0; constexpr Complex I = Complex{0._rt, 1._rt}; // These coefficients are initialized in the function InitializeSpectralCoefficients @@ -239,9 +263,83 @@ PsatdAlgorithm::pushSpectralFields (SpectralFieldData& f) const - I * T2 * S_ck * (kx * Ey_old - ky * Ex_old) + I * X1 * (kx * Jy - ky * Jx); - // Additional update equations for averaged Galilean algorithm + if (J_linear_in_time) + { + const Complex Jx_new = fields(i,j,k,IdxLin::Jx_new); + const Complex Jy_new = fields(i,j,k,IdxLin::Jy_new); + const Complex Jz_new = fields(i,j,k,IdxLin::Jz_new); + + const Complex F_old = fields(i,j,k,IdxLin::F); + const Complex G_old = fields(i,j,k,IdxLin::G); + + fields(i,j,k,Idx::Ex) += -X1 * (Jx_new - Jx) / dt + I * c2 * S_ck * F_old * kx; + + fields(i,j,k,Idx::Ey) += -X1 * (Jy_new - Jy) / dt + I * c2 * S_ck * F_old * ky; + + fields(i,j,k,Idx::Ez) += -X1 * (Jz_new - Jz) / dt + I * c2 * S_ck * F_old * kz; + + fields(i,j,k,Idx::Bx) += I * X2/c2 * (ky * (Jz_new - Jz) - kz * (Jy_new - Jy)); + + I * c2 * S_ck * G_old * kx; + + fields(i,j,k,Idx::By) += I * X2/c2 * (kz * (Jx_new - Jx) - kx * (Jz_new - Jz)); + + I * c2 * S_ck * G_old * ky; + + fields(i,j,k,Idx::Bz) += I * X2/c2 * (kx * (Jy_new - Jy) - ky * (Jx_new - Jx)); + + I * c2 * S_ck * G_old * kz; + + const Complex k_dot_J = kx * Jx + ky * Jy + kz * Jz; + const Complex k_dot_dJ = kx * (Jx_new - Jx) + ky * (Jy_new - Jy) + kz * (Jz_new - Jz); + const Complex k_dot_E = kx * Ex_old + ky * Ey_old + kz * Ez_old; + const Complex k_dot_B = kx * Bx_old + ky * By_old + kz * Bz_old; - if (time_averaging) + fields(i,j,k,IdxLin::F) = C * F_old + S_ck * (I * k_dot_E - rho_old * inv_ep0) + - X1 * ((rho_new - rho_old) / dt + I * k_dot_J) - I * X2/c2 * k_dot_dJ; + + fields(i,j,k,IdxLin::G) = C * G_old + I * S_ck * k_dot_B; + + if (time_averaging) + { + const Complex X5 = X5_arr(i,j,k); + const Complex X6 = X6_arr(i,j,k); + + // TODO: Here the code is *accumulating* the average, + // because it is meant to be used with sub-cycling + // maybe this should be made more generic + + fields(i,j,k,IdxLin::Ex_avg) += S_ck * Ex_old + + I * c2 * ep0 * X1 * (ky * Bz_old - kz * By_old) + + I * X5 * rho_old * kx + I * X6 * rho_new * kx + + X3/c2 * Jx - X2/c2 * Jx_new + I * c2 * ep0 * X1 * F_old * kx; + + fields(i,j,k,IdxLin::Ey_avg) += S_ck * Ey_old + + I * c2 * ep0 * X1 * (kz * Bx_old - kx * Bz_old) + + I * X5 * rho_old * ky + I * X6 * rho_new * ky + + X3/c2 * Jy - X2/c2 * Jy_new + I * c2 * ep0 * X1 * F_old * ky; + + fields(i,j,k,IdxLin::Ez_avg) += S_ck * Ez_old + + I * c2 * ep0 * X1 * (kx * By_old - ky * Bx_old) + + I * X5 * rho_old * kz + I * X6 * rho_new * kz + + X3/c2 * Jz - X2/c2 * Jz_new + I * c2 * ep0 * X1 * F_old * kz; + + fields(i,j,k,IdxLin::Bx_avg) += S_ck * Bx_old + - I * ep0 * X1 * (ky * Ez_old - kz * Ey_old) + - I * X5/c2 * (ky * Jz - kz * Jy) - I * X6/c2 * (ky * Jz_new - kz * Jy_new); + + I * c2 * ep0 * X1 * G_old * kx; + + fields(i,j,k,IdxLin::By_avg) += S_ck * By_old + - I * ep0 * X1 * (kz * Ex_old - kx * Ez_old) + - I * X5/c2 * (kz * Jx - kx * Jz) - I * X6/c2 * (kz * Jx_new - kx * Jz_new); + + I * c2 * ep0 * X1 * G_old * ky; + + 
fields(i,j,k,IdxLin::Bz_avg) += S_ck * Bz_old + - I * ep0 * X1 * (kx * Ey_old - ky * Ex_old) + - I * X5/c2 * (kx * Jy - ky * Jx) - I * X6/c2 * (kx * Jy_new - ky * Jx_new); + + I * c2 * ep0 * X1 * G_old * kz; + } + } + + // Additional update equations for averaged Galilean algorithm + if (time_averaging && !J_linear_in_time) { // These coefficients are initialized in the function InitializeSpectralCoefficients below const Complex Psi1 = Psi1_arr(i,j,k); @@ -251,27 +349,27 @@ PsatdAlgorithm::pushSpectralFields (SpectralFieldData& f) const const Complex Y2 = Y2_arr(i,j,k); const Complex Y4 = Y4_arr(i,j,k); - fields(i,j,k,AvgIdx::Ex_avg) = Psi1 * Ex_old + fields(i,j,k,IdxAvg::Ex_avg) = Psi1 * Ex_old - I * c2 * Psi2 * (ky * Bz_old - kz * By_old) + Y4 * Jx + (Y2 * rho_new + Y3 * rho_old) * kx; - fields(i,j,k,AvgIdx::Ey_avg) = Psi1 * Ey_old + fields(i,j,k,IdxAvg::Ey_avg) = Psi1 * Ey_old - I * c2 * Psi2 * (kz * Bx_old - kx * Bz_old) + Y4 * Jy + (Y2 * rho_new + Y3 * rho_old) * ky; - fields(i,j,k,AvgIdx::Ez_avg) = Psi1 * Ez_old + fields(i,j,k,IdxAvg::Ez_avg) = Psi1 * Ez_old - I * c2 * Psi2 * (kx * By_old - ky * Bx_old) + Y4 * Jz + (Y2 * rho_new + Y3 * rho_old) * kz; - fields(i,j,k,AvgIdx::Bx_avg) = Psi1 * Bx_old + fields(i,j,k,IdxAvg::Bx_avg) = Psi1 * Bx_old + I * Psi2 * (ky * Ez_old - kz * Ey_old) + I * Y1 * (ky * Jz - kz * Jy); - fields(i,j,k,AvgIdx::By_avg) = Psi1 * By_old + fields(i,j,k,IdxAvg::By_avg) = Psi1 * By_old + I * Psi2 * (kz * Ex_old - kx * Ez_old) + I * Y1 * (kz * Jx - kx * Jz); - fields(i,j,k,AvgIdx::Bz_avg) = Psi1 * Bz_old + fields(i,j,k,IdxAvg::Bz_avg) = Psi1 * Bz_old + I * Psi2 * (kx * Ey_old - ky * Ex_old) + I * Y1 * (kx * Jy - ky * Jx); } @@ -672,6 +770,76 @@ void PsatdAlgorithm::InitializeSpectralCoefficientsAveraging ( } } +void PsatdAlgorithm::InitializeSpectralCoefficientsAvgLin ( + const SpectralKSpace& spectral_kspace, + const amrex::DistributionMapping& dm, + const amrex::Real dt) +{ + const BoxArray& ba = spectral_kspace.spectralspace_ba; + + // Loop over boxes and allocate the corresponding coefficients for each box + for (MFIter mfi(ba, dm); mfi.isValid(); ++mfi) + { + const Box& bx = ba[mfi]; + + // Extract pointers for the k vectors + const Real* kx_s = modified_kx_vec[mfi].dataPtr(); +#if (AMREX_SPACEDIM==3) + const Real* ky_s = modified_ky_vec[mfi].dataPtr(); +#endif + const Real* kz_s = modified_kz_vec[mfi].dataPtr(); + + Array4<Real const> C = C_coef[mfi].array(); + Array4<Real const> S_ck = S_ck_coef[mfi].array(); + + Array4<Complex> X5 = X5_coef[mfi].array(); + Array4<Complex> X6 = X6_coef[mfi].array(); + + // Loop over indices within one box + ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept + { + // Calculate norm of k vector + const Real knorm_s = std::sqrt( + std::pow(kx_s[i], 2) + +#if (AMREX_SPACEDIM==3) + std::pow(ky_s[j], 2) + std::pow(kz_s[k], 2)); +#else + std::pow(kz_s[j], 2)); +#endif + // Physical constants and imaginary unit + constexpr Real c = PhysConst::c; + constexpr Real c2 = c*c; + constexpr Real ep0 = PhysConst::ep0; + + // Auxiliary coefficients + const Real dt3 = dt * dt * dt; + + const Real om_s = c * knorm_s; + const Real om2_s = om_s * om_s; + const Real om4_s = om2_s * om2_s; + + if (om_s != 0.) + { + X5(i,j,k) = c2 / ep0 * (S_ck(i,j,k) / om2_s - (1._rt - C(i,j,k)) / (om4_s * dt) + - 0.5_rt * dt / om2_s); + } + else + { + X5(i,j,k) = - c2 * dt3 / (8._rt * ep0); + } + + if (om_s != 0.) 
+ { + X6(i,j,k) = c2 / ep0 * ((1._rt - C(i,j,k)) / (om4_s * dt) - 0.5_rt * dt / om2_s); + } + else + { + X6(i,j,k) = - c2 * dt3 / (24._rt * ep0); + } + }); + } +} + void PsatdAlgorithm::CurrentCorrection ( const int lev, diff --git a/Source/FieldSolver/SpectralSolver/SpectralFieldData.H b/Source/FieldSolver/SpectralSolver/SpectralFieldData.H index a1fed5cce..4999a268d 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralFieldData.H +++ b/Source/FieldSolver/SpectralSolver/SpectralFieldData.H @@ -37,8 +37,14 @@ struct SpectralFieldIndex { enum { Ex=0, Ey, Ez, Bx, By, Bz, Jx, Jy, Jz, rho_old, rho_new, n_fields, divE=3 }; }; +struct SpectralFieldIndexJLinearInTime { + enum { Ex=0, Ey, Ez, Bx, By, Bz, Jx_old, Jy_old, Jz_old, rho_old, rho_new, + Ex_avg, Ey_avg, Ez_avg, Bx_avg, By_avg, Bz_avg, + Jx_new, Jy_new, Jz_new, F, G, n_fields, divE=3 }; +}; + /* Index for the regular fields + averaged fields, when stored in spectral space */ -struct SpectralAvgFieldIndex { +struct SpectralFieldIndexTimeAveraging { enum { Ex=0, Ey, Ez, Bx, By, Bz, Jx, Jy, Jz, rho_old, rho_new, Ex_avg, Ey_avg, Ez_avg, Bx_avg, By_avg, Bz_avg,n_fields }; // n_fields is automatically the total number of fields }; diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.H b/Source/FieldSolver/SpectralSolver/SpectralSolver.H index 996053a88..e5d84a2af 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.H +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.H @@ -32,12 +32,40 @@ class SpectralSolver { public: - // Inline definition of the member functions of `SpectralSolver`, - // except the constructor (see `SpectralSolver.cpp`) - // The body of these functions is short, since the work is done in the - // underlying classes `SpectralFieldData` and `PsatdAlgorithm` - // Constructor + /** + * \brief Constructor of the class SpectralSolver + * + * Select the spectral algorithm to be used, allocate the corresponding coefficients + * for the discrete field update equations, and prepare the structures that store + * the fields in spectral space. 
+ * + * \param[in] lev mesh refinement level + * \param[in] realspace_ba BoxArray in real space + * \param[in] dm DistributionMapping for the given BoxArray + * \param[in] norder_x spectral order along x + * \param[in] norder_y spectral order along y + * \param[in] norder_z spectral order along z + * \param[in] nodal whether the spectral solver is applied to a nodal or staggered grid + * \param[in] v_galilean three-dimensional vector containing the components of the Galilean + * velocity for the standard or averaged Galilean PSATD solvers + * \param[in] v_comoving three-dimensional vector containing the components of the comoving + * velocity for the comoving PSATD solver + * \param[in] dx AMREX_SPACEDIM-dimensional vector containing the cell sizes along each direction + * \param[in] dt time step for the analytical integration of Maxwell's equations + * \param[in] pml whether the boxes in the given BoxArray are PML boxes + * \param[in] periodic_single_box whether there is only one periodic single box + * (no domain decomposition) + * \param[in] update_with_rho whether rho is used in the field update equations + * \param[in] fft_do_time_averaging whether the time averaging algorithm is used + * \param[in] J_linear_in_time whether to use two currents computed at the beginning and + * the end of the time interval (instead of using one current + * compute at half time) + * \param[in] dive_cleaning whether to use div(E) cleaning to account for errors in Gauss law + * (new field F in the field update equations) + * \param[in] divb_cleaning whether to use div(B) cleaning to account for errors in magnetic Gauss law + * (new field G in the field update equations) + */ SpectralSolver (const int lev, const amrex::BoxArray& realspace_ba, const amrex::DistributionMapping& dm, @@ -47,10 +75,11 @@ class SpectralSolver const amrex::Array<amrex::Real,3>& v_comoving, const amrex::RealVect dx, const amrex::Real dt, - const bool pml=false, - const bool periodic_single_box=false, - const bool update_with_rho=false, - const bool fft_do_time_averaging=false, + const bool pml = false, + const bool periodic_single_box = false, + const bool update_with_rho = false, + const bool fft_do_time_averaging = false, + const bool J_linear_in_time = false, const bool dive_cleaning = false, const bool divb_cleaning = false); @@ -117,7 +146,46 @@ class SpectralSolver algorithm->VayDeposition(lev, field_data, current); } + /** + * \brief Copy spectral data from component \c src_comp to component \c dest_comp + * of \c field_data.fields. 
+ * + * \param[in] src_comp component of the source FabArray from which the data are copied + * \param[in] dest_comp component of the destination FabArray where the data are copied + */ + void CopySpectralDataComp (const int src_comp, const int dest_comp) + { + // The last two arguments represent the number of components and + // the number of ghost cells to perform this operation + Copy(field_data.fields, field_data.fields, src_comp, dest_comp, 1, 0); + } + + /** + * \brief Set to zero the data on component \c icomp of \c field_data.fields + * + * \param[in] icomp component of the FabArray where the data are set to zero + */ + void ZeroOutDataComp (const int icomp) + { + // The last argument represents the number of components to perform this operation + field_data.fields.setVal(0., icomp, 1); + } + + /** + * \brief Scale the data on component \c icomp of \c field_data.fields + * by a given scale factor + * + * \param[in] icomp component of the FabArray where the data are scaled + * \param[in] scale_factor scale factor to use for scaling + */ + void ScaleDataComp (const int icomp, const amrex::Real scale_factor) + { + // The last argument represents the number of components to perform this operation + field_data.fields.mult(scale_factor, icomp, 1); + } + private: + void ReadParameters (); // Store field in spectral space and perform the Fourier transforms diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp b/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp index d04961c5f..89a7ce1f5 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp @@ -17,21 +17,6 @@ #if WARPX_USE_PSATD -/* \brief Initialize the spectral Maxwell solver - * - * This function selects the spectral algorithm to be used, allocates the - * corresponding coefficients for the discretized field update equation, - * and prepares the structures that store the fields in spectral space. - * - * \param norder_x Order of accuracy of the spatial derivatives along x - * \param norder_y Order of accuracy of the spatial derivatives along y - * \param norder_z Order of accuracy of the spatial derivatives along z - * \param nodal Whether the solver is applied to a nodal or staggered grid - * \param dx Cell size along each dimension - * \param dt Time step - * \param pml Whether the boxes in which the solver is applied are PML boxes - * \param periodic_single_box Whether the full simulation domain consists of a single periodic box (i.e. 
the global domain is not MPI parallelized) - */ SpectralSolver::SpectralSolver( const int lev, const amrex::BoxArray& realspace_ba, @@ -44,9 +29,10 @@ SpectralSolver::SpectralSolver( const bool pml, const bool periodic_single_box, const bool update_with_rho, const bool fft_do_time_averaging, + const bool J_linear_in_time, const bool dive_cleaning, - const bool divb_cleaning) { - + const bool divb_cleaning) +{ // Initialize all structures using the same distribution mapping dm // - Initialize k space object (Contains info about the size of @@ -70,14 +56,13 @@ SpectralSolver::SpectralSolver( // PSATD algorithms: standard, Galilean, or averaged Galilean else { algorithm = std::make_unique<PsatdAlgorithm>( - k_space, dm, norder_x, norder_y, norder_z, nodal, v_galilean, dt, update_with_rho, fft_do_time_averaging); + k_space, dm, norder_x, norder_y, norder_z, nodal, v_galilean, dt, update_with_rho, fft_do_time_averaging, J_linear_in_time); } } // - Initialize arrays for fields in spectral space + FFT plans field_data = SpectralFieldData( lev, realspace_ba, k_space, dm, algorithm->getRequiredNumberOfFields(), periodic_single_box); - } void diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp index f2fae02be..bea77e650 100644 --- a/Source/FieldSolver/WarpXPushFieldsEM.cpp +++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp @@ -53,158 +53,359 @@ using namespace amrex; #ifdef WARPX_USE_PSATD namespace { + void - PushPSATDSinglePatch ( + ForwardTransformVect ( const int lev, #ifdef WARPX_DIM_RZ SpectralSolverRZ& solver, #else SpectralSolver& solver, #endif - std::array<std::unique_ptr<amrex::MultiFab>,3>& Efield, - std::array<std::unique_ptr<amrex::MultiFab>,3>& Bfield, - std::array<std::unique_ptr<amrex::MultiFab>,3>& Efield_avg, - std::array<std::unique_ptr<amrex::MultiFab>,3>& Bfield_avg, - std::array<std::unique_ptr<amrex::MultiFab>,3>& current, - std::unique_ptr<amrex::MultiFab>& rho ) { - -#ifdef WARPX_DIM_RZ - amrex::ignore_unused(Efield_avg, Bfield_avg); -#endif - - using Idx = SpectralAvgFieldIndex; - - // Perform forward Fourier transform + std::array<std::unique_ptr<amrex::MultiFab>,3>& vector_field, + const int compx, const int compy, const int compz) + { #ifdef WARPX_DIM_RZ - solver.ForwardTransform(lev, - *Efield[0], Idx::Ex, - *Efield[1], Idx::Ey); + solver.ForwardTransform(lev, *vector_field[0], compx, *vector_field[1], compy); #else - solver.ForwardTransform(lev, *Efield[0], Idx::Ex); - solver.ForwardTransform(lev, *Efield[1], Idx::Ey); + solver.ForwardTransform(lev, *vector_field[0], compx); + solver.ForwardTransform(lev, *vector_field[1], compy); #endif - solver.ForwardTransform(lev, *Efield[2], Idx::Ez); + solver.ForwardTransform(lev, *vector_field[2], compz); + } + + void + BackwardTransformVect ( + const int lev, #ifdef WARPX_DIM_RZ - solver.ForwardTransform(lev, - *Bfield[0], Idx::Bx, - *Bfield[1], Idx::By); + SpectralSolverRZ& solver, #else - solver.ForwardTransform(lev, *Bfield[0], Idx::Bx); - solver.ForwardTransform(lev, *Bfield[1], Idx::By); + SpectralSolver& solver, #endif - solver.ForwardTransform(lev, *Bfield[2], Idx::Bz); + std::array<std::unique_ptr<amrex::MultiFab>,3>& vector_field, + const int compx, const int compy, const int compz) + { #ifdef WARPX_DIM_RZ - solver.ForwardTransform(lev, - *current[0], Idx::Jx, - *current[1], Idx::Jy); + solver.BackwardTransform(lev, *vector_field[0], compx, *vector_field[1], compy); #else - solver.ForwardTransform(lev, *current[0], Idx::Jx); - solver.ForwardTransform(lev, *current[1], Idx::Jy); 
+ solver.BackwardTransform(lev, *vector_field[0], compx); + solver.BackwardTransform(lev, *vector_field[1], compy); #endif - solver.ForwardTransform(lev, *current[2], Idx::Jz); + solver.BackwardTransform(lev, *vector_field[2], compz); + } +} + +using IdxAvg = SpectralFieldIndexTimeAveraging; +using IdxLin = SpectralFieldIndexJLinearInTime; + +void +WarpX::PSATDForwardTransformEB () +{ + for (int lev = 0; lev <= finest_level; ++lev) + { + ForwardTransformVect(lev, *spectral_solver_fp[lev], Efield_fp[lev], IdxAvg::Ex, IdxAvg::Ey, IdxAvg::Ez); + ForwardTransformVect(lev, *spectral_solver_fp[lev], Bfield_fp[lev], IdxAvg::Bx, IdxAvg::By, IdxAvg::Bz); - if (rho) { - solver.ForwardTransform(lev, *rho, Idx::rho_old, 0); - solver.ForwardTransform(lev, *rho, Idx::rho_new, 1); + if (spectral_solver_cp[lev]) + { + ForwardTransformVect(lev, *spectral_solver_cp[lev], Efield_cp[lev], IdxAvg::Ex, IdxAvg::Ey, IdxAvg::Ez); + ForwardTransformVect(lev, *spectral_solver_cp[lev], Bfield_cp[lev], IdxAvg::Bx, IdxAvg::By, IdxAvg::Bz); } -#ifdef WARPX_DIM_RZ - if (WarpX::use_kspace_filter) { - solver.ApplyFilter(Idx::rho_old); - solver.ApplyFilter(Idx::rho_new); - solver.ApplyFilter(Idx::Jx, Idx::Jy, Idx::Jz); + } +} + +void +WarpX::PSATDBackwardTransformEB () +{ + for (int lev = 0; lev <= finest_level; ++lev) + { + BackwardTransformVect(lev, *spectral_solver_fp[lev], Efield_fp[lev], IdxAvg::Ex, IdxAvg::Ey, IdxAvg::Ez); + BackwardTransformVect(lev, *spectral_solver_fp[lev], Bfield_fp[lev], IdxAvg::Bx, IdxAvg::By, IdxAvg::Bz); + + if (spectral_solver_cp[lev]) + { + BackwardTransformVect(lev, *spectral_solver_cp[lev], Efield_cp[lev], IdxAvg::Ex, IdxAvg::Ey, IdxAvg::Ez); + BackwardTransformVect(lev, *spectral_solver_cp[lev], Bfield_cp[lev], IdxAvg::Bx, IdxAvg::By, IdxAvg::Bz); } -#endif - // Advance fields in spectral space - solver.pushSpectralFields(); - // Perform backward Fourier Transform + } + + // Damp the fields in the guard cells along z + constexpr int zdir = AMREX_SPACEDIM - 1; + if (WarpX::field_boundary_lo[zdir] == FieldBoundaryType::Damped && + WarpX::field_boundary_hi[zdir] == FieldBoundaryType::Damped) + { + for (int lev = 0; lev <= finest_level; ++lev) + { + DampFieldsInGuards(Efield_fp[lev], Bfield_fp[lev]); + } + } +} + +void +WarpX::PSATDBackwardTransformEBavg () +{ + for (int lev = 0; lev <= finest_level; ++lev) + { + BackwardTransformVect(lev, *spectral_solver_fp[lev], Efield_avg_fp[lev], IdxAvg::Ex_avg, IdxAvg::Ey_avg, IdxAvg::Ez_avg); + BackwardTransformVect(lev, *spectral_solver_fp[lev], Bfield_avg_fp[lev], IdxAvg::Bx_avg, IdxAvg::By_avg, IdxAvg::Bz_avg); + + if (spectral_solver_cp[lev]) + { + BackwardTransformVect(lev, *spectral_solver_cp[lev], Efield_avg_cp[lev], IdxAvg::Ex_avg, IdxAvg::Ey_avg, IdxAvg::Ez_avg); + BackwardTransformVect(lev, *spectral_solver_cp[lev], Bfield_avg_cp[lev], IdxAvg::Bx_avg, IdxAvg::By_avg, IdxAvg::Bz_avg); + } + } +} + +void +WarpX::PSATDForwardTransformF () +{ + for (int lev = 0; lev <= finest_level; ++lev) + { + if (F_fp[lev]) spectral_solver_fp[lev]->ForwardTransform(lev, *F_fp[lev], IdxLin::F); + + if (spectral_solver_cp[lev]) + { + if (F_cp[lev]) spectral_solver_cp[lev]->ForwardTransform(lev, *F_cp[lev], IdxLin::F); + } + } +} + +void +WarpX::PSATDBackwardTransformF () +{ + for (int lev = 0; lev <= finest_level; ++lev) + { + if (F_fp[lev]) spectral_solver_fp[lev]->BackwardTransform(lev, *F_fp[lev], IdxLin::F); + + if (spectral_solver_cp[lev]) + { + if (F_cp[lev]) spectral_solver_cp[lev]->BackwardTransform(lev, *F_cp[lev], IdxLin::F); + } + } +} + +void 
+WarpX::PSATDForwardTransformG () +{ + for (int lev = 0; lev <= finest_level; ++lev) + { + if (G_fp[lev]) spectral_solver_fp[lev]->ForwardTransform(lev, *G_fp[lev], IdxLin::G); + + if (spectral_solver_cp[lev]) + { + if (G_cp[lev]) spectral_solver_cp[lev]->ForwardTransform(lev, *G_cp[lev], IdxLin::G); + } + } +} + +void +WarpX::PSATDBackwardTransformG () +{ + for (int lev = 0; lev <= finest_level; ++lev) + { + if (G_fp[lev]) spectral_solver_fp[lev]->BackwardTransform(lev, *G_fp[lev], IdxLin::G); + + if (spectral_solver_cp[lev]) + { + if (G_cp[lev]) spectral_solver_cp[lev]->BackwardTransform(lev, *G_cp[lev], IdxLin::G); + } + } +} + +void +WarpX::PSATDForwardTransformJ () +{ + const int idx_jx = (WarpX::J_linear_in_time) ? static_cast<int>(IdxLin::Jx_new) + : static_cast<int>(IdxAvg::Jx); + const int idx_jy = (WarpX::J_linear_in_time) ? static_cast<int>(IdxLin::Jy_new) + : static_cast<int>(IdxAvg::Jy); + const int idx_jz = (WarpX::J_linear_in_time) ? static_cast<int>(IdxLin::Jz_new) + : static_cast<int>(IdxAvg::Jz); + + for (int lev = 0; lev <= finest_level; ++lev) + { + ForwardTransformVect(lev, *spectral_solver_fp[lev], current_fp[lev], idx_jx, idx_jy, idx_jz); + + if (spectral_solver_cp[lev]) + { + ForwardTransformVect(lev, *spectral_solver_cp[lev], current_cp[lev], idx_jx, idx_jy, idx_jz); + } + } + #ifdef WARPX_DIM_RZ - solver.BackwardTransform(lev, - *Efield[0], Idx::Ex, - *Efield[1], Idx::Ey); -#else - solver.BackwardTransform(lev, *Efield[0], Idx::Ex); - solver.BackwardTransform(lev, *Efield[1], Idx::Ey); + // Apply filter in k space if needed + if (WarpX::use_kspace_filter) + { + for (int lev = 0; lev <= finest_level; ++lev) + { + spectral_solver_fp[lev]->ApplyFilter(IdxAvg::Jx, IdxAvg::Jy, IdxAvg::Jz); + + if (spectral_solver_cp[lev]) + { + spectral_solver_cp[lev]->ApplyFilter(IdxAvg::Jx, IdxAvg::Jy, IdxAvg::Jz); + } + } + } #endif - solver.BackwardTransform(lev, *Efield[2], Idx::Ez); +} + +void +WarpX::PSATDForwardTransformRho (const int icomp) +{ + // Select index in k space + const int dst_comp = (icomp == 0) ? 
IdxAvg::rho_old : IdxAvg::rho_new; + + for (int lev = 0; lev <= finest_level; ++lev) + { + if (rho_fp[lev]) spectral_solver_fp[lev]->ForwardTransform(lev, *rho_fp[lev], dst_comp, icomp); + + if (spectral_solver_cp[lev]) + { + if (rho_cp[lev]) spectral_solver_cp[lev]->ForwardTransform(lev, *rho_cp[lev], dst_comp, icomp); + } + } + #ifdef WARPX_DIM_RZ - solver.BackwardTransform(lev, - *Bfield[0], Idx::Bx, - *Bfield[1], Idx::By); -#else - solver.BackwardTransform(lev, *Bfield[0], Idx::Bx); - solver.BackwardTransform(lev, *Bfield[1], Idx::By); -#endif - solver.BackwardTransform(lev, *Bfield[2], Idx::Bz); + // Apply filter in k space if needed + if (WarpX::use_kspace_filter) + { + for (int lev = 0; lev <= finest_level; ++lev) + { + spectral_solver_fp[lev]->ApplyFilter(dst_comp); -#ifndef WARPX_DIM_RZ - if (WarpX::fft_do_time_averaging){ - solver.BackwardTransform(lev, *Efield_avg[0], Idx::Ex_avg); - solver.BackwardTransform(lev, *Efield_avg[1], Idx::Ey_avg); - solver.BackwardTransform(lev, *Efield_avg[2], Idx::Ez_avg); - - solver.BackwardTransform(lev, *Bfield_avg[0], Idx::Bx_avg); - solver.BackwardTransform(lev, *Bfield_avg[1], Idx::By_avg); - solver.BackwardTransform(lev, *Bfield_avg[2], Idx::Bz_avg); + if (spectral_solver_cp[lev]) + { + spectral_solver_cp[lev]->ApplyFilter(dst_comp); + } } + } #endif +} + +void +WarpX::PSATDPushSpectralFields () +{ + for (int lev = 0; lev <= finest_level; ++lev) + { + spectral_solver_fp[lev]->pushSpectralFields(); + + if (spectral_solver_cp[lev]) + { + spectral_solver_cp[lev]->pushSpectralFields(); + } } } -#endif +#ifndef WARPX_DIM_RZ void -WarpX::PushPSATD (amrex::Real a_dt) +WarpX::PSATDMoveRhoNewToRhoOld () { -#ifndef WARPX_USE_PSATD - amrex::ignore_unused(a_dt); - AMREX_ALWAYS_ASSERT_WITH_MESSAGE(false, - "PushFieldsEM: PSATD solver selected but not built."); -#else - for (int lev = 0; lev <= finest_level; ++lev) { - AMREX_ALWAYS_ASSERT_WITH_MESSAGE(dt[lev] == a_dt, "dt must be consistent"); - PushPSATD(lev, a_dt); + for (int lev = 0; lev <= finest_level; ++lev) + { + spectral_solver_fp[lev]->CopySpectralDataComp(IdxAvg::rho_new, IdxAvg::rho_old); - // Evolve the fields in the PML boxes - if (do_pml && pml[lev]->ok()) { - pml[lev]->PushPSATD(lev); + if (spectral_solver_cp[lev]) + { + spectral_solver_cp[lev]->CopySpectralDataComp(IdxAvg::rho_new, IdxAvg::rho_old); } - ApplyEfieldBoundary(lev,PatchType::fine); - if (lev > 0) ApplyEfieldBoundary(lev,PatchType::coarse); - ApplyBfieldBoundary(lev,PatchType::fine); - if (lev > 0) ApplyBfieldBoundary(lev,PatchType::coarse); } -#endif } void -WarpX::PushPSATD (int lev, amrex::Real /* dt */) { -#ifndef WARPX_USE_PSATD - amrex::ignore_unused(lev); - AMREX_ALWAYS_ASSERT_WITH_MESSAGE(false, - "PushFieldsEM: PSATD solver selected but not built."); -#else - if (WarpX::maxwell_solver_id != MaxwellSolverAlgo::PSATD) { - AMREX_ALWAYS_ASSERT_WITH_MESSAGE(false, - "WarpX::PushPSATD: only supported for PSATD solver."); +WarpX::PSATDMoveJNewToJOld () +{ + for (int lev = 0; lev <= finest_level; ++lev) + { + spectral_solver_fp[lev]->CopySpectralDataComp(IdxLin::Jx_new, IdxLin::Jx_old); + spectral_solver_fp[lev]->CopySpectralDataComp(IdxLin::Jy_new, IdxLin::Jy_old); + spectral_solver_fp[lev]->CopySpectralDataComp(IdxLin::Jz_new, IdxLin::Jz_old); + + if (spectral_solver_cp[lev]) + { + spectral_solver_cp[lev]->CopySpectralDataComp(IdxLin::Jx_new, IdxLin::Jx_old); + spectral_solver_cp[lev]->CopySpectralDataComp(IdxLin::Jy_new, IdxLin::Jy_old); + spectral_solver_cp[lev]->CopySpectralDataComp(IdxLin::Jz_new, IdxLin::Jz_old); + } } 
- // Update the fields on the fine and coarse patch - PushPSATDSinglePatch( lev, *spectral_solver_fp[lev], - Efield_fp[lev], Bfield_fp[lev], Efield_avg_fp[lev], Bfield_avg_fp[lev], current_fp[lev], rho_fp[lev] ); - if (spectral_solver_cp[lev]) { - PushPSATDSinglePatch( lev, *spectral_solver_cp[lev], - Efield_cp[lev], Bfield_cp[lev], Efield_avg_cp[lev], Bfield_avg_cp[lev], current_cp[lev], rho_cp[lev] ); +} + +void +WarpX::PSATDEraseAverageFields () +{ + for (int lev = 0; lev <= finest_level; ++lev) + { + spectral_solver_fp[lev]->ZeroOutDataComp(IdxAvg::Ex_avg); + spectral_solver_fp[lev]->ZeroOutDataComp(IdxAvg::Ey_avg); + spectral_solver_fp[lev]->ZeroOutDataComp(IdxAvg::Ez_avg); + spectral_solver_fp[lev]->ZeroOutDataComp(IdxAvg::Bx_avg); + spectral_solver_fp[lev]->ZeroOutDataComp(IdxAvg::By_avg); + spectral_solver_fp[lev]->ZeroOutDataComp(IdxAvg::Bz_avg); + + if (spectral_solver_cp[lev]) + { + spectral_solver_cp[lev]->ZeroOutDataComp(IdxAvg::Ex_avg); + spectral_solver_cp[lev]->ZeroOutDataComp(IdxAvg::Ey_avg); + spectral_solver_cp[lev]->ZeroOutDataComp(IdxAvg::Ez_avg); + spectral_solver_cp[lev]->ZeroOutDataComp(IdxAvg::Bx_avg); + spectral_solver_cp[lev]->ZeroOutDataComp(IdxAvg::By_avg); + spectral_solver_cp[lev]->ZeroOutDataComp(IdxAvg::Bz_avg); + } } +} - // Damp the fields in the guard cells along z - constexpr int zdir = AMREX_SPACEDIM - 1; - if (WarpX::field_boundary_lo[zdir] == FieldBoundaryType::Damped && - WarpX::field_boundary_hi[zdir] == FieldBoundaryType::Damped) +void +WarpX::PSATDScaleAverageFields (const amrex::Real scale_factor) +{ + for (int lev = 0; lev <= finest_level; ++lev) { - DampFieldsInGuards(Efield_fp[lev], Bfield_fp[lev]); + spectral_solver_fp[lev]->ScaleDataComp(IdxAvg::Ex_avg, scale_factor); + spectral_solver_fp[lev]->ScaleDataComp(IdxAvg::Ey_avg, scale_factor); + spectral_solver_fp[lev]->ScaleDataComp(IdxAvg::Ez_avg, scale_factor); + spectral_solver_fp[lev]->ScaleDataComp(IdxAvg::Bx_avg, scale_factor); + spectral_solver_fp[lev]->ScaleDataComp(IdxAvg::By_avg, scale_factor); + spectral_solver_fp[lev]->ScaleDataComp(IdxAvg::Bz_avg, scale_factor); + + if (spectral_solver_cp[lev]) + { + spectral_solver_cp[lev]->ScaleDataComp(IdxAvg::Ex_avg, scale_factor); + spectral_solver_cp[lev]->ScaleDataComp(IdxAvg::Ey_avg, scale_factor); + spectral_solver_cp[lev]->ScaleDataComp(IdxAvg::Ez_avg, scale_factor); + spectral_solver_cp[lev]->ScaleDataComp(IdxAvg::Bx_avg, scale_factor); + spectral_solver_cp[lev]->ScaleDataComp(IdxAvg::By_avg, scale_factor); + spectral_solver_cp[lev]->ScaleDataComp(IdxAvg::Bz_avg, scale_factor); + } + } +} +#endif // not WARPX_DIM_RZ +#endif // WARPX_USE_PSATD - if (WarpX::fft_do_time_averaging) +void +WarpX::PushPSATD () +{ +#ifndef WARPX_USE_PSATD + amrex::Abort("PushFieldsEM: PSATD solver selected but not built"); +#else + + PSATDForwardTransformEB(); + PSATDForwardTransformJ(); + PSATDForwardTransformRho(0); // rho old + PSATDForwardTransformRho(1); // rho new + PSATDPushSpectralFields(); + PSATDBackwardTransformEB(); + if (WarpX::fft_do_time_averaging) PSATDBackwardTransformEBavg(); + + // Evolve the fields in the PML boxes + for (int lev = 0; lev <= finest_level; ++lev) + { + if (do_pml && pml[lev]->ok()) { - DampFieldsInGuards(Efield_avg_fp[lev], Bfield_avg_fp[lev]); + pml[lev]->PushPSATD(lev); } + ApplyEfieldBoundary(lev, PatchType::fine); + if (lev > 0) ApplyEfieldBoundary(lev, PatchType::coarse); + ApplyBfieldBoundary(lev, PatchType::fine); + if (lev > 0) ApplyBfieldBoundary(lev, PatchType::coarse); } #endif } diff --git 
a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 7acb332fd..0c4a6426d 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -261,7 +261,7 @@ WarpX::InitPML () pml_ncell, pml_delta, amrex::IntVect::TheZeroVector(), dt[0], nox_fft, noy_fft, noz_fft, do_nodal, do_moving_window, pml_has_particles, do_pml_in_domain, - do_pml_dive_cleaning, do_pml_divb_cleaning, + J_linear_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, do_pml_Lo_corrected, do_pml_Hi); for (int lev = 1; lev <= finest_level; ++lev) { @@ -289,7 +289,7 @@ WarpX::InitPML () pml_ncell, pml_delta, refRatio(lev-1), dt[lev], nox_fft, noy_fft, noz_fft, do_nodal, do_moving_window, pml_has_particles, do_pml_in_domain, - do_pml_dive_cleaning, do_pml_divb_cleaning, + J_linear_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, do_pml_Lo_MR, do_pml_Hi_MR); } } diff --git a/Source/Parallelization/GuardCellManager.cpp b/Source/Parallelization/GuardCellManager.cpp index c4c9c7b1b..cc0d28535 100644 --- a/Source/Parallelization/GuardCellManager.cpp +++ b/Source/Parallelization/GuardCellManager.cpp @@ -189,8 +189,10 @@ guardCellManager::Init ( ng_alloc_F[i_dim] = ng_required; ng_alloc_Rho[i_dim] = ng_required; ng_alloc_F_int = ng_required; + ng_alloc_G_int = ng_required; } ng_alloc_F = IntVect(AMREX_D_DECL(ng_alloc_F_int, ng_alloc_F_int, ng_alloc_F_int)); + ng_alloc_G = IntVect(AMREX_D_DECL(ng_alloc_G_int, ng_alloc_G_int, ng_alloc_G_int)); } // Compute number of cells required for Field Solver diff --git a/Source/Particles/MultiParticleContainer.H b/Source/Particles/MultiParticleContainer.H index cad602e58..bc0d65e24 100644 --- a/Source/Particles/MultiParticleContainer.H +++ b/Source/Particles/MultiParticleContainer.H @@ -137,6 +137,39 @@ public: */ std::unique_ptr<amrex::MultiFab> GetZeroChargeDensity(const int lev); + /** + * \brief Deposit charge density. + * + * \param[in,out] rho vector of charge densities (one pointer to MultiFab per mesh refinement level) + * \param[in] relative_t Time at which to deposit rho, relative to the time + * of the current positions of the particles (expressed as + * a fraction of dt). When different than 0, the particle + * position will be temporarily modified to match the time + * of the deposition. + * \param[in] icomp component of the MultiFab where rho is deposited (old, new) + */ + void + DepositCharge (amrex::Vector<std::unique_ptr<amrex::MultiFab> >& rho, + const amrex::Real relative_t, const int icomp = 0); + + /** + * \brief Deposit current density. + * + * \param[in,out] J vector of current densities (one three-dimensional array of pointers + * to MultiFabs per mesh refinement level) + * \param[in] dt: Time step for particle level (is used when temporarily + * modifying the particle positions, either within the + * Esirkepov or when `relative_t` is different than 0 + * \param[in] relative_t: Time at which to deposit J, relative to the time of + * the current positions of the particles (expressed as + * a fraction of dt). When different than 0, the particle + * position will be temporarily modified to match the + * time of the deposition. + */ + void + DepositCurrent (amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3 > >& J, + const amrex::Real dt, const amrex::Real relative_t); + /// /// This deposits the particle charge onto a node-centered MultiFab and returns a unique ptr /// to it. 
The charge density is accumulated over all the particles in the MultiParticleContainer diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp index 4990f7c6d..b7d34ec75 100644 --- a/Source/Particles/MultiParticleContainer.cpp +++ b/Source/Particles/MultiParticleContainer.cpp @@ -421,6 +421,72 @@ MultiParticleContainer::GetZeroChargeDensity (const int lev) return zero_rho; } +void +MultiParticleContainer::DepositCurrent ( + amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3 > >& J, + const amrex::Real dt, const amrex::Real relative_t) +{ + // Reset the J arrays + for (int lev = 0; lev < J.size(); ++lev) + { + J[lev][0]->setVal(0.0, J[lev][0]->nGrowVect()); + J[lev][1]->setVal(0.0, J[lev][1]->nGrowVect()); + J[lev][2]->setVal(0.0, J[lev][2]->nGrowVect()); + } + + // Call the deposition kernel for each species + for (int ispecies = 0; ispecies < nSpecies(); ispecies++) + { + WarpXParticleContainer& species = GetParticleContainer(ispecies); + species.DepositCurrent(J, dt, relative_t); + } + +#ifdef WARPX_DIM_RZ + for (int lev = 0; lev < J.size(); ++lev) + { + WarpX::GetInstance().ApplyInverseVolumeScalingToCurrentDensity(J[lev][0].get(), J[lev][1].get(), J[lev][2].get(), lev); + } +#endif +} + +void +MultiParticleContainer::DepositCharge ( + amrex::Vector<std::unique_ptr<amrex::MultiFab> >& rho, + const amrex::Real relative_t, const int icomp) +{ + // Reset the rho array + for (int lev = 0; lev < rho.size(); ++lev) + { + int const nc = WarpX::ncomps; + rho[lev]->setVal(0.0, icomp*nc, nc, rho[lev]->nGrowVect()); + } + + // Push the particles in time, if needed + if (relative_t != 0.) PushX(relative_t); + + // Call the deposition kernel for each species + for (int ispecies = 0; ispecies < nSpecies(); ispecies++) + { + WarpXParticleContainer& species = GetParticleContainer(ispecies); + bool const local = true; + bool const reset = false; + bool const do_rz_volume_scaling = false; + bool const interpolate_across_levels = false; + species.DepositCharge(rho, local, reset, do_rz_volume_scaling, + interpolate_across_levels, icomp); + } + + // Push the particles back in time + if (relative_t != 0.) PushX(-relative_t); + +#ifdef WARPX_DIM_RZ + for (int lev = 0; lev < rho.size(); ++lev) + { + WarpX::GetInstance().ApplyInverseVolumeScalingToChargeDensity(rho[lev].get(), lev); + } +#endif +} + std::unique_ptr<MultiFab> MultiParticleContainer::GetChargeDensity (int lev, bool local) { diff --git a/Source/Particles/WarpXParticleContainer.H b/Source/Particles/WarpXParticleContainer.H index cde77dc74..1f3551c7b 100644 --- a/Source/Particles/WarpXParticleContainer.H +++ b/Source/Particles/WarpXParticleContainer.H @@ -206,9 +206,39 @@ public: const amrex::MultiFab& By, const amrex::MultiFab& Bz) = 0; - void DepositCharge(amrex::Vector<std::unique_ptr<amrex::MultiFab> >& rho, - bool local = false, bool reset = false, - bool do_rz_volume_scaling = false ); + /** + * \brief Deposit current density. + * + * \param[in,out] J vector of current densities (one three-dimensional array of pointers + * to MultiFabs per mesh refinement level) + * \param[in] dt: Time step for particle level (is used when temporarily + * modifying the particle positions, either within the + * Esirkepov or when `relative_t` is different than 0 + * \param[in] relative_t: Time at which to deposit J, relative to the time + * of the current positions of the particles (expressed as + * a fraction of dt). 
When different than 0, the particle + * position will be temporarily modified to match the + * time of the deposition. + */ + void DepositCurrent (amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3 > >& J, + const amrex::Real dt, const amrex::Real relative_t); + + /** + * \brief Deposit charge density. + * + * \param[in,out] rho vector of charge densities (one pointer to MultiFab per mesh refinement level) + * \param[in] local if false, exchange the data in the guard cells after the deposition + * \param[in] reset if true, reset all values of rho to zero + * \param[in] do_rz_volume_scaling whether to scale the final density by some volume norm in RZ geometry + * \param[in] interpolate_across_levels whether to average down from the fine patch to the coarse patch + * \param[in] icomp component of the MultiFab where rho is deposited (old, new) + */ + void DepositCharge (amrex::Vector<std::unique_ptr<amrex::MultiFab> >& rho, + const bool local = false, const bool reset = false, + const bool do_rz_volume_scaling = false, + const bool interpolate_across_levels = true, + const int icomp = 0); + std::unique_ptr<amrex::MultiFab> GetChargeDensity(int lev, bool local = false); virtual void DepositCharge (WarpXParIter& pti, diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp index abb2a224c..564ffa706 100644 --- a/Source/Particles/WarpXParticleContainer.cpp +++ b/Source/Particles/WarpXParticleContainer.cpp @@ -499,6 +499,47 @@ WarpXParticleContainer::DepositCurrent (WarpXParIter& pti, #endif } +void +WarpXParticleContainer::DepositCurrent ( + amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3 > >& J, + const amrex::Real dt, const amrex::Real relative_t) +{ + // Loop over the refinement levels + int const finest_level = J.size() - 1; + for (int lev = 0; lev <= finest_level; ++lev) + { + // Loop over particle tiles and deposit current on each level +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) + { + int thread_num = omp_get_thread_num(); +#else + int thread_num = 0; +#endif + for (WarpXParIter pti(*this, lev); pti.isValid(); ++pti) + { + const long np = pti.numParticles(); + const auto & wp = pti.GetAttribs(PIdx::w); + const auto & uxp = pti.GetAttribs(PIdx::ux); + const auto & uyp = pti.GetAttribs(PIdx::uy); + const auto & uzp = pti.GetAttribs(PIdx::uz); + + int* AMREX_RESTRICT ion_lev = nullptr; + if (do_field_ionization) + { + ion_lev = pti.GetiAttribs(particle_icomps["ionization_level"]).dataPtr(); + } + + DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, + J[lev][0].get(), J[lev][1].get(), J[lev][2].get(), + 0, np, thread_num, lev, lev, dt, relative_t/dt); + } +#ifdef AMREX_USE_OMP + } +#endif + } +} + /* \brief Charge Deposition for thread thread_num * \param pti : Particle iterator * \param wp : Array of particle weights @@ -666,18 +707,21 @@ WarpXParticleContainer::DepositCharge (WarpXParIter& pti, RealVector& wp, void WarpXParticleContainer::DepositCharge (amrex::Vector<std::unique_ptr<amrex::MultiFab> >& rho, - bool local, bool reset, - bool do_rz_volume_scaling) + const bool local, const bool reset, + const bool do_rz_volume_scaling, + const bool interpolate_across_levels, + const int icomp) { #ifdef WARPX_DIM_RZ (void)do_rz_volume_scaling; #endif // Loop over the refinement levels int const finest_level = rho.size() - 1; - for (int lev = 0; lev <= finest_level; ++lev) { - - // Reset the `rho` array if `reset` is True - if (reset) rho[lev]->setVal(0.0, rho[lev]->nGrowVect()); + for (int 
lev = 0; lev <= finest_level; ++lev) + { + // Reset the rho array if reset is True + int const nc = WarpX::ncomps; + if (reset) rho[lev]->setVal(0., icomp*nc, nc, rho[lev]->nGrowVect()); // Loop over particle tiles and deposit charge on each level #ifdef AMREX_USE_OMP @@ -692,21 +736,21 @@ WarpXParticleContainer::DepositCharge (amrex::Vector<std::unique_ptr<amrex::Mult const long np = pti.numParticles(); auto& wp = pti.GetAttribs(PIdx::w); - int* AMREX_RESTRICT ion_lev; - if (do_field_ionization){ + int* AMREX_RESTRICT ion_lev = nullptr; + if (do_field_ionization) + { ion_lev = pti.GetiAttribs(particle_icomps["ionization_level"]).dataPtr(); - } else { - ion_lev = nullptr; } - DepositCharge(pti, wp, ion_lev, rho[lev].get(), 0, 0, np, thread_num, lev, lev); + DepositCharge(pti, wp, ion_lev, rho[lev].get(), icomp, 0, np, thread_num, lev, lev); } #ifdef AMREX_USE_OMP } #endif #ifdef WARPX_DIM_RZ - if (do_rz_volume_scaling) { + if (do_rz_volume_scaling) + { WarpX::GetInstance().ApplyInverseVolumeScalingToChargeDensity(rho[lev].get(), lev); } #else @@ -714,22 +758,24 @@ WarpXParticleContainer::DepositCharge (amrex::Vector<std::unique_ptr<amrex::Mult #endif // Exchange guard cells - if (!local) rho[lev]->SumBoundary( m_gdb->Geom(lev).periodicity() ); + if (local == false) rho[lev]->SumBoundary(m_gdb->Geom(lev).periodicity()); } // Now that the charge has been deposited at each level, // we average down from fine to crse - for (int lev = finest_level - 1; lev >= 0; --lev) { - const DistributionMapping& fine_dm = rho[lev+1]->DistributionMap(); - BoxArray coarsened_fine_BA = rho[lev+1]->boxArray(); - coarsened_fine_BA.coarsen(m_gdb->refRatio(lev)); - MultiFab coarsened_fine_data(coarsened_fine_BA, fine_dm, rho[lev+1]->nComp(), 0); - coarsened_fine_data.setVal(0.0); - - int const refinement_ratio = 2; - - CoarsenMR::Coarsen( coarsened_fine_data, *rho[lev+1], IntVect(refinement_ratio) ); - rho[lev]->ParallelAdd( coarsened_fine_data, m_gdb->Geom(lev).periodicity() ); + if (interpolate_across_levels) + { + for (int lev = finest_level - 1; lev >= 0; --lev) + { + const DistributionMapping& fine_dm = rho[lev+1]->DistributionMap(); + BoxArray coarsened_fine_BA = rho[lev+1]->boxArray(); + coarsened_fine_BA.coarsen(m_gdb->refRatio(lev)); + MultiFab coarsened_fine_data(coarsened_fine_BA, fine_dm, rho[lev+1]->nComp(), 0); + coarsened_fine_data.setVal(0.0); + const int refinement_ratio = 2; + CoarsenMR::Coarsen(coarsened_fine_data, *rho[lev+1], IntVect(refinement_ratio)); + rho[lev]->ParallelAdd(coarsened_fine_data, m_gdb->Geom(lev).periodicity()); + } } } @@ -789,7 +835,7 @@ WarpXParticleContainer::GetChargeDensity (int lev, bool local) WarpX::GetInstance().ApplyInverseVolumeScalingToChargeDensity(rho.get(), lev); #endif - if (!local) rho->SumBoundary(gm.periodicity()); + if (local == false) rho->SumBoundary(gm.periodicity()); return rho; } @@ -814,7 +860,7 @@ Real WarpXParticleContainer::sumParticleCharge(bool local) { } } - if (!local) ParallelDescriptor::ReduceRealSum(total_charge); + if (local == false) ParallelDescriptor::ReduceRealSum(total_charge); total_charge *= this->charge; return total_charge; } @@ -890,7 +936,7 @@ std::array<Real, 3> WarpXParticleContainer::meanParticleVelocity(bool local) { } } - if (!local) { + if (local == false) { ParallelDescriptor::ReduceRealSum(vx_total); ParallelDescriptor::ReduceRealSum(vy_total); ParallelDescriptor::ReduceRealSum(vz_total); @@ -929,7 +975,7 @@ Real WarpXParticleContainer::maxParticleVelocity(bool local) { } } - if (!local) ParallelAllReduce::Max(max_v, 
ParallelDescriptor::Communicator()); + if (local == false) ParallelAllReduce::Max(max_v, ParallelDescriptor::Communicator()); return max_v; } diff --git a/Source/WarpX.H b/Source/WarpX.H index a70ea2af2..4f985f9c4 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -204,6 +204,9 @@ public: static amrex::IntVect sort_bin_size; static int do_subcycling; + static int do_multi_J; + static int do_multi_J_n_depositions; + static int J_linear_in_time; static bool do_device_synchronize_before_profile; static bool safe_guard_cells; @@ -768,6 +771,11 @@ private: void OneStep_nosub (amrex::Real t); void OneStep_sub1 (amrex::Real t); + /** + * \brief Perform one PIC iteration, with the multiple J deposition per time step + */ + void OneStep_multiJ (const amrex::Real t); + void RestrictCurrentFromFineToCoarsePatch (int lev); void AddCurrentFromFineLevelandSumBoundary (int lev); void StoreCurrent (int lev); @@ -1110,9 +1118,88 @@ private: void ScaleAreas (); private: - // void EvolvePSATD (int numsteps); - void PushPSATD (amrex::Real dt); - void PushPSATD (int lev, amrex::Real dt); + + void PushPSATD (); + +#ifdef WARPX_USE_PSATD + + /** + * \brief Forward FFT of E,B on all mesh refinement levels + */ + void PSATDForwardTransformEB (); + + /** + * \brief Backward FFT of E,B on all mesh refinement levels, + * with field damping in the guard cells (if needed) + */ + void PSATDBackwardTransformEB (); + + /** + * \brief Backward FFT of averaged E,B on all mesh refinement levels + */ + void PSATDBackwardTransformEBavg (); + + /** + * \brief Forward FFT of J on all mesh refinement levels, + * with k-space filtering (if needed) + */ + void PSATDForwardTransformJ (); + + /** + * \brief Forward FFT of rho on all mesh refinement levels, + * with k-space filtering (if needed) + * + * \param[in] icomp index of fourth component (0 for rho_old, 1 for rho_new) + */ + void PSATDForwardTransformRho (const int icomp); + + /** + * \brief Copy rho_new to rho_old in spectral space + */ + void PSATDMoveRhoNewToRhoOld (); + + /** + * \brief Copy J_new to J_old in spectral space (when J is linear in time) + */ + void PSATDMoveJNewToJOld (); + + /** + * \brief Forward FFT of F on all mesh refinement levels + */ + void PSATDForwardTransformF (); + + /** + * \brief Backward FFT of F on all mesh refinement levels + */ + void PSATDBackwardTransformF (); + + /** + * \brief Forward FFT of G on all mesh refinement levels + */ + void PSATDForwardTransformG (); + + /** + * \brief Backward FFT of G on all mesh refinement levels + */ + void PSATDBackwardTransformG (); + + /** + * \brief Update all necessary fields in spectral space + */ + void PSATDPushSpectralFields (); + + /** + * \brief Scale averaged E,B fields to account for time integration + * + * \param[in] scale_factor scalar to multiply each field component by + */ + void PSATDScaleAverageFields (const amrex::Real scale_factor); + + /** + * \brief Set averaged E,B fields to zero before new iteration + */ + void PSATDEraseAverageFields (); +#endif int fftw_plan_measure = 1; // used with PSATD diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index d4f9b5de1..a31c0c1e3 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -171,6 +171,9 @@ Real WarpX::self_fields_required_precision = 1.e-11_rt; int WarpX::self_fields_max_iters = 200; int WarpX::do_subcycling = 0; +int WarpX::do_multi_J = 0; +int WarpX::do_multi_J_n_depositions; +int WarpX::J_linear_in_time = 0; bool WarpX::safe_guard_cells = 0; IntVect WarpX::filter_npass_each_dir(1); @@ -447,6 +450,11 @@ WarpX::ReadParameters () 
pp_warpx.query("verbose", verbose); pp_warpx.query("regrid_int", regrid_int); pp_warpx.query("do_subcycling", do_subcycling); + pp_warpx.query("do_multi_J", do_multi_J); + if (do_multi_J) + { + pp_warpx.get("do_multi_J_n_depositions", do_multi_J_n_depositions); + } pp_warpx.query("use_hybrid_QED", use_hybrid_QED); pp_warpx.query("safe_guard_cells", safe_guard_cells); std::vector<std::string> override_sync_intervals_string_vec = {"1"}; @@ -616,12 +624,18 @@ WarpX::ReadParameters () pp_warpx.query("do_pml_j_damping", do_pml_j_damping); pp_warpx.query("do_pml_in_domain", do_pml_in_domain); + if (do_multi_J && do_pml) + { + amrex::Abort("Multi-J algorithm not implemented with PMLs"); + } + // div(E) cleaning not implemented for PSATD solver if (maxwell_solver_id == MaxwellSolverAlgo::PSATD) { - AMREX_ALWAYS_ASSERT_WITH_MESSAGE( - do_dive_cleaning == 0, - "warpx.do_dive_cleaning = 1 not implemented for PSATD solver"); + if (do_multi_J == 0 && do_dive_cleaning == 1) + { + amrex::Abort("warpx.do_dive_cleaning = 1 not implemented for PSATD solver"); + } } // Default values of WarpX::do_pml_dive_cleaning and WarpX::do_pml_divb_cleaning: @@ -958,6 +972,7 @@ WarpX::ReadParameters () pp_psatd.query("current_correction", current_correction); pp_psatd.query("v_comoving", m_v_comoving); pp_psatd.query("do_time_averaging", fft_do_time_averaging); + pp_psatd.query("J_linear_in_time", J_linear_in_time); if (!fft_periodic_single_box && current_correction) amrex::Abort( @@ -1021,6 +1036,24 @@ WarpX::ReadParameters () "psatd.update_with_rho must be equal to 1 for comoving PSATD"); } + if (do_multi_J) + { + if (m_v_galilean[0] != 0. || m_v_galilean[1] != 0. || m_v_galilean[2] != 0.) + { + amrex::Abort("Multi-J algorithm not implemented with Galilean PSATD"); + } + } + + if (J_linear_in_time) + { + AMREX_ALWAYS_ASSERT_WITH_MESSAGE(update_with_rho, + "psatd.update_with_rho must be set to 1 when psatd.J_linear_in_time = 1"); + AMREX_ALWAYS_ASSERT_WITH_MESSAGE(do_dive_cleaning, + "warpx.do_dive_cleaning must be set to 1 when psatd.J_linear_in_time = 1"); + AMREX_ALWAYS_ASSERT_WITH_MESSAGE(do_divb_cleaning, + "warpx.do_divb_cleaning must be set to 1 when psatd.J_linear_in_time = 1"); + } + constexpr int zdir = AMREX_SPACEDIM - 1; if (WarpX::field_boundary_lo[zdir] == FieldBoundaryType::Damped || WarpX::field_boundary_hi[zdir] == FieldBoundaryType::Damped ) { @@ -1776,6 +1809,9 @@ void WarpX::AllocLevelSpectralSolver (amrex::Vector<std::unique_ptr<SpectralSolv RealVect dx_vect(dx[0], dx[2]); #endif + amrex::Real solver_dt = dt[lev]; + if (WarpX::do_multi_J) solver_dt /= static_cast<amrex::Real>(WarpX::do_multi_J_n_depositions); + auto pss = std::make_unique<SpectralSolver>(lev, realspace_ba, dm, @@ -1786,11 +1822,12 @@ void WarpX::AllocLevelSpectralSolver (amrex::Vector<std::unique_ptr<SpectralSolv m_v_galilean, m_v_comoving, dx_vect, - dt[lev], + solver_dt, pml_flag, fft_periodic_single_box, update_with_rho, - fft_do_time_averaging); + fft_do_time_averaging, + J_linear_in_time); spectral_solver[lev] = std::move(pss); } # endif |