author    Edoardo Zoni <59625522+EZoni@users.noreply.github.com>  2023-03-08 20:52:56 -0800
committer GitHub <noreply@github.com>  2023-03-09 04:52:56 +0000
commit    03b2fe60ff49748aaff8402824ea0457eef24d5c (patch)
tree      1f29cb899516a03eecc5babd9e9a65f84a8f7dd4
parent    92013ab8403512a0d42ee3ba49f474b72d1ed88f (diff)
New user input for grid type (collocated, staggered, hybrid) (#3683)
* Introduce `warpx.grid_type` parameter
* Replace `or` with `||`
* Update examples with new user input syntax
* Fix `if` condition
* Improve error message
* Fix `if` condition
* Fix bugs
* Fix warning
* Fix RZ
* Debugging
* Fix RZ
* Fix bug
* Clean up
* More changes:
  - set default algo parameters with hybrid grid
  - all hybrid input parameters under warpx name
* Set default field gathering algo for hybrid grids
* Update documentation
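For reference, the mapping from the old to the new input syntax, assembled as a minimal sketch from the example diffs below (the fragments are illustrative, not a complete input deck):

    # Before this commit:
    #   warpx.do_nodal = 1        # collocated (fully nodal) grid
    #   warpx.do_nodal = 0        # staggered (Yee) grid
    # After this commit:
    warpx.grid_type = collocated  # or: staggered (default), hybrid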
-rw-r--r--  Docs/source/developers/qed.rst | 4
-rw-r--r--  Docs/source/usage/parameters.rst | 54
-rw-r--r--  Examples/Tests/comoving/inputs_2d_hybrid | 7
-rw-r--r--  Examples/Tests/maxwell_hybrid_qed/inputs_2d | 2
-rw-r--r--  Examples/Tests/nci_psatd_stability/inputs_2d | 2
-rw-r--r--  Examples/Tests/nci_psatd_stability/inputs_2d_hybrid | 7
-rw-r--r--  Examples/Tests/nci_psatd_stability/inputs_3d | 2
-rw-r--r--  Examples/Tests/nci_psatd_stability/inputs_avg_2d | 2
-rw-r--r--  Examples/Tests/nci_psatd_stability/inputs_avg_3d | 2
-rw-r--r--  Examples/Tests/nci_psatd_stability/inputs_rz | 2
-rw-r--r--  Examples/Tests/pml/inputs_3d | 2
-rw-r--r--  Examples/Tests/vay_deposition/inputs_2d | 2
-rw-r--r--  Examples/Tests/vay_deposition/inputs_3d | 2
-rw-r--r--  Regression/WarpX-GPU-tests.ini | 8
-rw-r--r--  Regression/WarpX-tests.ini | 28
-rw-r--r--  Source/BoundaryConditions/PML.H | 2
-rw-r--r--  Source/BoundaryConditions/PML.cpp | 22
-rw-r--r--  Source/Evolve/WarpXComputeDt.cpp | 2
-rw-r--r--  Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp | 2
-rw-r--r--  Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp | 4
-rw-r--r--  Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp | 2
-rw-r--r--  Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp | 2
-rw-r--r--  Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp | 2
-rw-r--r--  Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp | 2
-rw-r--r--  Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp | 2
-rw-r--r--  Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp | 2
-rw-r--r--  Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H | 6
-rw-r--r--  Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.cpp | 6
-rw-r--r--  Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp | 2
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.H | 2
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.cpp | 15
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.H | 4
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.cpp | 4
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmGalileanRZ.H | 2
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmGalileanRZ.cpp | 4
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.H | 4
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp | 23
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.H | 4
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.cpp | 4
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.H | 2
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.cpp | 4
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPmlRZ.H | 2
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPmlRZ.cpp | 4
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H | 2
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp | 4
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H | 2
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp | 10
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.H | 4
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralKSpace.H | 2
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralKSpace.cpp | 15
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralSolver.H | 4
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralSolver.cpp | 14
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H | 2
-rw-r--r--  Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp | 10
-rw-r--r--  Source/FieldSolver/WarpX_QED_Field_Pushers.cpp | 4
-rw-r--r--  Source/Initialization/WarpXInitData.cpp | 15
-rw-r--r--  Source/Parallelization/GuardCellManager.H | 4
-rw-r--r--  Source/Parallelization/GuardCellManager.cpp | 10
-rw-r--r--  Source/Particles/MultiParticleContainer.cpp | 4
-rw-r--r--  Source/Particles/WarpXParticleContainer.cpp | 4
-rw-r--r--  Source/Utils/RelativeCellPosition.cpp | 2
-rw-r--r--  Source/Utils/WarpXAlgorithmSelection.H | 8
-rw-r--r--  Source/Utils/WarpXAlgorithmSelection.cpp | 9
-rw-r--r--  Source/WarpX.H | 14
-rw-r--r--  Source/WarpX.cpp | 201
65 files changed, 339 insertions, 267 deletions
diff --git a/Docs/source/developers/qed.rst b/Docs/source/developers/qed.rst
index 9a01886ca..f509d6ea3 100644
--- a/Docs/source/developers/qed.rst
+++ b/Docs/source/developers/qed.rst
@@ -40,6 +40,6 @@ pairs created.
At most one macroparticle is created per cell per timestep per species, with a weight corresponding to
the total number of physical pairs created.
-So far the Schwinger module requires using ``warpx.do_nodal=1`` or
-``algo.field_gathering=momentum-conserving`` (so that the auxiliary fields are calculated on the nodes)
+So far the Schwinger module requires using ``warpx.grid_type = collocated`` or
+``algo.field_gathering = momentum-conserving`` (so that the auxiliary fields are calculated on the nodes)
and is not compatible with mesh refinement, RZ coordinates, or single precision.
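As an illustration, either of the following minimal (hypothetical) input fragments would satisfy the requirement stated above:

    # Option 1: collocated grid
    warpx.grid_type = collocated

    # Option 2: staggered grid, with fields averaged to the nodes
    warpx.grid_type = staggered
    algo.field_gathering = momentum-conserving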
diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst
index ebf59e3bc..0e5f6f91c 100644
--- a/Docs/source/usage/parameters.rst
+++ b/Docs/source/usage/parameters.rst
@@ -1748,14 +1748,13 @@ Particle push, charge and current deposition, field gathering
* ``algo.field_gathering`` (`string`, optional)
The algorithm for field gathering. Available options are:
- - ``energy-conserving``: gathers directly from the grid points (either staggered
- or nodal gridpoints depending on ``warpx.do_nodal``).
- - ``momentum-conserving``: first average the fields from the grid points to
+ * ``energy-conserving``: gathers directly from the grid points (either staggered
+ or nodal grid points depending on ``warpx.grid_type``).
+ * ``momentum-conserving``: first average the fields from the grid points to
the nodes, and then gather from the nodes.
- If ``algo.field_gathering`` is not specified, the default is ``energy-conserving``.
- If ``warpx.do_nodal`` is ``true``, then ``energy-conserving`` and ``momentum-conserving``
- are equivalent.
+
+ Default: ``algo.field_gathering = energy-conserving`` with collocated or staggered grids (note that ``energy-conserving`` and ``momentum-conserving`` are equivalent with collocated grids), ``algo.field_gathering = momentum-conserving`` with hybrid grids.
* ``algo.particle_pusher`` (`string`, optional)
The algorithm for the particle pusher. Available options are:
@@ -1810,7 +1809,7 @@ Maxwell solver: PSATD method
* ``psatd.nx_guard``, ``psatd.ny_guard``, ``psatd.nz_guard`` (`integer`) optional
The number of guard cells to use with PSATD solver.
If not set by users, these values are calculated automatically and determined *empirically* and
- would be equal the order of the solver for nodal grid, and half the order of the solver for staggered.
+ equal the order of the solver for collocated grids and half the order of the solver for staggered grids.
* ``psatd.periodic_single_box_fft`` (`0` or `1`; default: 0)
If true, this will *not* incorporate the guard cells into the box over which FFTs are performed.
@@ -1967,31 +1966,42 @@ Maxwell solver: macroscopic media
Grid types (collocated, staggered, hybrid)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-* ``warpx.do_nodal`` (`0` or `1` ; default: 0)
- Whether to use a nodal grid (i.e. all fields are defined at the
- same points in space) or a staggered grid (i.e. Yee grid ; different
- fields are defined at different points in space)
+* ``warpx.grid_type`` (`string`, ``collocated``, ``staggered`` or ``hybrid``)
+ Whether to use a collocated grid (all fields defined at the cell nodes),
+ a staggered grid (fields defined on a Yee grid), or a hybrid grid (fields
+ and currents are interpolated back and forth between a staggered grid and a
+ nodal grid; this must be used with the momentum-conserving field gathering algorithm,
+ ``algo.field_gathering = momentum-conserving``).
+
+ Default: ``warpx.grid_type = staggered``.
* ``interpolation.galerkin_scheme`` (`0` or `1`)
Whether to use a Galerkin scheme when gathering fields to particles.
- When set to `1`, the interpolation orders used for field-gathering are reduced for certain field components along certain directions.
+ When set to ``1``, the interpolation orders used for field-gathering are reduced for certain field components along certain directions.
For example, :math:`E_z` is gathered using ``algo.particle_shape`` along :math:`(x,y)` and ``algo.particle_shape - 1`` along :math:`z`.
See equations (21)-(23) of (`Godfrey and Vay, 2013 <https://doi.org/10.1016/j.jcp.2013.04.006>`_) and associated references for details.
- Defaults to `1` unless ``warpx.do_nodal = 1`` and/or ``algo.field_gathering = momentum-conserving``.
+
+ Default: ``interpolation.galerkin_scheme = 0`` with collocated grids and/or momentum-conserving field gathering, ``interpolation.galerkin_scheme = 1`` otherwise.
.. warning::
The default behavior should not normally be changed.
At present, this parameter is intended mainly for testing and development purposes.
-* ``interpolation.field_centering_nox``, ``interpolation.field_centering_noy``, ``interpolation.field_centering_noz`` (default: ``2`` in all directions)
- The order of interpolation used with staggered grids (``warpx.do_nodal = 0``) and momentum-conserving field gathering (``algo.field_gathering = momentum-conserving``) to interpolate the electric and magnetic fields from the cell centers to the cell nodes, before gathering the fields from the cell nodes to the particle positions. High-order interpolation (order 8 in each direction, at least) is necessary to ensure stability in typical LWFA boosted-frame simulations using the Galilean PSATD or comoving PSATD schemes. This finite-order interpolation is used only when the PSATD solver is used for Maxwell's equations. With the FDTD solver, basic linear interpolation is used instead.
+* ``warpx.field_centering_nox``, ``warpx.field_centering_noy``, ``warpx.field_centering_noz`` (`integer`, optional)
+ The order of interpolation used with staggered or hybrid grids (``warpx.grid_type = staggered`` or ``warpx.grid_type = hybrid``) and momentum-conserving field gathering (``algo.field_gathering = momentum-conserving``) to interpolate the electric and magnetic fields from the cell centers to the cell nodes, before gathering the fields from the cell nodes to the particle positions.
+
+ Default: ``warpx.field_centering_no<x,y,z> = 2`` with staggered grids, ``warpx.field_centering_no<x,y,z> = 8`` with hybrid grids (typically necessary to ensure stability in boosted-frame simulations of relativistic plasmas and beams).
+
+* ``warpx.current_centering_nox``, ``warpx.current_centering_noy``, ``warpx.current_centering_noz`` (`integer`, optional)
+ The order of interpolation used with hybrid grids (``warpx.grid_type = hybrid``) to interpolate the currents from the cell nodes to the cell centers when ``warpx.do_current_centering = 1``, before pushing the Maxwell fields on staggered grids.
+
+ Default: ``warpx.current_centering_no<x,y,z> = 8`` with hybrid grids (typically necessary to ensure stability in boosted-frame simulations of relativistic plasmas and beams).
-* ``interpolation.current_centering_nox``, ``interpolation.current_centering_noy``, ``interpolation.current_centering_noz`` (default: ``2`` in all directions)
- The order of interpolation used to center the currents from nodal to staggered grids (if ``warpx.do_current_centering = 1``), before pushing the Maxwell fields on staggered grids. This finite-order interpolation is used only when the PSATD solver is used for Maxwell's equations. With the FDTD solver, basic linear interpolation is used instead.
+* ``warpx.do_current_centering`` (`bool`, `0` or `1`)
+ If true, the current is deposited on a nodal grid and then centered to a staggered grid (Yee grid), using finite-order interpolation.
-* ``warpx.do_current_centering`` (`0` or `1` ; default: 0)
- If true, the current is deposited on a nodal grid and then centered to a staggered grid (Yee grid), using finite-order interpolation. If ``warpx.do_nodal = 1``, the Maxwell fields are pushed on a nodal grid, it is not necessary to center the currents to a staggered grid, and we set therefore ``warpx.do_current_centering = 0`` automatically, overwriting the user-defined input.
+ Default: ``warpx.do_current_centering = 0`` with collocated or staggered grids, ``warpx.do_current_centering = 1`` with hybrid grids.
Additional parameters
^^^^^^^^^^^^^^^^^^^^^
@@ -2031,8 +2041,8 @@ Additional parameters
Will use the Hybrid QED Maxwell solver when pushing fields: a QED correction is added to the
field solver to solve non-linear Maxwell's equations, according to [Quantum Electrodynamics
vacuum polarization solver, P. Carneiro et al., `ArXiv 2016 <https://arxiv.org/abs/1607.04224>`__].
- Note that this option can only be used with the PSATD build. Furthermore,
- warpx.do_nodal must be set to `1` which is not its default value.
+ Note that this option can only be used with the PSATD build. Furthermore, one must set
+ ``warpx.grid_type = collocated`` (which otherwise would be ``staggered`` by default).
* ``warpx.quantum_xi`` (`float`; default: 1.3050122e-52)
Overwrites the actual quantum parameter used in Maxwell's QED equations. Assigning a
@@ -2856,7 +2866,7 @@ Lookup tables store pre-computed values for functions used by the QED modules.
Activating the Schwinger process requires the code to be compiled with ``QED=TRUE`` and ``PICSAR``.
If ``warpx.do_qed_schwinger = 1``, Schwinger product species must be specified with
``qed_schwinger.ele_product_species`` and ``qed_schwinger.pos_product_species``.
- Schwinger process requires either ``warpx.do_nodal=1`` or
+ Schwinger process requires either ``warpx.grid_type = collocated`` or
``algo.field_gathering=momentum-conserving`` (so that different field components are computed
at the same location in the grid) and does not currently support mesh refinement, cylindrical
coordinates or single precision.
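Collecting the defaults documented above, a hybrid-grid run reduces to a single new parameter. The fragment below is a sketch of what it implies; the commented lines are the defaults the documentation says WarpX now picks automatically, written out for clarity:

    warpx.grid_type = hybrid
    # Implied defaults per the documentation above:
    #   algo.field_gathering              = momentum-conserving
    #   warpx.do_current_centering        = 1
    #   warpx.field_centering_no<x,y,z>   = 8
    #   warpx.current_centering_no<x,y,z> = 8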
diff --git a/Examples/Tests/comoving/inputs_2d_hybrid b/Examples/Tests/comoving/inputs_2d_hybrid
index 711fdf4fe..393e18d20 100644
--- a/Examples/Tests/comoving/inputs_2d_hybrid
+++ b/Examples/Tests/comoving/inputs_2d_hybrid
@@ -17,7 +17,6 @@ boundary.field_hi = periodic damped
algo.maxwell_solver = psatd
algo.current_deposition = direct
algo.charge_deposition = standard
-algo.field_gathering = momentum-conserving
algo.particle_pusher = vay
# Order of particle shape factors
@@ -27,7 +26,8 @@ psatd.use_default_v_comoving = 1
warpx.cfl = 1.
-warpx.do_nodal = 0
+warpx.grid_type = hybrid
+warpx.do_current_centering = 0
warpx.gamma_boost = 13.
warpx.boost_direction = z
@@ -41,9 +41,6 @@ warpx.use_filter = 1
warpx.serialize_initial_conditions = 1
warpx.verbose = 1
-interpolation.field_centering_nox = 8
-interpolation.field_centering_noz = 8
-
particles.species_names = electrons ions beam
particles.use_fdtd_nci_corr = 0
particles.rigid_injected_species = beam
diff --git a/Examples/Tests/maxwell_hybrid_qed/inputs_2d b/Examples/Tests/maxwell_hybrid_qed/inputs_2d
index 248f9794d..2baa72c09 100644
--- a/Examples/Tests/maxwell_hybrid_qed/inputs_2d
+++ b/Examples/Tests/maxwell_hybrid_qed/inputs_2d
@@ -9,7 +9,7 @@ geometry.dims = 2
geometry.prob_lo = -32.e-6 -512.e-6
geometry.prob_hi = 32.e-6 512.e-6
amr.max_level = 0
-warpx.do_nodal = 1
+warpx.grid_type = collocated
warpx.quantum_xi = 1.e-23
#################################
diff --git a/Examples/Tests/nci_psatd_stability/inputs_2d b/Examples/Tests/nci_psatd_stability/inputs_2d
index f19bfdde5..bfff5c124 100644
--- a/Examples/Tests/nci_psatd_stability/inputs_2d
+++ b/Examples/Tests/nci_psatd_stability/inputs_2d
@@ -38,7 +38,7 @@ algo.particle_shape = 3
#################################
particles.species_names = electrons ions
-warpx.do_nodal = 1
+warpx.grid_type = collocated
warpx.use_filter = 1
psatd.nox = 16
diff --git a/Examples/Tests/nci_psatd_stability/inputs_2d_hybrid b/Examples/Tests/nci_psatd_stability/inputs_2d_hybrid
index a9d1ecd14..90dfd58c4 100644
--- a/Examples/Tests/nci_psatd_stability/inputs_2d_hybrid
+++ b/Examples/Tests/nci_psatd_stability/inputs_2d_hybrid
@@ -15,7 +15,6 @@ boundary.field_hi = periodic damped
algo.maxwell_solver = psatd
algo.current_deposition = direct
algo.charge_deposition = standard
-algo.field_gathering = momentum-conserving
algo.particle_pusher = vay
# Order of particle shape factors
@@ -25,7 +24,8 @@ psatd.use_default_v_galilean = 1
warpx.cfl = 1.
-warpx.do_nodal = 0
+warpx.grid_type = hybrid
+warpx.do_current_centering = 0
warpx.gamma_boost = 13.
warpx.boost_direction = z
@@ -39,9 +39,6 @@ warpx.use_filter = 1
warpx.serialize_initial_conditions = 1
warpx.verbose = 1
-interpolation.field_centering_nox = 8
-interpolation.field_centering_noz = 8
-
particles.species_names = electrons ions beam
particles.use_fdtd_nci_corr = 0
particles.rigid_injected_species = beam
diff --git a/Examples/Tests/nci_psatd_stability/inputs_3d b/Examples/Tests/nci_psatd_stability/inputs_3d
index 53305af12..c7cd6989b 100644
--- a/Examples/Tests/nci_psatd_stability/inputs_3d
+++ b/Examples/Tests/nci_psatd_stability/inputs_3d
@@ -75,7 +75,7 @@ psatd.noz = 8
# warpx
warpx.cfl = 1.
-warpx.do_nodal = 1
+warpx.grid_type = collocated
warpx.numprocs = 1 1 2
warpx.use_filter = 1
warpx.verbose = 1
diff --git a/Examples/Tests/nci_psatd_stability/inputs_avg_2d b/Examples/Tests/nci_psatd_stability/inputs_avg_2d
index 2a2611906..9b5825284 100644
--- a/Examples/Tests/nci_psatd_stability/inputs_avg_2d
+++ b/Examples/Tests/nci_psatd_stability/inputs_avg_2d
@@ -40,7 +40,7 @@ algo.particle_shape = 3
#################################
particles.species_names = electrons ions
-warpx.do_nodal = 1
+warpx.grid_type = collocated
warpx.use_filter = 1
psatd.nox = 16
diff --git a/Examples/Tests/nci_psatd_stability/inputs_avg_3d b/Examples/Tests/nci_psatd_stability/inputs_avg_3d
index bb76e4c47..4d5bf781d 100644
--- a/Examples/Tests/nci_psatd_stability/inputs_avg_3d
+++ b/Examples/Tests/nci_psatd_stability/inputs_avg_3d
@@ -38,7 +38,7 @@ algo.particle_shape = 3
#################################
particles.species_names = electrons ions
-warpx.do_nodal = 1
+warpx.grid_type = collocated
warpx.use_filter = 1
psatd.nox = 8
diff --git a/Examples/Tests/nci_psatd_stability/inputs_rz b/Examples/Tests/nci_psatd_stability/inputs_rz
index 3f1871c00..f93ca27f4 100644
--- a/Examples/Tests/nci_psatd_stability/inputs_rz
+++ b/Examples/Tests/nci_psatd_stability/inputs_rz
@@ -34,7 +34,7 @@ algo.particle_shape = 3
#################################
particles.species_names = electrons ions
-warpx.do_nodal = 1
+warpx.grid_type = collocated
warpx.use_filter = 1
psatd.nox = 16
diff --git a/Examples/Tests/pml/inputs_3d b/Examples/Tests/pml/inputs_3d
index 7c7578ae4..e152afc7c 100644
--- a/Examples/Tests/pml/inputs_3d
+++ b/Examples/Tests/pml/inputs_3d
@@ -15,7 +15,7 @@ boundary.field_hi = pml pml pml
# Numerical parameters
warpx.cfl = 1.0
-warpx.do_nodal = 0
+warpx.grid_type = staggered
warpx.do_dive_cleaning = 1
warpx.do_divb_cleaning = 1
warpx.do_pml_dive_cleaning = 1
diff --git a/Examples/Tests/vay_deposition/inputs_2d b/Examples/Tests/vay_deposition/inputs_2d
index c4928831a..000c17052 100644
--- a/Examples/Tests/vay_deposition/inputs_2d
+++ b/Examples/Tests/vay_deposition/inputs_2d
@@ -59,7 +59,7 @@ psatd.update_with_rho = 0
# warpx
warpx.cfl = 0.9999
-warpx.do_nodal = 1
+warpx.grid_type = collocated
warpx.serialize_initial_conditions = 1
warpx.use_filter = 1
warpx.verbose = 1
diff --git a/Examples/Tests/vay_deposition/inputs_3d b/Examples/Tests/vay_deposition/inputs_3d
index a672049e1..f43f2924c 100644
--- a/Examples/Tests/vay_deposition/inputs_3d
+++ b/Examples/Tests/vay_deposition/inputs_3d
@@ -61,7 +61,7 @@ psatd.update_with_rho = 0
# warpx
warpx.cfl = 0.9999
-warpx.do_nodal = 1
+warpx.grid_type = collocated
warpx.serialize_initial_conditions = 1
warpx.use_filter = 1
warpx.verbose = 1
diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini
index 50893233a..7a458853e 100644
--- a/Regression/WarpX-GPU-tests.ini
+++ b/Regression/WarpX-GPU-tests.ini
@@ -367,7 +367,7 @@ numthreads = 1
compileTest = 0
doVis = 0
compareParticles = 0
-runtime_params = warpx.do_dynamic_scheduling=0 warpx.do_nodal=1 algo.current_deposition=direct
+runtime_params = warpx.do_dynamic_scheduling=0 warpx.grid_type=collocated algo.current_deposition=direct
particleTypes = electrons positrons
analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py
analysisOutputImage = langmuir_multi_analysis.png
@@ -393,7 +393,7 @@ analysisOutputImage = langmuir_multi_analysis.png
[Langmuir_multi_psatd_nodal]
buildDir = .
inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt
-runtime_params = algo.maxwell_solver=psatd warpx.do_dynamic_scheduling=0 warpx.do_nodal=1 algo.current_deposition=direct warpx.cfl = 0.5773502691896258
+runtime_params = algo.maxwell_solver=psatd warpx.do_dynamic_scheduling=0 warpx.grid_type=collocated algo.current_deposition=direct warpx.cfl = 0.5773502691896258
dim = 3
addToCompileString = USE_PSATD=TRUE USE_GPU=TRUE
restartTest = 0
@@ -421,7 +421,7 @@ numthreads = 1
compileTest = 0
doVis = 0
compareParticles = 0
-runtime_params = warpx.do_nodal=1 algo.current_deposition=direct diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz
+runtime_params = warpx.grid_type=collocated algo.current_deposition=direct diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz
particleTypes = electrons positrons
analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py
analysisOutputImage = langmuir_multi_2d_analysis.png
@@ -447,7 +447,7 @@ analysisOutputImage = langmuir_multi_2d_analysis.png
# [Langmuir_multi_2d_psatd_nodal]
# buildDir = .
# inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt
-# runtime_params = algo.maxwell_solver=psatd warpx.do_nodal=1 algo.current_deposition=direct diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot=Ex Ey Ez jx jy jz part_per_cell
+# runtime_params = algo.maxwell_solver=psatd warpx.grid_type=collocated algo.current_deposition=direct diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot=Ex Ey Ez jx jy jz part_per_cell
# dim = 2
# addToCompileString = USE_PSATD=TRUE USE_GPU=TRUE
# restartTest = 0
diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini
index 3932662bf..29304cc9a 100644
--- a/Regression/WarpX-tests.ini
+++ b/Regression/WarpX-tests.ini
@@ -407,7 +407,7 @@ analysisOutputImage = langmuir_multi_analysis.png
[Langmuir_multi_nodal]
buildDir = .
inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt
-runtime_params = warpx.do_dynamic_scheduling=0 warpx.do_nodal=1 algo.current_deposition=direct
+runtime_params = warpx.do_dynamic_scheduling=0 warpx.grid_type=collocated algo.current_deposition=direct
dim = 3
addToCompileString =
cmakeSetupOpts = -DWarpX_DIMS=3
@@ -464,7 +464,7 @@ analysisOutputImage = Langmuir_multi_psatd_multiJ.png
[Langmuir_multi_psatd_multiJ_nodal]
buildDir = .
inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt
-runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.solution_type=first-order psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium warpx.do_nodal=1
+runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.solution_type=first-order psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium warpx.grid_type=collocated
dim = 3
addToCompileString = USE_PSATD=TRUE
cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON
@@ -521,7 +521,7 @@ analysisOutputImage = langmuir_multi_analysis.png
[Langmuir_multi_psatd_current_correction_nodal]
buildDir = .
inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt
-runtime_params = algo.maxwell_solver=psatd algo.current_deposition=direct psatd.periodic_single_box_fft=1 psatd.current_correction=1 warpx.do_nodal=1 diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz part_per_cell rho divE warpx.cfl = 0.5773502691896258
+runtime_params = algo.maxwell_solver=psatd algo.current_deposition=direct psatd.periodic_single_box_fft=1 psatd.current_correction=1 warpx.grid_type=collocated diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz part_per_cell rho divE warpx.cfl = 0.5773502691896258
dim = 3
addToCompileString = USE_PSATD=TRUE
cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON
@@ -559,7 +559,7 @@ analysisOutputImage = langmuir_multi_analysis.png
[Langmuir_multi_psatd_Vay_deposition_nodal]
buildDir = .
inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt
-runtime_params = algo.maxwell_solver=psatd warpx.do_nodal=1 algo.current_deposition=vay diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.5773502691896258
+runtime_params = algo.maxwell_solver=psatd warpx.grid_type=collocated algo.current_deposition=vay diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.5773502691896258
dim = 3
addToCompileString = USE_PSATD=TRUE
cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON
@@ -597,7 +597,7 @@ analysisOutputImage = langmuir_multi_analysis.png
[Langmuir_multi_psatd_nodal]
buildDir = .
inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt
-runtime_params = algo.maxwell_solver=psatd warpx.do_dynamic_scheduling=0 warpx.do_nodal=1 algo.current_deposition=direct warpx.cfl = 0.5773502691896258 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
+runtime_params = algo.maxwell_solver=psatd warpx.do_dynamic_scheduling=0 warpx.grid_type=collocated algo.current_deposition=direct warpx.cfl = 0.5773502691896258 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
dim = 3
addToCompileString = USE_PSATD=TRUE
cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON
@@ -635,7 +635,7 @@ analysisOutputImage = langmuir_multi_analysis.png
[Langmuir_multi_2d_nodal]
buildDir = .
inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt
-runtime_params = warpx.do_nodal=1 algo.current_deposition=direct diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz
+runtime_params = warpx.grid_type=collocated algo.current_deposition=direct diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz
dim = 2
addToCompileString =
cmakeSetupOpts = -DWarpX_DIMS=2
@@ -749,7 +749,7 @@ analysisOutputImage = Langmuir_multi_2d_psatd_multiJ.png
[Langmuir_multi_2d_psatd_multiJ_nodal]
buildDir = .
inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt
-runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.solution_type=first-order psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium warpx.do_nodal=1
+runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.solution_type=first-order psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium warpx.grid_type=collocated
dim = 2
addToCompileString = USE_PSATD=TRUE
cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON
@@ -806,7 +806,7 @@ analysisOutputImage = langmuir_multi_2d_analysis.png
[Langmuir_multi_2d_psatd_current_correction_nodal]
buildDir = .
inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt
-runtime_params = algo.maxwell_solver=psatd amr.max_grid_size=128 algo.current_deposition=direct psatd.periodic_single_box_fft=1 psatd.current_correction=1 warpx.do_nodal=1 diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot =Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.7071067811865475
+runtime_params = algo.maxwell_solver=psatd amr.max_grid_size=128 algo.current_deposition=direct psatd.periodic_single_box_fft=1 psatd.current_correction=1 warpx.grid_type=collocated diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot =Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.7071067811865475
dim = 2
addToCompileString = USE_PSATD=TRUE
cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON
@@ -844,7 +844,7 @@ analysisOutputImage = langmuir_multi_2d_analysis.png
[Langmuir_multi_2d_psatd_Vay_deposition_nodal]
buildDir = .
inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt
-runtime_params = algo.maxwell_solver=psatd amr.max_grid_size=128 warpx.do_nodal=1 algo.current_deposition=vay diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.7071067811865475
+runtime_params = algo.maxwell_solver=psatd amr.max_grid_size=128 warpx.grid_type=collocated algo.current_deposition=vay diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.7071067811865475
dim = 2
addToCompileString = USE_PSATD=TRUE
cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON
@@ -863,7 +863,7 @@ analysisOutputImage = langmuir_multi_2d_analysis.png
[Langmuir_multi_2d_psatd_nodal]
buildDir = .
inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt
-runtime_params = algo.maxwell_solver=psatd warpx.do_nodal=1 algo.current_deposition=direct diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot=Ex Ey Ez jx jy jz part_per_cell warpx.cfl = 0.7071067811865475 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
+runtime_params = algo.maxwell_solver=psatd warpx.grid_type=collocated algo.current_deposition=direct diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot=Ex Ey Ez jx jy jz part_per_cell warpx.cfl = 0.7071067811865475 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
dim = 2
addToCompileString = USE_PSATD=TRUE
cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON
@@ -1998,7 +1998,7 @@ analysisRoutine = Examples/analysis_default_regression.py
[PlasmaAccelerationBoost3d_hybrid]
buildDir = .
inputFile = Examples/Physics_applications/plasma_acceleration/inputs_3d_boost
-runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 amr.n_cell=64 64 128 max_step=25 warpx.do_nodal=0 algo.field_gathering=momentum-conserving interpolation.field_centering_nox=8 interpolation.field_centering_noy=8 interpolation.field_centering_noz=8
+runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 amr.n_cell=64 64 128 max_step=25 warpx.grid_type=hybrid warpx.do_current_centering=0
dim = 3
addToCompileString =
restartTest = 0
@@ -2584,7 +2584,7 @@ analysisRoutine = Examples/Tests/particle_fields_diags/analysis_particle_diags_s
[galilean_2d_psatd]
buildDir = .
inputFile = Examples/Tests/nci_psatd_stability/inputs_2d
-runtime_params = warpx.do_nodal=1 algo.current_deposition=direct psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
+runtime_params = warpx.grid_type=collocated algo.current_deposition=direct psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
dim = 2
addToCompileString = USE_PSATD=TRUE
cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON
@@ -2800,7 +2800,7 @@ analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py
[averaged_galilean_2d_psatd_hybrid]
buildDir = .
inputFile = Examples/Tests/nci_psatd_stability/inputs_avg_2d
-runtime_params = amr.max_grid_size_x = 128 amr.max_grid_size_y = 64 warpx.do_nodal = 0 algo.field_gathering = momentum-conserving interpolation.field_centering_nox = 8 interpolation.field_centering_noz = 8 warpx.do_current_centering = 1 interpolation.current_centering_nox = 8 interpolation.current_centering_noz = 8 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
+runtime_params = amr.max_grid_size_x=128 amr.max_grid_size_y=64 warpx.grid_type=hybrid psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
dim = 2
addToCompileString = USE_PSATD=TRUE
cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON
@@ -2836,7 +2836,7 @@ analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py
[averaged_galilean_3d_psatd_hybrid]
buildDir = .
inputFile = Examples/Tests/nci_psatd_stability/inputs_avg_3d
-runtime_params = warpx.do_nodal = 0 algo.field_gathering = momentum-conserving interpolation.field_centering_nox = 8 interpolation.field_centering_noy = 8 interpolation.field_centering_noz = 8 warpx.do_current_centering = 1 interpolation.current_centering_nox = 8 interpolation.current_centering_noy = 8 interpolation.current_centering_noz = 8 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
+runtime_params = warpx.grid_type=hybrid psatd.current_correction=0 warpx.abort_on_warning_threshold=medium
dim = 3
addToCompileString = USE_PSATD=TRUE
cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON
diff --git a/Source/BoundaryConditions/PML.H b/Source/BoundaryConditions/PML.H
index e519298ca..e5a8b0c28 100644
--- a/Source/BoundaryConditions/PML.H
+++ b/Source/BoundaryConditions/PML.H
@@ -131,7 +131,7 @@ public:
const amrex::BoxArray& ba, const amrex::DistributionMapping& dm,
const amrex::Geometry* geom, const amrex::Geometry* cgeom,
int ncell, int delta, amrex::IntVect ref_ratio,
- amrex::Real dt, int nox_fft, int noy_fft, int noz_fft, bool do_nodal,
+ amrex::Real dt, int nox_fft, int noy_fft, int noz_fft, short grid_type,
int do_moving_window, int pml_has_particles, int do_pml_in_domain,
const int psatd_solution_type, const int J_in_time, const int rho_in_time,
const bool do_pml_dive_cleaning, const bool do_pml_divb_cleaning,
diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp
index 0739175cb..bb74b8ec4 100644
--- a/Source/BoundaryConditions/PML.cpp
+++ b/Source/BoundaryConditions/PML.cpp
@@ -546,7 +546,7 @@ MultiSigmaBox::ComputePMLFactorsE (const Real* dx, Real dt)
PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& grid_dm,
const Geometry* geom, const Geometry* cgeom,
int ncell, int delta, amrex::IntVect ref_ratio,
- Real dt, int nox_fft, int noy_fft, int noz_fft, bool do_nodal,
+ Real dt, int nox_fft, int noy_fft, int noz_fft, short grid_type,
int do_moving_window, int /*pml_has_particles*/, int do_pml_in_domain,
const int psatd_solution_type, const int J_in_time, const int rho_in_time,
const bool do_pml_dive_cleaning, const bool do_pml_divb_cleaning,
@@ -613,9 +613,9 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri
if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) {
// Increase the number of guard cells, in order to fit the extent
// of the stencil for the spectral solver
- int ngFFt_x = do_nodal ? nox_fft : nox_fft/2;
- int ngFFt_y = do_nodal ? noy_fft : noy_fft/2;
- int ngFFt_z = do_nodal ? noz_fft : noz_fft/2;
+ int ngFFt_x = (grid_type == GridType::Collocated) ? nox_fft : nox_fft/2;
+ int ngFFt_y = (grid_type == GridType::Collocated) ? noy_fft : noy_fft/2;
+ int ngFFt_z = (grid_type == GridType::Collocated) ? noz_fft : noz_fft/2;
ParmParse pp_psatd("psatd");
utils::parser::queryWithParser(pp_psatd, "nx_guard", ngFFt_x);
@@ -710,8 +710,9 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri
if (m_divb_cleaning)
{
// TODO Shall we define a separate guard cells parameter ngG?
- const amrex::IntVect& G_nodal_flag = (do_nodal) ? amrex::IntVect::TheNodeVector()
- : amrex::IntVect::TheCellVector();
+ const amrex::IntVect& G_nodal_flag =
+ (grid_type == GridType::Collocated) ? amrex::IntVect::TheNodeVector()
+ : amrex::IntVect::TheCellVector();
const amrex::BoxArray ba_G_nodal = amrex::convert(ba, G_nodal_flag);
WarpX::AllocInitMultiFab(pml_G_fp, ba_G_nodal, dm, 3, ngf, "pml_G_fp", 0.0_rt);
}
@@ -742,7 +743,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri
amrex::Vector<amrex::Real> const v_comoving_zero = {0., 0., 0.};
realspace_ba.enclosedCells().grow(nge); // cell-centered + guard cells
spectral_solver_fp = std::make_unique<SpectralSolver>(lev, realspace_ba, dm,
- nox_fft, noy_fft, noz_fft, do_nodal, v_galilean_zero,
+ nox_fft, noy_fft, noz_fft, grid_type, v_galilean_zero,
v_comoving_zero, dx, dt, in_pml, periodic_single_box, update_with_rho,
fft_do_time_averaging, psatd_solution_type, J_in_time, rho_in_time, m_dive_cleaning, m_divb_cleaning);
#endif
@@ -814,8 +815,9 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri
if (m_divb_cleaning)
{
// TODO Shall we define a separate guard cells parameter ngG?
- const amrex::IntVect& G_nodal_flag = (do_nodal) ? amrex::IntVect::TheNodeVector()
- : amrex::IntVect::TheCellVector();
+ const amrex::IntVect& G_nodal_flag =
+ (grid_type == GridType::Collocated) ? amrex::IntVect::TheNodeVector()
+ : amrex::IntVect::TheCellVector();
const amrex::BoxArray cba_G_nodal = amrex::convert(cba, G_nodal_flag);
WarpX::AllocInitMultiFab( pml_G_cp, cba_G_nodal, cdm, 3, ngf, "pml_G_cp", 0.0_rt);
}
@@ -849,7 +851,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri
amrex::Vector<amrex::Real> const v_comoving_zero = {0., 0., 0.};
realspace_cba.enclosedCells().grow(nge); // cell-centered + guard cells
spectral_solver_cp = std::make_unique<SpectralSolver>(lev, realspace_cba, cdm,
- nox_fft, noy_fft, noz_fft, do_nodal, v_galilean_zero,
+ nox_fft, noy_fft, noz_fft, grid_type, v_galilean_zero,
v_comoving_zero, cdx, dt, in_pml, periodic_single_box, update_with_rho,
fft_do_time_averaging, psatd_solution_type, J_in_time, rho_in_time, m_dive_cleaning, m_divb_cleaning);
#endif
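The repeated ternaries in the hunks above implement one rule: the spectral stencil reaches n_order points per direction on a collocated grid but only half as many cells on a staggered grid. A self-contained sketch of that rule follows; the GridType constants here are illustrative stand-ins for Utils/WarpXAlgorithmSelection.H, whose actual definition may differ:

    #include <iostream>

    // Illustrative stand-in for WarpX's GridType constants.
    struct GridType { enum : short { Collocated = 0, Staggered = 1, Hybrid = 2 }; };

    // Guard cells needed by the spectral solver in one direction, as in the
    // hunk above: full stencil order when collocated, half when staggered.
    int GuardCellsFFT (short grid_type, int n_order)
    {
        return (grid_type == GridType::Collocated) ? n_order : n_order / 2;
    }

    int main ()
    {
        std::cout << GuardCellsFFT(GridType::Collocated, 16) << '\n';  // 16
        std::cout << GuardCellsFFT(GridType::Staggered,  16) << '\n';  // 8
    }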
diff --git a/Source/Evolve/WarpXComputeDt.cpp b/Source/Evolve/WarpXComputeDt.cpp
index 80c77b71b..3c1221e8e 100644
--- a/Source/Evolve/WarpXComputeDt.cpp
+++ b/Source/Evolve/WarpXComputeDt.cpp
@@ -65,7 +65,7 @@ WarpX::ComputeDt ()
deltat = cfl * CylindricalYeeAlgorithm::ComputeMaxDt(dx, n_rz_azimuthal_modes);
#else
// - In Cartesian geometry
- if (do_nodal) {
+ if (grid_type == GridType::Collocated) {
deltat = cfl * CartesianNodalAlgorithm::ComputeMaxDt(dx);
} else if (electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee
|| electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) {
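For context, the two Cartesian branches above select between the standard CFL limits of the nodal and Yee schemes. A hedged sketch of those limits in 3D; these are the textbook formulas, not code copied from WarpX's ComputeMaxDt implementations:

    #include <algorithm>
    #include <cmath>

    // c = speed of light; dx, dy, dz = cell sizes.

    // Staggered (Yee) grid: c*dt <= 1 / sqrt(1/dx^2 + 1/dy^2 + 1/dz^2)
    double MaxDtYee (double c, double dx, double dy, double dz)
    {
        return 1.0 / (c * std::sqrt(1.0/(dx*dx) + 1.0/(dy*dy) + 1.0/(dz*dz)));
    }

    // Collocated (nodal) grid: c*dt <= min(dx, dy, dz) / sqrt(3)
    double MaxDtCollocated (double c, double dx, double dy, double dz)
    {
        return std::min({dx, dy, dz}) / (c * std::sqrt(3.0));
    }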
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp
index 9ac32a19d..2725e4310 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp
@@ -51,7 +51,7 @@ void FiniteDifferenceSolver::ComputeDivE (
ComputeDivECylindrical <CylindricalYeeAlgorithm> ( Efield, divEfield );
#else
- if (m_do_nodal) {
+ if (m_grid_type == GridType::Collocated) {
ComputeDivECartesian <CartesianNodalAlgorithm> ( Efield, divEfield );
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp
index 0e6515a73..b53f0780c 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp
@@ -70,11 +70,11 @@ void FiniteDifferenceSolver::EvolveB (
ignore_unused(Gfield, face_areas);
EvolveBCylindrical <CylindricalYeeAlgorithm> ( Bfield, Efield, lev, dt );
#else
- if(m_do_nodal or m_fdtd_algo != ElectromagneticSolverAlgo::ECT){
+ if(m_grid_type == GridType::Collocated || m_fdtd_algo != ElectromagneticSolverAlgo::ECT){
amrex::ignore_unused(face_areas);
}
- if (m_do_nodal) {
+ if (m_grid_type == GridType::Collocated) {
EvolveBCartesian <CartesianNodalAlgorithm> ( Bfield, Efield, Gfield, lev, dt );
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp
index 2907e3fdf..ba0d7c9ce 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp
@@ -53,7 +53,7 @@ void FiniteDifferenceSolver::EvolveBPML (
amrex::Abort(Utils::TextMsg::Err(
"PML are not implemented in cylindrical geometry."));
#else
- if (m_do_nodal) {
+ if (m_grid_type == GridType::Collocated) {
EvolveBPMLCartesian <CartesianNodalAlgorithm> (Bfield, Efield, dt, dive_cleaning);
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp
index fe6181aca..d37e5f744 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp
@@ -70,7 +70,7 @@ void FiniteDifferenceSolver::EvolveE (
ignore_unused(edge_lengths);
EvolveECylindrical <CylindricalYeeAlgorithm> ( Efield, Bfield, Jfield, Ffield, lev, dt );
#else
- if (m_do_nodal) {
+ if (m_grid_type == GridType::Collocated) {
EvolveECartesian <CartesianNodalAlgorithm> ( Efield, Bfield, Jfield, edge_lengths, Ffield, lev, dt );
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp
index 1dadd8e9f..d3475b0c9 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp
@@ -59,7 +59,7 @@ void FiniteDifferenceSolver::EvolveEPML (
amrex::Abort(Utils::TextMsg::Err(
"PML are not implemented in cylindrical geometry."));
#else
- if (m_do_nodal) {
+ if (m_grid_type == GridType::Collocated) {
EvolveEPMLCartesian <CartesianNodalAlgorithm> (
Efield, Bfield, Jfield, edge_lengths, Ffield, sigba, dt, pml_has_particles );
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp
index 27d7fb4e5..171967b81 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp
@@ -58,7 +58,7 @@ void FiniteDifferenceSolver::EvolveF (
EvolveFCylindrical <CylindricalYeeAlgorithm> ( Ffield, Efield, rhofield, rhocomp, dt );
#else
- if (m_do_nodal) {
+ if (m_grid_type == GridType::Collocated) {
EvolveFCartesian <CartesianNodalAlgorithm> ( Ffield, Efield, rhofield, rhocomp, dt );
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp
index 6a94d205a..1cb5201cd 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp
@@ -51,7 +51,7 @@ void FiniteDifferenceSolver::EvolveFPML (
amrex::Abort(Utils::TextMsg::Err(
"PML are not implemented in cylindrical geometry."));
#else
- if (m_do_nodal) {
+ if (m_grid_type == GridType::Collocated) {
EvolveFPMLCartesian <CartesianNodalAlgorithm> ( Ffield, Efield, dt );
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp
index 0c971d577..c31929258 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp
@@ -47,7 +47,7 @@ void FiniteDifferenceSolver::EvolveG (
amrex::ignore_unused(Gfield, Bfield, dt);
#else
// Select algorithm
- if (m_do_nodal)
+ if (m_grid_type == GridType::Collocated)
{
EvolveGCartesian<CartesianNodalAlgorithm>(Gfield, Bfield, dt);
}
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H
index ed6f918b3..aecf4ed9e 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H
+++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H
@@ -39,12 +39,12 @@ class FiniteDifferenceSolver
*
* \param fdtd_algo Identifies the chosen algorithm, as defined in WarpXAlgorithmSelection.H
* \param cell_size Cell size along each dimension, for the chosen refinement level
- * \param do_nodal Whether the solver is applied to a nodal or staggered grid
+ * \param grid_type Whether the solver is applied to a collocated or staggered grid
*/
FiniteDifferenceSolver (
int const fdtd_algo,
std::array<amrex::Real,3> cell_size,
- bool const do_nodal );
+ short const grid_type );
void EvolveB ( std::array< std::unique_ptr<amrex::MultiFab>, 3 >& Bfield,
std::array< std::unique_ptr<amrex::MultiFab>, 3 > const& Efield,
@@ -132,7 +132,7 @@ class FiniteDifferenceSolver
private:
int m_fdtd_algo;
- bool m_do_nodal;
+ short m_grid_type;
#ifdef WARPX_DIM_RZ
amrex::Real m_dr, m_rmin;
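The solver keeps `m_grid_type` as a plain `short` and branches at run time to stencil classes selected at compile time. A condensed, self-contained sketch of that pattern; the class and function names here are illustrative, not the WarpX originals:

    #include <iostream>

    struct GridType { enum : short { Collocated = 0, Staggered = 1, Hybrid = 2 }; };

    // Stand-ins for CartesianNodalAlgorithm / CartesianYeeAlgorithm.
    struct NodalAlgo { static void Evolve () { std::cout << "nodal stencil\n"; } };
    struct YeeAlgo   { static void Evolve () { std::cout << "Yee stencil\n"; } };

    class Solver {
        short m_grid_type;
    public:
        explicit Solver (short grid_type) : m_grid_type(grid_type) {}
        void EvolveB () const {
            // Run-time branch picks a compile-time stencil, as in the diffs.
            // Hybrid grids push the Maxwell fields on the staggered part,
            // hence they take the Yee branch here.
            if (m_grid_type == GridType::Collocated) { NodalAlgo::Evolve(); }
            else                                     { YeeAlgo::Evolve(); }
        }
    };

    int main () { Solver(GridType::Hybrid).EvolveB(); }  // prints "Yee stencil"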
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.cpp b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.cpp
index e8529ef15..a65d86b8f 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.cpp
@@ -30,11 +30,11 @@
FiniteDifferenceSolver::FiniteDifferenceSolver (
int const fdtd_algo,
std::array<amrex::Real,3> cell_size,
- bool do_nodal ) {
+ short grid_type) {
// Register the type of finite-difference algorithm
m_fdtd_algo = fdtd_algo;
- m_do_nodal = do_nodal;
+ m_grid_type = grid_type;
// return if not FDTD
if (fdtd_algo == ElectromagneticSolverAlgo::None || fdtd_algo == ElectromagneticSolverAlgo::PSATD)
@@ -62,7 +62,7 @@ FiniteDifferenceSolver::FiniteDifferenceSolver (
"FiniteDifferenceSolver: Unknown algorithm"));
}
#else
- if (do_nodal) {
+ if (grid_type == GridType::Collocated) {
CartesianNodalAlgorithm::InitializeStencilCoefficients( cell_size,
m_h_stencil_coefs_x, m_h_stencil_coefs_y, m_h_stencil_coefs_z );
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp
index 22a222726..487ab0652 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp
@@ -51,7 +51,7 @@ void FiniteDifferenceSolver::MacroscopicEvolveE (
"currently macro E-push does not work for RZ"));
#else
WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
- !m_do_nodal, "macro E-push does not work for nodal");
+ m_grid_type != GridType::Collocated, "Macroscopic E field solver does not work on collocated grids");
if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee) {
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.H
index bc7dc7699..bb9775e1c 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.H
@@ -32,7 +32,7 @@ class PsatdAlgorithmComoving : public SpectralBaseAlgorithm
const int norder_x,
const int norder_y,
const int norder_z,
- const bool nodal,
+ const short grid_type,
const amrex::Vector<amrex::Real>& v_comoving,
const amrex::Real dt,
const bool update_with_rho);
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.cpp
index dfd7ffe92..8f576dad7 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.cpp
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.cpp
@@ -1,6 +1,7 @@
#include "PsatdAlgorithmComoving.H"
#include "Utils/TextMsg.H"
+#include "Utils/WarpXAlgorithmSelection.H"
#include "Utils/WarpXConst.H"
#include "Utils/WarpX_Complex.H"
@@ -25,21 +26,21 @@ PsatdAlgorithmComoving::PsatdAlgorithmComoving (const SpectralKSpace& spectral_k
const DistributionMapping& dm,
const SpectralFieldIndex& spectral_index,
const int norder_x, const int norder_y,
- const int norder_z, const bool nodal,
+ const int norder_z, const short grid_type,
const amrex::Vector<amrex::Real>& v_comoving,
const amrex::Real dt,
const bool update_with_rho)
// Members initialization
- : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, nodal),
+ : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, grid_type),
m_spectral_index(spectral_index),
// Initialize the infinite-order k vectors (the argument n_order = -1 selects
- // the infinite order option, the argument nodal = false is then irrelevant)
- kx_vec(spectral_kspace.getModifiedKComponent(dm, 0, -1, false)),
+ // the infinite order option, the argument grid_type=GridType::Staggered is then irrelevant)
+ kx_vec(spectral_kspace.getModifiedKComponent(dm, 0, -1, GridType::Staggered)),
#if defined(WARPX_DIM_3D)
- ky_vec(spectral_kspace.getModifiedKComponent(dm, 1, -1, false)),
- kz_vec(spectral_kspace.getModifiedKComponent(dm, 2, -1, false)),
+ ky_vec(spectral_kspace.getModifiedKComponent(dm, 1, -1, GridType::Staggered)),
+ kz_vec(spectral_kspace.getModifiedKComponent(dm, 2, -1, GridType::Staggered)),
#else
- kz_vec(spectral_kspace.getModifiedKComponent(dm, 1, -1, false)),
+ kz_vec(spectral_kspace.getModifiedKComponent(dm, 1, -1, GridType::Staggered)),
#endif
m_v_comoving(v_comoving),
m_dt(dt)
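The `n_order = -1` convention noted in the hunk above selects the infinite-order (exact) k vectors, which is why the grid_type argument becomes irrelevant there. A self-contained sketch of the idea at lowest order; the order-2 modified wavenumbers below are standard results, while higher orders use Fornberg-type stencil coefficients and the real getModifiedKComponent differs:

    #include <cmath>

    // Effective ("modified") wavenumber seen by a finite-difference first
    // derivative, versus the exact spectral value. For n_order = -1 we return
    // k itself, so whether the grid is staggered no longer matters.
    double ModifiedK (double k, double dx, int n_order, bool staggered)
    {
        if (n_order == -1) return k;  // infinite order: exact derivative
        // Order-2 results: collocated central difference -> sin(k dx)/dx,
        // staggered (half-cell offsets) -> 2 sin(k dx / 2)/dx.
        return staggered ? 2.0*std::sin(0.5*k*dx)/dx : std::sin(k*dx)/dx;
    }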
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.H
index c90b19e1a..512c4f489 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.H
@@ -37,7 +37,7 @@ class PsatdAlgorithmFirstOrder : public SpectralBaseAlgorithm
* \param[in] norder_x order of the spectral solver along x
* \param[in] norder_y order of the spectral solver along y
* \param[in] norder_z order of the spectral solver along z
- * \param[in] nodal whether the E and B fields are defined on a fully nodal grid or a Yee grid
+ * \param[in] grid_type type of grid (collocated or not)
* \param[in] dt time step of the simulation
* \param[in] div_cleaning whether to use divergence correction for both E and B (thus, F and G)
* \param[in] J_in_time time dependency of J (currently supported: constant, linear)
@@ -50,7 +50,7 @@ class PsatdAlgorithmFirstOrder : public SpectralBaseAlgorithm
const int norder_x,
const int norder_y,
const int norder_z,
- const bool nodal,
+ const short grid_type,
const amrex::Real dt,
const bool div_cleaning,
const int J_in_time,
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.cpp
index d32604760..3701fb889 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.cpp
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.cpp
@@ -35,13 +35,13 @@ PsatdAlgorithmFirstOrder::PsatdAlgorithmFirstOrder(
const int norder_x,
const int norder_y,
const int norder_z,
- const bool nodal,
+ const short grid_type,
const amrex::Real dt,
const bool div_cleaning,
const int J_in_time,
const int rho_in_time)
// Initializer list
- : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, nodal),
+ : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, grid_type),
m_spectral_index(spectral_index),
m_dt(dt),
m_div_cleaning(div_cleaning),
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmGalileanRZ.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmGalileanRZ.H
index d3c519c94..aee8bc5ed 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmGalileanRZ.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmGalileanRZ.H
@@ -20,7 +20,7 @@ class PsatdAlgorithmGalileanRZ : public SpectralBaseAlgorithmRZ
amrex::DistributionMapping const & dm,
const SpectralFieldIndex& spectral_index,
int const n_rz_azimuthal_modes, int const norder_z,
- bool const nodal,
+ short const grid_type,
const amrex::Vector<amrex::Real>& v_galilean,
amrex::Real const dt_step,
bool const update_with_rho);
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmGalileanRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmGalileanRZ.cpp
index af3e2e336..75f0e49d1 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmGalileanRZ.cpp
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmGalileanRZ.cpp
@@ -21,12 +21,12 @@ PsatdAlgorithmGalileanRZ::PsatdAlgorithmGalileanRZ (SpectralKSpaceRZ const & spe
amrex::DistributionMapping const & dm,
const SpectralFieldIndex& spectral_index,
int const n_rz_azimuthal_modes, int const norder_z,
- bool const nodal,
+ short const grid_type,
const amrex::Vector<amrex::Real>& v_galilean,
amrex::Real const dt,
bool const update_with_rho)
// Initialize members of base class
- : SpectralBaseAlgorithmRZ(spectral_kspace, dm, spectral_index, norder_z, nodal),
+ : SpectralBaseAlgorithmRZ(spectral_kspace, dm, spectral_index, norder_z, grid_type),
m_spectral_index(spectral_index),
m_dt(dt),
m_v_galilean(v_galilean),
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.H
index 0d2d67434..0dd3d179d 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.H
@@ -37,7 +37,7 @@ class PsatdAlgorithmJConstantInTime : public SpectralBaseAlgorithm
* \param[in] norder_x order of the spectral solver along x
* \param[in] norder_y order of the spectral solver along y
* \param[in] norder_z order of the spectral solver along z
- * \param[in] nodal whether the E and B fields are defined on a fully nodal grid or a Yee grid
+ * \param[in] grid_type type of grid (collocated or not)
* \param[in] v_galilean Galilean velocity (three-dimensional array)
* \param[in] dt time step of the simulation
* \param[in] update_with_rho whether the update equation for E uses rho or not
@@ -52,7 +52,7 @@ class PsatdAlgorithmJConstantInTime : public SpectralBaseAlgorithm
const int norder_x,
const int norder_y,
const int norder_z,
- const bool nodal,
+ const short grid_type,
const amrex::Vector<amrex::Real>& v_galilean,
const amrex::Real dt,
const bool update_with_rho,
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp
index c52437bf8..8c28c42b2 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp
@@ -7,6 +7,7 @@
#include "PsatdAlgorithmJConstantInTime.H"
#include "Utils/TextMsg.H"
+#include "Utils/WarpXAlgorithmSelection.H"
#include "Utils/WarpXConst.H"
#include "Utils/WarpX_Complex.H"
@@ -34,7 +35,7 @@ PsatdAlgorithmJConstantInTime::PsatdAlgorithmJConstantInTime(
const int norder_x,
const int norder_y,
const int norder_z,
- const bool nodal,
+ const short grid_type,
const amrex::Vector<amrex::Real>& v_galilean,
const amrex::Real dt,
const bool update_with_rho,
@@ -42,17 +43,17 @@ PsatdAlgorithmJConstantInTime::PsatdAlgorithmJConstantInTime(
const bool dive_cleaning,
const bool divb_cleaning)
// Initializer list
- : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, nodal),
+ : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, grid_type),
m_spectral_index(spectral_index),
// Initialize the centered finite-order modified k vectors:
// these are computed always with the assumption of centered grids
- // (argument nodal = true), for both nodal and staggered simulations
- modified_kx_vec_centered(spectral_kspace.getModifiedKComponent(dm, 0, norder_x, true)),
+ // (argument grid_type=GridType::Collocated), for both collocated and staggered grids
+ modified_kx_vec_centered(spectral_kspace.getModifiedKComponent(dm, 0, norder_x, GridType::Collocated)),
#if defined(WARPX_DIM_3D)
- modified_ky_vec_centered(spectral_kspace.getModifiedKComponent(dm, 1, norder_y, true)),
- modified_kz_vec_centered(spectral_kspace.getModifiedKComponent(dm, 2, norder_z, true)),
+ modified_ky_vec_centered(spectral_kspace.getModifiedKComponent(dm, 1, norder_y, GridType::Collocated)),
+ modified_kz_vec_centered(spectral_kspace.getModifiedKComponent(dm, 2, norder_z, GridType::Collocated)),
#else
- modified_kz_vec_centered(spectral_kspace.getModifiedKComponent(dm, 1, norder_z, true)),
+ modified_kz_vec_centered(spectral_kspace.getModifiedKComponent(dm, 1, norder_z, GridType::Collocated)),
#endif
m_v_galilean(v_galilean),
m_dt(dt),
@@ -419,8 +420,8 @@ void PsatdAlgorithmJConstantInTime::InitializeSpectralCoefficients (
const amrex::Real dt2 = std::pow(dt, 2);
// Calculate the dot product of the k vector with the Galilean velocity.
- // This has to be computed always with the centered (that is, nodal) finite-order
- // modified k vectors, to work correctly for both nodal and staggered simulations.
+    // This must always be computed with the centered (collocated) finite-order
+    // modified k vectors, to work correctly for both collocated and staggered grids.
// w_c = 0 always with standard PSATD (zero Galilean velocity).
const amrex::Real w_c = kx_c[i]*vg_x +
#if defined(WARPX_DIM_3D)
@@ -580,8 +581,8 @@ void PsatdAlgorithmJConstantInTime::InitializeSpectralCoefficientsAveraging (
const amrex::Real dt2 = std::pow(dt, 2);
// Calculate the dot product of the k vector with the Galilean velocity.
- // This has to be computed always with the centered (that is, nodal) finite-order
- // modified k vectors, to work correctly for both nodal and staggered simulations.
+    // This must always be computed with the centered (collocated) finite-order
+    // modified k vectors, to work correctly for both collocated and staggered grids.
// w_c = 0 always with standard PSATD (zero Galilean velocity).
const amrex::Real w_c = kx_c[i]*vg_x +
#if defined(WARPX_DIM_3D)
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.H
index e0eded5f5..13db64b74 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.H
@@ -40,7 +40,7 @@ class PsatdAlgorithmJLinearInTime : public SpectralBaseAlgorithm
* \param[in] norder_x order of the spectral solver along x
* \param[in] norder_y order of the spectral solver along y
* \param[in] norder_z order of the spectral solver along z
- * \param[in] nodal whether the E and B fields are defined on a fully nodal grid or a Yee grid
+ * \param[in] grid_type type of grid (collocated or not)
* \param[in] dt time step of the simulation
* \param[in] time_averaging whether to use time averaging for large time steps
* \param[in] dive_cleaning Update F as part of the field update, so that errors in divE=rho propagate away at the speed of light
@@ -53,7 +53,7 @@ class PsatdAlgorithmJLinearInTime : public SpectralBaseAlgorithm
const int norder_x,
const int norder_y,
const int norder_z,
- const bool nodal,
+ const short grid_type,
const amrex::Real dt,
const bool time_averaging,
const bool dive_cleaning,
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.cpp
index 861001c78..b0580422b 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.cpp
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.cpp
@@ -34,13 +34,13 @@ PsatdAlgorithmJLinearInTime::PsatdAlgorithmJLinearInTime(
const int norder_x,
const int norder_y,
const int norder_z,
- const bool nodal,
+ const short grid_type,
const amrex::Real dt,
const bool time_averaging,
const bool dive_cleaning,
const bool divb_cleaning)
// Initializer list
- : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, nodal),
+ : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, grid_type),
m_spectral_index(spectral_index),
m_dt(dt),
m_time_averaging(time_averaging),
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.H
index 0c1aeb885..e858dc1bf 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.H
@@ -31,7 +31,7 @@ class PsatdAlgorithmPml : public SpectralBaseAlgorithm
const amrex::DistributionMapping& dm,
const SpectralFieldIndex& spectral_index,
const int norder_x, const int norder_y,
- const int norder_z, const bool nodal,
+ const int norder_z, const short grid_type,
const amrex::Real dt,
const bool dive_cleaning,
const bool divb_cleaning);
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.cpp
index 45a4d1580..217998d18 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.cpp
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.cpp
@@ -34,11 +34,11 @@ PsatdAlgorithmPml::PsatdAlgorithmPml(const SpectralKSpace& spectral_kspace,
const DistributionMapping& dm,
const SpectralFieldIndex& spectral_index,
const int norder_x, const int norder_y,
- const int norder_z, const bool nodal,
+ const int norder_z, const short grid_type,
const Real dt,
const bool dive_cleaning, const bool divb_cleaning)
// Initialize members of base class
- : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, nodal),
+ : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, grid_type),
m_spectral_index(spectral_index),
m_dt(dt),
m_dive_cleaning(dive_cleaning),
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPmlRZ.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPmlRZ.H
index 6ea63c5a2..255b645ba 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPmlRZ.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPmlRZ.H
@@ -20,7 +20,7 @@ class PsatdAlgorithmPmlRZ : public SpectralBaseAlgorithmRZ
amrex::DistributionMapping const & dm,
const SpectralFieldIndex& spectral_index,
int const n_rz_azimuthal_modes, int const norder_z,
- bool const nodal, amrex::Real const dt_step);
+ short const grid_type, amrex::Real const dt_step);
// Redefine functions from base class
virtual void pushSpectralFields (SpectralFieldDataRZ & f) override final;
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPmlRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPmlRZ.cpp
index ca85648ac..60f7209de 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPmlRZ.cpp
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPmlRZ.cpp
@@ -21,9 +21,9 @@ PsatdAlgorithmPmlRZ::PsatdAlgorithmPmlRZ (SpectralKSpaceRZ const & spectral_kspa
amrex::DistributionMapping const & dm,
const SpectralFieldIndex& spectral_index,
int const n_rz_azimuthal_modes, int const norder_z,
- bool const nodal, amrex::Real const dt)
+ short const grid_type, amrex::Real const dt)
// Initialize members of base class
- : SpectralBaseAlgorithmRZ(spectral_kspace, dm, spectral_index, norder_z, nodal),
+ : SpectralBaseAlgorithmRZ(spectral_kspace, dm, spectral_index, norder_z, grid_type),
m_spectral_index(spectral_index),
m_dt(dt)
{
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H
index 097a1a9d6..350db81c3 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H
@@ -20,7 +20,7 @@ class PsatdAlgorithmRZ : public SpectralBaseAlgorithmRZ
amrex::DistributionMapping const & dm,
const SpectralFieldIndex& spectral_index,
int const n_rz_azimuthal_modes, int const norder_z,
- bool const nodal, amrex::Real const dt_step,
+ short const grid_type, amrex::Real const dt_step,
bool const update_with_rho,
const bool time_averaging,
const int J_in_time,
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp
index effb1cc2b..46ac2c0b8 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp
@@ -21,7 +21,7 @@ PsatdAlgorithmRZ::PsatdAlgorithmRZ (SpectralKSpaceRZ const & spectral_kspace,
amrex::DistributionMapping const & dm,
const SpectralFieldIndex& spectral_index,
int const n_rz_azimuthal_modes, int const norder_z,
- bool const nodal, amrex::Real const dt,
+ short const grid_type, amrex::Real const dt,
bool const update_with_rho,
const bool time_averaging,
const int J_in_time,
@@ -29,7 +29,7 @@ PsatdAlgorithmRZ::PsatdAlgorithmRZ (SpectralKSpaceRZ const & spectral_kspace,
const bool dive_cleaning,
const bool divb_cleaning)
// Initialize members of base class
- : SpectralBaseAlgorithmRZ(spectral_kspace, dm, spectral_index, norder_z, nodal),
+ : SpectralBaseAlgorithmRZ(spectral_kspace, dm, spectral_index, norder_z, grid_type),
m_spectral_index(spectral_index),
m_dt(dt),
m_update_with_rho(update_with_rho),
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H
index ef08c4432..fe624e9b8 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H
@@ -83,7 +83,7 @@ class SpectralBaseAlgorithm
const amrex::DistributionMapping& dm,
const SpectralFieldIndex& spectral_index,
const int norder_x, const int norder_y,
- const int norder_z, const bool nodal);
+ const int norder_z, const short grid_type);
SpectralFieldIndex m_spectral_index;
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp
index 1a2334cc6..099f6edf3 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp
@@ -31,15 +31,15 @@ SpectralBaseAlgorithm::SpectralBaseAlgorithm(const SpectralKSpace& spectral_kspa
const amrex::DistributionMapping& dm,
const SpectralFieldIndex& spectral_index,
const int norder_x, const int norder_y,
- const int norder_z, const bool nodal):
+ const int norder_z, const short grid_type):
m_spectral_index(spectral_index),
// Compute and assign the modified k vectors
- modified_kx_vec(spectral_kspace.getModifiedKComponent(dm,0,norder_x,nodal)),
+ modified_kx_vec(spectral_kspace.getModifiedKComponent(dm,0,norder_x,grid_type)),
#if defined(WARPX_DIM_3D)
- modified_ky_vec(spectral_kspace.getModifiedKComponent(dm,1,norder_y,nodal)),
- modified_kz_vec(spectral_kspace.getModifiedKComponent(dm,2,norder_z,nodal))
+ modified_ky_vec(spectral_kspace.getModifiedKComponent(dm,1,norder_y,grid_type)),
+ modified_kz_vec(spectral_kspace.getModifiedKComponent(dm,2,norder_z,grid_type))
#else
- modified_kz_vec(spectral_kspace.getModifiedKComponent(dm,1,norder_z,nodal))
+ modified_kz_vec(spectral_kspace.getModifiedKComponent(dm,1,norder_z,grid_type))
#endif
{
#if !defined(WARPX_DIM_3D)
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.H
index a2e293dc2..37bb862a9 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.H
@@ -63,10 +63,10 @@ class SpectralBaseAlgorithmRZ
SpectralBaseAlgorithmRZ(SpectralKSpaceRZ const & spectral_kspace,
amrex::DistributionMapping const & dm,
const SpectralFieldIndex& spectral_index,
- int const norder_z, bool const nodal)
+ int const norder_z, short const grid_type)
// Compute and assign the modified k vectors
: m_spectral_index(spectral_index),
- modified_kz_vec(spectral_kspace.getModifiedKComponent(dm, 1, norder_z, nodal))
+ modified_kz_vec(spectral_kspace.getModifiedKComponent(dm, 1, norder_z, grid_type))
{}
SpectralFieldIndex m_spectral_index;
diff --git a/Source/FieldSolver/SpectralSolver/SpectralKSpace.H b/Source/FieldSolver/SpectralSolver/SpectralKSpace.H
index 68aa7931a..d5266b2a2 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralKSpace.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralKSpace.H
@@ -58,7 +58,7 @@ class SpectralKSpace
const int i_dim, const bool only_positive_k ) const;
KVectorComponent getModifiedKComponent(
const amrex::DistributionMapping& dm, const int i_dim,
- const int n_order, const bool nodal ) const;
+ const int n_order, const short grid_type ) const;
SpectralShiftFactor getSpectralShiftFactor(
const amrex::DistributionMapping& dm, const int i_dim,
const int shift_type ) const;
diff --git a/Source/FieldSolver/SpectralSolver/SpectralKSpace.cpp b/Source/FieldSolver/SpectralSolver/SpectralKSpace.cpp
index b9afc63b4..8330b645e 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralKSpace.cpp
+++ b/Source/FieldSolver/SpectralSolver/SpectralKSpace.cpp
@@ -141,7 +141,7 @@ SpectralKSpace::getKComponent( const DistributionMapping& dm,
* corresponding correcting "shift" factor, along the dimension
* specified by `i_dim`.
*
- * (By default, we assume the FFT is done from/to a nodal grid in real space
+ * (By default, we assume the FFT is done from/to a collocated grid in real space.
 * If the FFT is performed from/to a cell-centered grid in real space,
* a correcting "shift" factor must be applied in spectral space.)
*/
@@ -190,14 +190,13 @@ SpectralKSpace::getSpectralShiftFactor( const DistributionMapping& dm,
*
* \param n_order Order of accuracy of the stencil, in discretizing
* a spatial derivative
- * \param nodal Whether the stencil is to be applied to a nodal or
- staggered set of fields
+ * \param grid_type Type of grid (collocated or not)
*/
KVectorComponent
SpectralKSpace::getModifiedKComponent( const DistributionMapping& dm,
const int i_dim,
const int n_order,
- const bool nodal ) const
+ const short grid_type ) const
{
// Initialize an empty DeviceVector in each box
KVectorComponent modified_k_comp(spectralspace_ba, dm);
@@ -217,7 +216,7 @@ SpectralKSpace::getModifiedKComponent( const DistributionMapping& dm,
} else {
// Compute real-space stencil coefficients
- Vector<Real> h_stencil_coef = WarpX::getFornbergStencilCoefficients(n_order, nodal);
+ Vector<Real> h_stencil_coef = WarpX::getFornbergStencilCoefficients(n_order, grid_type);
Gpu::DeviceVector<Real> d_stencil_coef(h_stencil_coef.size());
Gpu::copyAsync(Gpu::hostToDevice, h_stencil_coef.begin(), h_stencil_coef.end(),
d_stencil_coef.begin());
@@ -243,7 +242,7 @@ SpectralKSpace::getModifiedKComponent( const DistributionMapping& dm,
{
p_modified_k[i] = 0;
for (int n=0; n<nstencil; n++){
- if (nodal){
+ if (grid_type == GridType::Collocated){
p_modified_k[i] += p_stencil_coef[n]*
std::sin( p_k[i]*(n+1)*delta_x )/( (n+1)*delta_x );
} else {
@@ -252,12 +251,12 @@ SpectralKSpace::getModifiedKComponent( const DistributionMapping& dm,
}
}
- // By construction, at finite order and for a nodal grid,
+ // By construction, at finite order and for a collocated grid,
// the *modified* k corresponding to the Nyquist frequency
// (i.e. highest *real* k) is 0. However, the above calculation
// based on stencil coefficients does not give 0 to machine precision.
    // Therefore, we need to enforce that the modified k is exactly 0 here.
- if (nodal){
+ if (grid_type == GridType::Collocated){
if (i_dim == 0){
// Because of the real-to-complex FFTs, the first axis (idim=0)
// contains only the positive k, and the Nyquist frequency is
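As a side note on the stencil sum above: each real-space k is mapped onto a finite-order "modified" k through the Fornberg coefficients. A minimal, self-contained C++ sketch of that mapping follows (not part of the patch; the staggered branch falls outside this hunk, so its half-integer offsets n + 1/2 are an assumption based on the usual WarpX stencil):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Evaluate the finite-order modified k for one real-space k value,
    // given precomputed Fornberg stencil coefficients. The collocated
    // branch mirrors the hunk above; the staggered branch (offsets n + 1/2)
    // is assumed, since that branch is not shown in this hunk.
    double modified_k (double k, double delta_x,
                       const std::vector<double>& stencil_coef,
                       bool collocated)
    {
        double mk = 0.;
        for (std::size_t n = 0; n < stencil_coef.size(); ++n) {
            const double s = collocated ? (n + 1.) : (n + 0.5);
            mk += stencil_coef[n] * std::sin(k * s * delta_x) / (s * delta_x);
        }
        return mk;
    }

For a collocated grid this also makes the Nyquist-frequency fix-up above transparent: the analytic modified k vanishes there, and the code only enforces what the truncated sum misses at machine precision.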
diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.H b/Source/FieldSolver/SpectralSolver/SpectralSolver.H
index 66fe1b816..4ff9654ed 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralSolver.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.H
@@ -46,7 +46,7 @@ class SpectralSolver
* \param[in] norder_x spectral order along x
* \param[in] norder_y spectral order along y
* \param[in] norder_z spectral order along z
- * \param[in] nodal whether the spectral solver is applied to a nodal or staggered grid
+ * \param[in] grid_type type of grid (collocated or not)
* \param[in] v_galilean three-dimensional vector containing the components of the Galilean
* velocity for the standard or averaged Galilean PSATD solvers
* \param[in] v_comoving three-dimensional vector containing the components of the comoving
@@ -73,7 +73,7 @@ class SpectralSolver
const amrex::BoxArray& realspace_ba,
const amrex::DistributionMapping& dm,
const int norder_x, const int norder_y,
- const int norder_z, const bool nodal,
+ const int norder_z, const short grid_type,
const amrex::Vector<amrex::Real>& v_galilean,
const amrex::Vector<amrex::Real>& v_comoving,
const amrex::RealVect dx,
diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp b/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp
index f29a0bd03..362af06a3 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp
+++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp
@@ -25,7 +25,7 @@ SpectralSolver::SpectralSolver(
const amrex::BoxArray& realspace_ba,
const amrex::DistributionMapping& dm,
const int norder_x, const int norder_y,
- const int norder_z, const bool nodal,
+ const int norder_z, const short grid_type,
const amrex::Vector<amrex::Real>& v_galilean,
const amrex::Vector<amrex::Real>& v_comoving,
const amrex::RealVect dx, const amrex::Real dt,
@@ -55,7 +55,7 @@ SpectralSolver::SpectralSolver(
if (pml) // PSATD equations in the PML region
{
algorithm = std::make_unique<PsatdAlgorithmPml>(
- k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal,
+ k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, grid_type,
dt, dive_cleaning, divb_cleaning);
}
else // PSATD equations in the regular domain
@@ -64,14 +64,14 @@ SpectralSolver::SpectralSolver(
if (v_comoving[0] != 0. || v_comoving[1] != 0. || v_comoving[2] != 0.)
{
algorithm = std::make_unique<PsatdAlgorithmComoving>(
- k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal,
+ k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, grid_type,
v_comoving, dt, update_with_rho);
}
// Galilean PSATD algorithm (only J constant in time)
else if (v_galilean[0] != 0. || v_galilean[1] != 0. || v_galilean[2] != 0.)
{
algorithm = std::make_unique<PsatdAlgorithmJConstantInTime>(
- k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal,
+ k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, grid_type,
v_galilean, dt, update_with_rho, fft_do_time_averaging,
dive_cleaning, divb_cleaning);
}
@@ -88,7 +88,7 @@ SpectralSolver::SpectralSolver(
const bool div_cleaning = (dive_cleaning && divb_cleaning);
algorithm = std::make_unique<PsatdAlgorithmFirstOrder>(
- k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal,
+ k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, grid_type,
dt, div_cleaning, J_in_time, rho_in_time);
}
else if (psatd_solution_type == PSATDSolutionType::SecondOrder)
@@ -96,14 +96,14 @@ SpectralSolver::SpectralSolver(
if (J_in_time == JInTime::Constant)
{
algorithm = std::make_unique<PsatdAlgorithmJConstantInTime>(
- k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal,
+ k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, grid_type,
v_galilean, dt, update_with_rho, fft_do_time_averaging,
dive_cleaning, divb_cleaning);
}
else if (J_in_time == JInTime::Linear)
{
algorithm = std::make_unique<PsatdAlgorithmJLinearInTime>(
- k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal,
+ k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, grid_type,
dt, fft_do_time_averaging, dive_cleaning, divb_cleaning);
}
}
diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H
index 45b55d9d4..4a6c15630 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H
@@ -31,7 +31,7 @@ class SpectralSolverRZ
amrex::BoxArray const & realspace_ba,
amrex::DistributionMapping const & dm,
int const n_rz_azimuthal_modes,
- int const norder_z, bool const nodal,
+ int const norder_z, short const grid_type,
const amrex::Vector<amrex::Real>& v_galilean,
amrex::RealVect const dx, amrex::Real const dt,
bool const with_pml,
diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp
index 23d93fe04..9b0d2f21f 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp
+++ b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp
@@ -20,7 +20,7 @@
*
* \param n_rz_azimuthal_modes Number of azimuthal modes
* \param norder_z Order of accuracy of the spatial derivatives along z
- * \param nodal Whether the solver is applied to a nodal or staggered grid
+ * \param grid_type Type of grid (collocated or not)
* \param dx Cell size along each dimension
* \param dt Time step
* \param with_pml Whether PML boundary will be used
@@ -29,7 +29,7 @@ SpectralSolverRZ::SpectralSolverRZ (const int lev,
amrex::BoxArray const & realspace_ba,
amrex::DistributionMapping const & dm,
int const n_rz_azimuthal_modes,
- int const norder_z, bool const nodal,
+ int const norder_z, short const grid_type,
const amrex::Vector<amrex::Real>& v_galilean,
amrex::RealVect const dx, amrex::Real const dt,
bool const with_pml,
@@ -56,17 +56,17 @@ SpectralSolverRZ::SpectralSolverRZ (const int lev,
// Initialize the corresponding coefficients over k space
if (with_pml) {
PML_algorithm = std::make_unique<PsatdAlgorithmPmlRZ>(
- k_space, dm, m_spectral_index, n_rz_azimuthal_modes, norder_z, nodal, dt);
+ k_space, dm, m_spectral_index, n_rz_azimuthal_modes, norder_z, grid_type, dt);
}
if (v_galilean[2] == 0) {
// v_galilean is 0: use standard PSATD algorithm
algorithm = std::make_unique<PsatdAlgorithmRZ>(
- k_space, dm, m_spectral_index, n_rz_azimuthal_modes, norder_z, nodal, dt,
+ k_space, dm, m_spectral_index, n_rz_azimuthal_modes, norder_z, grid_type, dt,
update_with_rho, fft_do_time_averaging, J_in_time, rho_in_time, dive_cleaning, divb_cleaning);
} else {
// Otherwise: use the Galilean algorithm
algorithm = std::make_unique<PsatdAlgorithmGalileanRZ>(
- k_space, dm, m_spectral_index, n_rz_azimuthal_modes, norder_z, nodal, v_galilean, dt, update_with_rho);
+ k_space, dm, m_spectral_index, n_rz_azimuthal_modes, norder_z, grid_type, v_galilean, dt, update_with_rho);
}
// - Initialize arrays for fields in spectral space + FFT plans
diff --git a/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp b/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp
index e134ede7f..bd6886480 100644
--- a/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp
+++ b/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp
@@ -46,9 +46,9 @@ void
WarpX::Hybrid_QED_Push (amrex::Vector<amrex::Real> a_dt)
{
WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
- WarpX::do_nodal != 0,
+ WarpX::grid_type == GridType::Collocated,
"Error: The Hybrid QED method is "
- "currently only compatible with the nodal scheme."
+ "currently only implemented on a collocated grid."
);
for (int lev = 0; lev <= finest_level; ++lev) {
Hybrid_QED_Push(lev, a_dt[lev]);
diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp
index 43bd597bd..cf5eda4ea 100644
--- a/Source/Initialization/WarpXInitData.cpp
+++ b/Source/Initialization/WarpXInitData.cpp
@@ -267,14 +267,15 @@ WarpX::PrintMainPICparameters ()
}
#endif // WARPX_USE_PSATD
- if (do_nodal==1){
- amrex::Print() << " | - nodal mode \n";
+ if (grid_type == GridType::Collocated){
+ amrex::Print() << " | - collocated grid \n";
}
#ifdef WARPX_USE_PSATD
- if ( (do_nodal==0) && (field_gathering_algo == GatheringAlgo::EnergyConserving) ){
- amrex::Print()<<" | - staggered mode " << "\n";
+ if ( (grid_type == GridType::Staggered) && (field_gathering_algo == GatheringAlgo::EnergyConserving) ){
+ amrex::Print()<<" | - staggered grid " << "\n";
}
- else if ( (do_nodal==0) && (field_gathering_algo == GatheringAlgo::MomentumConserving) ){
+ else if ( (grid_type == GridType::Hybrid) && (field_gathering_algo == GatheringAlgo::MomentumConserving) ){
+ amrex::Print()<<" | - hybrid grid " << "\n";
if (dims=="3"){
amrex::Print() << " | - field_centering_nox = " << WarpX::field_centering_nox << "\n";
amrex::Print() << " | - field_centering_noy = " << WarpX::field_centering_noy << "\n";
@@ -490,7 +491,7 @@ WarpX::InitPML ()
// to the PML, for example in the presence of mesh refinement patches)
pml[0] = std::make_unique<PML>(0, boxArray(0), DistributionMap(0), &Geom(0), nullptr,
pml_ncell, pml_delta, amrex::IntVect::TheZeroVector(),
- dt[0], nox_fft, noy_fft, noz_fft, do_nodal,
+ dt[0], nox_fft, noy_fft, noz_fft, grid_type,
do_moving_window, pml_has_particles, do_pml_in_domain,
psatd_solution_type, J_in_time, rho_in_time,
do_pml_dive_cleaning, do_pml_divb_cleaning,
@@ -527,7 +528,7 @@ WarpX::InitPML ()
pml[lev] = std::make_unique<PML>(lev, boxArray(lev), DistributionMap(lev),
&Geom(lev), &Geom(lev-1),
pml_ncell, pml_delta, refRatio(lev-1),
- dt[lev], nox_fft, noy_fft, noz_fft, do_nodal,
+ dt[lev], nox_fft, noy_fft, noz_fft, grid_type,
do_moving_window, pml_has_particles, do_pml_in_domain,
psatd_solution_type, J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning,
amrex::IntVect(0), amrex::IntVect(0),
diff --git a/Source/Parallelization/GuardCellManager.H b/Source/Parallelization/GuardCellManager.H
index 35df85514..bb0861975 100644
--- a/Source/Parallelization/GuardCellManager.H
+++ b/Source/Parallelization/GuardCellManager.H
@@ -28,7 +28,7 @@ public:
* \param dx cell spacing
* \param do_subcycling bool, whether to use subcycling
* \param do_fdtd_nci_corr bool, whether to use Godfrey NCI corrector
- * \param do_nodal bool, whether the field solver is nodal
+     * \param grid_type integer, type of grid (collocated, staggered, or hybrid)
* \param do_moving_window bool, whether to use moving window
* \param moving_window_dir direction of moving window
* \param nox order of current deposition
@@ -55,7 +55,7 @@ public:
const amrex::RealVect dx,
const bool do_subcycling,
const bool do_fdtd_nci_corr,
- const bool do_nodal,
+ const short grid_type,
const bool do_moving_window,
const int moving_window_dir,
const int nox,
diff --git a/Source/Parallelization/GuardCellManager.cpp b/Source/Parallelization/GuardCellManager.cpp
index aa9d9f448..5c151412b 100644
--- a/Source/Parallelization/GuardCellManager.cpp
+++ b/Source/Parallelization/GuardCellManager.cpp
@@ -36,7 +36,7 @@ guardCellManager::Init (
const amrex::RealVect dx,
const bool do_subcycling,
const bool do_fdtd_nci_corr,
- const bool do_nodal,
+ const short grid_type,
const bool do_moving_window,
const int moving_window_dir,
const int nox,
@@ -195,9 +195,9 @@ guardCellManager::Init (
// currents in the latter case). This does not seem to be necessary in x and y,
// where it still seems fine to set half the number of guard cells of the nodal case.
- int ngFFt_x = do_nodal ? nox_fft : nox_fft / 2;
- int ngFFt_y = do_nodal ? noy_fft : noy_fft / 2;
- int ngFFt_z = (do_nodal || galilean) ? noz_fft : noz_fft / 2;
+ int ngFFt_x = (grid_type == GridType::Collocated) ? nox_fft : nox_fft / 2;
+ int ngFFt_y = (grid_type == GridType::Collocated) ? noy_fft : noy_fft / 2;
+ int ngFFt_z = (grid_type == GridType::Collocated || galilean) ? noz_fft : noz_fft / 2;
ParmParse pp_psatd("psatd");
utils::parser::queryWithParser(pp_psatd, "nx_guard", ngFFt_x);
@@ -258,7 +258,7 @@ guardCellManager::Init (
}
#else
else {
- if (do_nodal) {
+ if (grid_type == GridType::Collocated) {
ng_FieldSolver = CartesianNodalAlgorithm::GetMaxGuardCell();
ng_FieldSolverF = CartesianNodalAlgorithm::GetMaxGuardCell();
ng_FieldSolverG = CartesianNodalAlgorithm::GetMaxGuardCell();
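Restated as a standalone helper, the guard-cell rule above reads as follows (illustrative sketch only; the GridType values mirror the new struct added in WarpXAlgorithmSelection.H below):

    // Collocated grids keep the full spectral order as guard cells, while
    // staggered and hybrid grids keep half of it; along z, a nonzero
    // Galilean velocity also forces the full order, as in the hunk above.
    enum GridType : short { Collocated = 0, Staggered = 1, Hybrid = 2 };

    int fft_guard_cells (int n_order_fft, short grid_type, bool galilean = false)
    {
        return (grid_type == Collocated || galilean) ? n_order_fft
                                                     : n_order_fft / 2;
    }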
diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp
index 9cbe898cd..726e44b04 100644
--- a/Source/Particles/MultiParticleContainer.cpp
+++ b/Source/Particles/MultiParticleContainer.cpp
@@ -1311,9 +1311,9 @@ MultiParticleContainer::doQEDSchwinger ()
auto & warpx = WarpX::GetInstance();
- WARPX_ALWAYS_ASSERT_WITH_MESSAGE(warpx.do_nodal ||
+ WARPX_ALWAYS_ASSERT_WITH_MESSAGE(warpx.grid_type == GridType::Collocated ||
warpx.field_gathering_algo == GatheringAlgo::MomentumConserving,
- "ERROR: Schwinger process only implemented for warpx.do_nodal = 1"
+        "ERROR: Schwinger process only implemented for warpx.grid_type=collocated "
"or algo.field_gathering = momentum-conserving");
constexpr int level_0 = 0;
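In input-deck terms, the updated assertion accepts either of the following (a sketch of the new syntax, per the grid_to_int map and the assertions added in this patch):

    # Schwinger process: run on a collocated grid ...
    warpx.grid_type = collocated
    # ... or keep a staggered grid and gather with the momentum-conserving scheme
    algo.field_gathering = momentum-conserving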
diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp
index 21ab1408c..8eec0314d 100644
--- a/Source/Particles/WarpXParticleContainer.cpp
+++ b/Source/Particles/WarpXParticleContainer.cpp
@@ -430,8 +430,8 @@ WarpXParticleContainer::DepositCurrent (WarpXParIter& pti,
const std::array<amrex::Real, 3>& xyzmin = WarpX::LowerCorner(tilebox, depos_lev, 0.5_rt*dt);
if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Esirkepov) {
- if (WarpX::do_nodal==1) {
- amrex::Abort("The Esirkepov algorithm cannot be used with a nodal grid.");
+ if (WarpX::grid_type == GridType::Collocated) {
+ amrex::Abort("The Esirkepov algorithm cannot be used with a collocated grid.");
}
}
diff --git a/Source/Utils/RelativeCellPosition.cpp b/Source/Utils/RelativeCellPosition.cpp
index 55e933af0..1a108b54b 100644
--- a/Source/Utils/RelativeCellPosition.cpp
+++ b/Source/Utils/RelativeCellPosition.cpp
@@ -19,7 +19,7 @@ utils::getRelativeCellPosition(amrex::MultiFab const& mf)
// amrex::CellIndex::CELL means: 0.5 from lower corner for that index/direction
// amrex::CellIndex::NODE means: at corner for that index/direction
- // WarpX::do_nodal means: all indices/directions on CellIndex::NODE
+ // WarpX::grid_type==GridType::Collocated means: all indices/directions on CellIndex::NODE
for (int d = 0; d < AMREX_SPACEDIM; d++)
{
if (idx_type.cellCentered(d))
diff --git a/Source/Utils/WarpXAlgorithmSelection.H b/Source/Utils/WarpXAlgorithmSelection.H
index fe90cb196..d0d8678f5 100644
--- a/Source/Utils/WarpXAlgorithmSelection.H
+++ b/Source/Utils/WarpXAlgorithmSelection.H
@@ -36,6 +36,14 @@ struct MacroscopicSolverAlgo {
};
};
+struct GridType {
+ enum {
+ Collocated = 0,
+ Staggered = 1,
+ Hybrid = 2
+ };
+};
+
struct ElectromagneticSolverAlgo {
enum {
None = 0,
diff --git a/Source/Utils/WarpXAlgorithmSelection.cpp b/Source/Utils/WarpXAlgorithmSelection.cpp
index d3b5f5e43..99c068fc0 100644
--- a/Source/Utils/WarpXAlgorithmSelection.cpp
+++ b/Source/Utils/WarpXAlgorithmSelection.cpp
@@ -22,6 +22,13 @@
// Define dictionary with correspondence between user-input strings,
// and corresponding integer for use inside the code
+const std::map<std::string, int> grid_to_int = {
+ {"collocated", GridType::Collocated},
+ {"staggered", GridType::Staggered},
+ {"hybrid", GridType::Hybrid},
+ {"default", GridType::Staggered}
+};
+
const std::map<std::string, int> electromagnetic_solver_algo_to_int = {
{"none", ElectromagneticSolverAlgo::None },
{"yee", ElectromagneticSolverAlgo::Yee },
@@ -140,6 +147,8 @@ GetAlgorithmInteger( amrex::ParmParse& pp, const char* pp_search_key ){
std::map<std::string, int> algo_to_int;
if (0 == std::strcmp(pp_search_key, "maxwell_solver")) {
algo_to_int = electromagnetic_solver_algo_to_int;
+ } else if (0 == std::strcmp(pp_search_key, "grid_type")) {
+ algo_to_int = grid_to_int;
} else if (0 == std::strcmp(pp_search_key, "do_electrostatic")) {
algo_to_int = electrostatic_solver_algo_to_int;
} else if (0 == std::strcmp(pp_search_key, "particle_pusher")) {
diff --git a/Source/WarpX.H b/Source/WarpX.H
index da885132d..2effc8a6d 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -323,8 +323,9 @@ public:
//! and current onto the lower refinement level instead of the refinement patch itself
static int n_current_deposition_buffer;
- //! If true, all fields are evaluated on a nodal grid and all MultiFabs have a nodal index type
- static bool do_nodal;
+ //! Integer that corresponds to the type of grid used in the simulation
+ //! (collocated, staggered, hybrid)
+ static short grid_type;
// Global rho nodal flag to know about rho index type when rho MultiFab is not allocated
amrex::IntVect m_rho_nodal_flag;
@@ -969,10 +970,9 @@ public:
* (up to order \c n_order).
*
* \param[in] n_order order of the finite-difference approximation
- * \param[in] nodal whether the finite-difference approximation is computed
- * on a nodal grid or a staggered grid
+ * \param[in] a_grid_type type of grid (collocated or not)
*/
- static amrex::Vector<amrex::Real> getFornbergStencilCoefficients(const int n_order, const bool nodal);
+ static amrex::Vector<amrex::Real> getFornbergStencilCoefficients(const int n_order, const short a_grid_type);
// Device vectors of stencil coefficients used for finite-order centering of fields
amrex::Gpu::DeviceVector<amrex::Real> device_field_centering_stencil_coeffs_x;
@@ -1211,13 +1211,15 @@ private:
* \param[in] centering_nox order of the finite-order centering along x
* \param[in] centering_noy order of the finite-order centering along y
* \param[in] centering_noz order of the finite-order centering along z
+ * \param[in] a_grid_type type of grid (collocated or not)
*/
void AllocateCenteringCoefficients (amrex::Gpu::DeviceVector<amrex::Real>& device_centering_stencil_coeffs_x,
amrex::Gpu::DeviceVector<amrex::Real>& device_centering_stencil_coeffs_y,
amrex::Gpu::DeviceVector<amrex::Real>& device_centering_stencil_coeffs_z,
const int centering_nox,
const int centering_noy,
- const int centering_noz);
+ const int centering_noz,
+ const short a_grid_type);
void AllocLevelMFs (int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm,
const amrex::IntVect& ngEB, amrex::IntVect& ngJ,
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index 0f57cbee1..1e48d659d 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -193,7 +193,7 @@ IntVect WarpX::filter_npass_each_dir(1);
int WarpX::n_field_gather_buffer = -1;
int WarpX::n_current_deposition_buffer = -1;
-bool WarpX::do_nodal = false;
+short WarpX::grid_type;
amrex::IntVect m_rho_nodal_flag;
int WarpX::do_similar_dm_pml = 1;
@@ -908,9 +908,12 @@ WarpX::ReadParameters ()
pp_warpx.query("do_dynamic_scheduling", do_dynamic_scheduling);
- pp_warpx.query("do_nodal", do_nodal);
+ // Integer that corresponds to the type of grid used in the simulation
+ // (collocated, staggered, hybrid)
+ grid_type = GetAlgorithmInteger(pp_warpx, "grid_type");
+
// Use same shape factors in all directions, for gathering
- if (do_nodal) galerkin_interpolation = false;
+ if (grid_type == GridType::Collocated) galerkin_interpolation = false;
#ifdef WARPX_DIM_RZ
// Only needs to be set with WARPX_DIM_RZ, otherwise defaults to 1
@@ -919,13 +922,47 @@ WarpX::ReadParameters ()
"The number of azimuthal modes (n_rz_azimuthal_modes) must be at least 1");
#endif
- // If true, the current is deposited on a nodal grid and centered onto a staggered grid.
- // Setting warpx.do_current_centering = 1 makes sense only if warpx.do_nodal = 0. Instead,
- // if warpx.do_nodal = 1, Maxwell's equations are solved on a nodal grid and the current
- // should not be centered onto a staggered grid.
- if (WarpX::do_nodal == 0)
+ // Set default parameters with hybrid grid (parsed later below)
+ if (grid_type == GridType::Hybrid)
+ {
+ // Finite-order centering of fields (staggered to nodal)
+ // Default field gathering algorithm will be set below
+ field_centering_nox = 8;
+ field_centering_noy = 8;
+ field_centering_noz = 8;
+ // Finite-order centering of currents (nodal to staggered)
+ do_current_centering = true;
+ current_centering_nox = 8;
+ current_centering_noy = 8;
+ current_centering_noz = 8;
+ }
+
+ // If true, the current is deposited on a nodal grid and centered onto
+ // a staggered grid. Setting warpx.do_current_centering=1 makes sense
+    // only if warpx.grid_type=hybrid. Instead, if warpx.grid_type=collocated or
+ // warpx.grid_type=staggered, Maxwell's equations are solved either on a
+ // collocated grid or on a staggered grid without current centering.
+ pp_warpx.query("do_current_centering", do_current_centering);
+ if (do_current_centering)
{
- pp_warpx.query("do_current_centering", do_current_centering);
+ WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
+ grid_type == GridType::Hybrid,
+ "warpx.do_current_centering=1 can be used only with warpx.grid_type=hybrid");
+
+ utils::parser::queryWithParser(
+ pp_warpx, "current_centering_nox", current_centering_nox);
+ utils::parser::queryWithParser(
+ pp_warpx, "current_centering_noy", current_centering_noy);
+ utils::parser::queryWithParser(
+ pp_warpx, "current_centering_noz", current_centering_noz);
+
+ AllocateCenteringCoefficients(device_current_centering_stencil_coeffs_x,
+ device_current_centering_stencil_coeffs_y,
+ device_current_centering_stencil_coeffs_z,
+ current_centering_nox,
+ current_centering_noy,
+ current_centering_noz,
+ grid_type);
}
WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
@@ -957,9 +994,9 @@ WarpX::ReadParameters ()
}
if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) {
- // Force do_nodal=true (that is, not staggered) and
- // use same shape factors in all directions, for gathering
- do_nodal = true;
+ // Force grid_type=collocated (neither staggered nor hybrid)
+ // and use same shape factors in all directions for gathering
+ grid_type = GridType::Collocated;
galerkin_interpolation = false;
}
#endif
@@ -998,12 +1035,40 @@ WarpX::ReadParameters ()
"Vay deposition not implemented with multi-J algorithm");
}
+ // Query algo.field_gathering from input, set field_gathering_algo to
+ // "default" if not found (default defined in Utils/WarpXAlgorithmSelection.cpp)
field_gathering_algo = GetAlgorithmInteger(pp_algo, "field_gathering");
- if (field_gathering_algo == GatheringAlgo::MomentumConserving) {
- // Use same shape factors in all directions, for gathering
- galerkin_interpolation = false;
+
+ // Set default field gathering algorithm for hybrid grids (momentum-conserving)
+ std::string tmp_algo;
+ // - algo.field_gathering not found in the input
+ // - field_gathering_algo set to "default" above
+ // (default defined in Utils/WarpXAlgorithmSelection.cpp)
+ // - reset default value here for hybrid grids
+ if (pp_algo.query("field_gathering", tmp_algo) == false)
+ {
+ if (grid_type == GridType::Hybrid)
+ {
+ field_gathering_algo = GatheringAlgo::MomentumConserving;
+ }
+ }
+ // - algo.field_gathering found in the input
+ // - field_gathering_algo read above and set to user-defined value
+ else
+ {
+ if (grid_type == GridType::Hybrid)
+ {
+ WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
+ field_gathering_algo == GatheringAlgo::MomentumConserving,
+ "Hybrid grid (warpx.grid_type=hybrid) should be used only with "
+ "momentum-conserving field gathering algorithm "
+ "(algo.field_gathering=momentum-conserving)");
+ }
}
+ // Use same shape factors in all directions, for gathering
+ if (field_gathering_algo == GatheringAlgo::MomentumConserving) galerkin_interpolation = false;
+
em_solver_medium = GetAlgorithmInteger(pp_algo, "em_solver_medium");
if (em_solver_medium == MediumForEM::Macroscopic ) {
macroscopic_solver_algo = GetAlgorithmInteger(pp_algo,"macroscopic_sigma_method");
@@ -1106,62 +1171,43 @@ WarpX::ReadParameters ()
ParmParse pp_interpolation("interpolation");
pp_interpolation.query("galerkin_scheme",galerkin_interpolation);
+ }
- // Read order of finite-order centering of fields (staggered to nodal).
- // Read this only if warpx.do_nodal = 0. Instead, if warpx.do_nodal = 1,
- // Maxwell's equations are solved on a nodal grid and the electromagnetic
- // forces are gathered from a nodal grid, hence the fields do not need to
- // be centered onto a nodal grid.
- if (WarpX::field_gathering_algo == GatheringAlgo::MomentumConserving &&
- WarpX::do_nodal == 0)
- {
- utils::parser::queryWithParser(
- pp_interpolation, "field_centering_nox", field_centering_nox);
- utils::parser::queryWithParser(
- pp_interpolation, "field_centering_noy", field_centering_noy);
- utils::parser::queryWithParser(
- pp_interpolation, "field_centering_noz", field_centering_noz);
- }
+ {
+ ParmParse pp_warpx("warpx");
- // Read order of finite-order centering of currents (nodal to staggered)
- if (WarpX::do_current_centering)
+ // If warpx.grid_type=staggered or warpx.grid_type=hybrid,
+ // and algo.field_gathering=momentum-conserving, the fields are solved
+ // on a staggered grid and centered onto a nodal grid for gathering.
+ // Instead, if warpx.grid_type=collocated, the momentum-conserving and
+        // energy-conserving field gathering algorithms are equivalent (forces
+        // gathered from the collocated grid) and no field centering occurs.
+ if (WarpX::field_gathering_algo == GatheringAlgo::MomentumConserving &&
+ WarpX::grid_type != GridType::Collocated)
{
utils::parser::queryWithParser(
- pp_interpolation, "current_centering_nox", current_centering_nox);
+ pp_warpx, "field_centering_nox", field_centering_nox);
utils::parser::queryWithParser(
- pp_interpolation, "current_centering_noy", current_centering_noy);
+ pp_warpx, "field_centering_noy", field_centering_noy);
utils::parser::queryWithParser(
- pp_interpolation, "current_centering_noz", current_centering_noz);
- }
-
- // Finite-order centering is not implemented with mesh refinement
- // (note that when WarpX::do_nodal = 1 finite-order centering is not used anyways)
- if (maxLevel() > 0 && WarpX::do_nodal == 0)
- {
- WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
- field_centering_nox == 2 && field_centering_noy == 2 && field_centering_noz == 2,
- "High-order centering of fields (order > 2) is not implemented with mesh refinement");
- }
+ pp_warpx, "field_centering_noz", field_centering_noz);
- if (WarpX::field_gathering_algo == GatheringAlgo::MomentumConserving &&
- WarpX::do_nodal == 0)
- {
AllocateCenteringCoefficients(device_field_centering_stencil_coeffs_x,
device_field_centering_stencil_coeffs_y,
device_field_centering_stencil_coeffs_z,
field_centering_nox,
field_centering_noy,
- field_centering_noz);
+ field_centering_noz,
+ grid_type);
}
- if (WarpX::do_current_centering)
+ // Finite-order centering is not implemented with mesh refinement
+    // (note that when warpx.grid_type=collocated, finite-order centering is not used anyway)
+ if (maxLevel() > 0 && WarpX::grid_type != GridType::Collocated)
{
- AllocateCenteringCoefficients(device_current_centering_stencil_coeffs_x,
- device_current_centering_stencil_coeffs_y,
- device_current_centering_stencil_coeffs_z,
- current_centering_nox,
- current_centering_noy,
- current_centering_noz);
+ WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
+ field_centering_nox == 2 && field_centering_noy == 2 && field_centering_noz == 2,
+ "High-order centering of fields (order > 2) is not implemented with mesh refinement");
}
}
@@ -1805,7 +1851,7 @@ WarpX::AllocLevelData (int lev, const BoxArray& ba, const DistributionMapping& d
dx,
do_subcycling,
WarpX::use_fdtd_nci_corr,
- do_nodal,
+ grid_type,
do_moving_window,
moving_window_dir,
WarpX::nox,
@@ -1920,7 +1966,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm
G_nodal_flag = amrex::IntVect::TheCellVector();
// Overwrite nodal flags if necessary
- if (do_nodal) {
+ if (grid_type == GridType::Collocated) {
Ex_nodal_flag = IntVect::TheNodeVector();
Ey_nodal_flag = IntVect::TheNodeVector();
Ez_nodal_flag = IntVect::TheNodeVector();
@@ -2171,13 +2217,13 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm
#endif
} // ElectromagneticSolverAlgo::PSATD
else {
- m_fdtd_solver_fp[lev] = std::make_unique<FiniteDifferenceSolver>(electromagnetic_solver_id, dx, do_nodal);
+ m_fdtd_solver_fp[lev] = std::make_unique<FiniteDifferenceSolver>(electromagnetic_solver_id, dx, grid_type);
}
//
// The Aux patch (i.e., the full solution)
//
- if (aux_is_nodal and !do_nodal)
+ if (aux_is_nodal and grid_type != GridType::Collocated)
{
// Create aux multifabs on Nodal Box Array
BoxArray const nba = amrex::convert(ba,IntVect::TheNodeVector());
@@ -2266,11 +2312,11 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm
if (do_divb_cleaning)
{
- if (do_nodal)
+ if (grid_type == GridType::Collocated)
{
AllocInitMultiFab(G_cp[lev], amrex::convert(cba, IntVect::TheUnitVector()), dm, ncomps, ngG, tag("G_cp"), 0.0_rt);
}
- else // do_nodal = 0
+ else // grid_type=staggered or grid_type=hybrid
{
AllocInitMultiFab(G_cp[lev], amrex::convert(cba, IntVect::TheZeroVector()), dm, ncomps, ngG, tag("G_cp"), 0.0_rt);
}
@@ -2314,7 +2360,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm
} // ElectromagneticSolverAlgo::PSATD
else {
m_fdtd_solver_cp[lev] = std::make_unique<FiniteDifferenceSolver>(electromagnetic_solver_id, cdx,
- do_nodal);
+ grid_type);
}
}
@@ -2401,7 +2447,7 @@ void WarpX::AllocLevelSpectralSolverRZ (amrex::Vector<std::unique_ptr<SpectralSo
dm,
n_rz_azimuthal_modes,
noz_fft,
- do_nodal,
+ grid_type,
m_v_galilean,
dx_vect,
solver_dt,
@@ -2455,7 +2501,7 @@ void WarpX::AllocLevelSpectralSolver (amrex::Vector<std::unique_ptr<SpectralSolv
nox_fft,
noy_fft,
noz_fft,
- do_nodal,
+ grid_type,
m_v_galilean,
m_v_comoving,
dx_vect,
@@ -2557,9 +2603,8 @@ WarpX::ComputeDivB (amrex::MultiFab& divB, int const dcomp,
const std::array<const amrex::MultiFab* const, 3>& B,
const std::array<amrex::Real,3>& dx)
{
- WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!do_nodal,
- "ComputeDivB not implemented with do_nodal."
- "Shouldn't be too hard to make it general with class FiniteDifferenceSolver");
+ WARPX_ALWAYS_ASSERT_WITH_MESSAGE(grid_type != GridType::Collocated,
+        "ComputeDivB not implemented with warpx.grid_type=collocated.");
Real dxinv = 1._rt/dx[0], dyinv = 1._rt/dx[1], dzinv = 1._rt/dx[2];
@@ -2595,9 +2640,8 @@ WarpX::ComputeDivB (amrex::MultiFab& divB, int const dcomp,
const std::array<const amrex::MultiFab* const, 3>& B,
const std::array<amrex::Real,3>& dx, IntVect const ngrow)
{
- WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!do_nodal,
- "ComputeDivB not implemented with do_nodal."
- "Shouldn't be too hard to make it general with class FiniteDifferenceSolver");
+ WARPX_ALWAYS_ASSERT_WITH_MESSAGE(grid_type != GridType::Collocated,
+ "ComputeDivB not implemented with warpx.grid_type=collocated.");
Real dxinv = 1._rt/dx[0], dyinv = 1._rt/dx[1], dzinv = 1._rt/dx[2];
@@ -2795,7 +2839,7 @@ WarpX::BuildBufferMasksInBox ( const amrex::Box tbx, amrex::IArrayBox &buffer_ma
#endif
}
-amrex::Vector<amrex::Real> WarpX::getFornbergStencilCoefficients(const int n_order, const bool nodal)
+amrex::Vector<amrex::Real> WarpX::getFornbergStencilCoefficients(const int n_order, const short a_grid_type)
{
AMREX_ALWAYS_ASSERT_WITH_MESSAGE(n_order % 2 == 0, "n_order must be even");
@@ -2807,8 +2851,8 @@ amrex::Vector<amrex::Real> WarpX::getFornbergStencilCoefficients(const int n_ord
// an overflow when evaluated numerically. One way to avoid the overflow is
// to calculate the coefficients by recurrence.
- // Coefficients for nodal (centered) finite-difference approximation
- if (nodal == true)
+ // Coefficients for collocated (nodal) finite-difference approximation
+ if (a_grid_type == GridType::Collocated)
{
// First coefficient
coeffs.at(0) = m * 2. / (m+1);
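Only the first collocated coefficient appears in this hunk; the rest follow by a short recurrence. A self-contained sketch (the recurrence itself is not shown here and is inferred; it reproduces the standard centered stencils, e.g. order 4 gives {4/3, -1/3} and order 6 gives {3/2, -3/5, 1/10}):

    #include <vector>

    // Collocated Fornberg coefficients for an even order n_order = 2m.
    // coeffs[0] = 2m/(m+1) matches the hunk above; the recurrence for the
    // remaining entries is an assumption checked against low orders.
    std::vector<double> collocated_fornberg_coeffs (int n_order)
    {
        const int m = n_order / 2;
        std::vector<double> coeffs(m);
        coeffs[0] = m * 2. / (m + 1);
        for (int n = 1; n < m; ++n) {
            coeffs[n] = - (m - n) * 1. / (m + n + 1) * coeffs[n - 1];
        }
        return coeffs;
    }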
@@ -2856,7 +2900,8 @@ void WarpX::AllocateCenteringCoefficients (amrex::Gpu::DeviceVector<amrex::Real>
amrex::Gpu::DeviceVector<amrex::Real>& device_centering_stencil_coeffs_z,
const int centering_nox,
const int centering_noy,
- const int centering_noz)
+ const int centering_noz,
+ const short a_grid_type)
{
// Vectors of Fornberg stencil coefficients
amrex::Vector<amrex::Real> Fornberg_stencil_coeffs_x;
@@ -2868,9 +2913,9 @@ void WarpX::AllocateCenteringCoefficients (amrex::Gpu::DeviceVector<amrex::Real>
amrex::Vector<amrex::Real> host_centering_stencil_coeffs_y;
amrex::Vector<amrex::Real> host_centering_stencil_coeffs_z;
- Fornberg_stencil_coeffs_x = getFornbergStencilCoefficients(centering_nox, false);
- Fornberg_stencil_coeffs_y = getFornbergStencilCoefficients(centering_noy, false);
- Fornberg_stencil_coeffs_z = getFornbergStencilCoefficients(centering_noz, false);
+ Fornberg_stencil_coeffs_x = getFornbergStencilCoefficients(centering_nox, a_grid_type);
+ Fornberg_stencil_coeffs_y = getFornbergStencilCoefficients(centering_noy, a_grid_type);
+ Fornberg_stencil_coeffs_z = getFornbergStencilCoefficients(centering_noz, a_grid_type);
host_centering_stencil_coeffs_x.resize(centering_nox);
host_centering_stencil_coeffs_y.resize(centering_noy);