-rw-r--r--  Docs/source/conf.py                      |   2
-rw-r--r--  Docs/source/running_cpp/parameters.rst   |  15
-rw-r--r--  Docs/source/visualization/sensei.rst     | 405
-rw-r--r--  Regression/WarpX-tests.ini               |   4
-rw-r--r--  Source/WarpX.H                           |   9
-rw-r--r--  Source/WarpX.cpp                         |  42
-rw-r--r--  Source/WarpXIO.cpp                       |  95
-rw-r--r--  Source/WarpXInitData.cpp                 |   1
-rw-r--r--  Source/WarpXUtil.cpp                     |   5
9 files changed, 512 insertions, 66 deletions
diff --git a/Docs/source/conf.py b/Docs/source/conf.py
index 1d4b97c49..ce6f436d3 100644
--- a/Docs/source/conf.py
+++ b/Docs/source/conf.py
@@ -88,6 +88,8 @@ todo_include_todos = False
#
html_theme = 'sphinx_rtd_theme'
+numfig = True
+
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
diff --git a/Docs/source/running_cpp/parameters.rst b/Docs/source/running_cpp/parameters.rst
index 3c579d6fd..6660f0196 100644
--- a/Docs/source/running_cpp/parameters.rst
+++ b/Docs/source/running_cpp/parameters.rst
@@ -376,11 +376,11 @@ Numerics and algorithms
- ``2``: Direct deposition, vectorized
- ``3``: Direct deposition, non-optimized
- .. warning ::
+ .. warning::
- The vectorized Esirkepov deposition
- (``algo.current_deposition=0``) is currently not functional in WarpX.
- All the other methods (``1``, ``2`` and ``3``) are functional.
+ The vectorized versions do not run on GPU. Use
+ ``algo.current_deposition=1`` or ``3`` when running
+ on GPU.
* ``algo.charge_deposition`` (`integer`)
The algorithm for the charge density deposition:
@@ -393,6 +393,10 @@ Numerics and algorithms
- ``0``: Vectorized version
- ``1``: Non-optimized version
+ .. warning::
+
+ The vectorized version does not run on GPU. Use
+ ``algo.field_gather=1`` when running on GPU.
* ``algo.particle_pusher`` (`integer`)
The algorithm for the particle pusher:
@@ -470,6 +474,9 @@ Diagnostics and output
Only used when mesh refinement is activated and ``warpx.plot_raw_fields`` is ``1``.
Whether to output the data of the coarse patch, in the plot files.
+* ``amr.plot_file`` (`string`)
+    Root for output file names. Supports sub-directories. Default: ``plotfiles/plt``.
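+
+    For example, a minimal sketch (the ``diags`` directory name here is
+    illustrative):
+
+    .. code-block:: python
+
+        amr.plot_file = diags/plt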
+
Checkpoints and restart
-----------------------
WarpX supports checkpoints/restart via AMReX.
diff --git a/Docs/source/visualization/sensei.rst b/Docs/source/visualization/sensei.rst
index 4c71035cf..59f879e47 100644
--- a/Docs/source/visualization/sensei.rst
+++ b/Docs/source/visualization/sensei.rst
@@ -9,6 +9,32 @@ SENSEI uses an XML file to select and configure one or more back ends at run
time. Run time selection of the back end via XML means one user can access
Catalyst, another Libsim, yet another Python with no changes to the code.
+System Architecture
+-------------------
+
+.. _sensei_arch:
+.. figure:: https://data.kitware.com/api/v1/item/5c06cd538d777f2179d4aaca/download
+
+   SENSEI's in situ architecture enables use of a diverse set of back ends
+   which can be selected at run time via an XML configuration file.
+
+The three major architectural components in SENSEI are *data adaptors*, which
+present simulation data in SENSEI's data model, *analysis adaptors*, which
+present the back-end data consumers to the simulation, and *bridge code*,
+through which the simulation manages the adaptors and periodically pushes data
+through the system. SENSEI comes equipped with a number of analysis adaptors
+enabling use of popular analysis and visualization libraries such as VisIt
+Libsim, ParaView Catalyst, Python, and ADIOS. AMReX contains SENSEI data
+adaptors and bridge code, making it easy to use in AMReX-based simulation codes.
+
+SENSEI provides a *configurable analysis adaptor* which uses an XML file to
+select and configure one or more back ends at run time. Run time selection of
+the back end via XML means one user can access Catalyst, another Libsim, and
+yet another Python, with no changes to the code. This is depicted in
+:numref:`sensei_arch`: on the left side of the figure AMReX produces data,
+and the bridge code pushes the data through the configurable analysis adaptor
+to the back end that was selected at run time.
+
Compiling with GNU Make
-----------------------
For codes making use of AMReX's build system add the following variable to the
@@ -41,18 +67,109 @@ ParmParse Configuration
-----------------------
Once an AMReX code has been compiled with SENSEI features enabled, it will need
to be enabled and configured at runtime. This is done using ParmParse input file.
-The following 3 ParmParse parameters are used:
+The supported parameters are described in the following table.
+
++--------------------------+------------------------------------------------------+---------+
+| parameter                | description                                          | default |
++==========================+======================================================+=========+
+| :code:`insitu.int`       | turns in situ processing on or off and controls how  | 0       |
+|                          | often data is processed.                             |         |
++--------------------------+------------------------------------------------------+---------+
+| :code:`insitu.start`     | controls when in situ processing starts.             | 0       |
++--------------------------+------------------------------------------------------+---------+
+| :code:`insitu.config`    | points to the SENSEI XML file which selects and      |         |
+|                          | configures the desired back end.                     |         |
++--------------------------+------------------------------------------------------+---------+
+| :code:`insitu.pin_mesh`  | when 1, the lower left corner of the mesh is pinned  | 0       |
+|                          | to 0.,0.,0.                                          |         |
++--------------------------+------------------------------------------------------+---------+
+
+A typical use case is to enable SENSEI by setting :code:`insitu.int` to a
+value greater than 0, and :code:`insitu.config` to point SENSEI to an XML
+file that selects and configures the desired back end.
.. code-block:: python
insitu.int = 2
- insitu.start = 0
- insitu.config = render_iso_catalyst_2d.xml
+ insitu.config = render_iso_catalyst.xml
+
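+For example, a sketch that exercises all four parameters (the values here are
+illustrative):
+
+.. code-block:: python
+
+    insitu.int = 2                           # process data every 2 steps
+    insitu.start = 100                       # skip the first 100 steps
+    insitu.config = render_iso_catalyst.xml  # back-end configuration file
+    insitu.pin_mesh = 1                      # pin mesh origin to 0.,0.,0.
+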
+Back-end Selection and Configuration
+------------------------------------
+The back end is selected and configured at run time using the SENSEI XML file.
+The XML sets parameters specific to SENSEI and to the chosen back end. Many of
+the back ends have sophisticated configuration mechanisms which SENSEI makes
+use of. For example, the following XML configuration was used on NERSC's Cori
+with WarpX to render 10 iso surfaces, shown in :numref:`lpa_visit`, using
+VisIt Libsim.
+
+.. code-block:: xml
+
+ <sensei>
+ <analysis type="libsim" frequency="1" mode="batch"
+ session="beam_j_pin.session"
+ image-filename="beam_j_pin_%ts" image-width="1200" image-height="900"
+ image-format="png" enabled="1"/>
+ </sensei>
+
+The *session* attribute names a session file that contains VisIt-specific
+runtime configuration. The session file is generated using the VisIt GUI on a
+representative dataset. Usually this dataset is generated in a low-resolution
+run of the desired simulation.
+
+.. _lpa_visit:
+.. figure:: https://data.kitware.com/api/v1/item/5c06b4b18d777f2179d4784c/download
+
+   Rendering of 10 3D iso-surfaces of j using VisIt Libsim. The upper left
+   quadrant has been clipped away to reveal inner structure.
+
+The same run and visualization was repeated using ParaView Catalyst, shown in
+:numref:`lpa_pv`, by providing the following XML configuration.
+
+.. code-block:: xml
+
+ <sensei>
+ <analysis type="catalyst" pipeline="pythonscript"
+ filename="beam_j.py" enabled="1" />
+ </sensei>
+
+Here the *filename* attribute is used to pass Catalyst a Catalyst-specific
+configuration that was generated using the ParaView GUI on a representative
+dataset.
+
+.. _lpa_pv:
+.. figure:: https://data.kitware.com/api/v1/item/5c05b6388d777f2179d207ae/download
+
+   Rendering of 10 3D iso-surfaces of j using ParaView Catalyst. The upper left
+   quadrant has been clipped away to reveal inner structure.
+
+The renderings in these runs were configured using a representative dataset
+which was obtained by running the simulation for a few time steps at a lower
+spatial resolution. When using VisIt Libsim, the following XML configures the
+VTK writer to write the simulation data in VTK format. At the end of the run,
+a :code:`.visit` file that VisIt can open will be generated.
+
+.. code-block:: xml
+
+ <sensei>
+ <analysis type="PosthocIO" mode="visit" writer="xml"
+ ghost_array_name="avtGhostZones" output_dir="./"
+ enabled="1">
+ </analysis>
+ </sensei>
+
+When using ParaView Catalyst, the following XML configures the VTK writer to
+write the simulation data in VTK format. At the end of the run, a :code:`.pvd`
+file that ParaView can open will be generated.
+
+.. code-block:: xml
+
+ <sensei>
+ <analysis type="PosthocIO" mode="paraview" writer="xml"
+ ghost_array_name="vtkGhostType" output_dir="./"
+ enabled="1">
+ </analysis>
+ </sensei>
-:code:`insitu.int` turns in situ processing on or off and controls how often
-data is processed. :code:`insitu.start` controls when in situ processing
-starts. :code:`insitu.config` points to the SENSEI XML file which selects and
-configures the desired back end.
Obtaining SENSEI
-----------------
@@ -71,7 +188,279 @@ such as VisIt and ParaView installed. The VM is the easiest way to test things
out. It also can be used to see how installs were done and the environment
configured.
+The SENSEI VM can be downloaded here_.
+
+.. _here: https://data.kitware.com/api/v1/file/5be656368d777f21799ee5a6/download
+
+The SENSEI VM uses modules to manage the build and run environment. Load the
+SENSEI modulefile for the back-end you wish to use. The following table
+describes the available installs and which back-ends are supported in each.
+
++-------------------------------+-------------------------------------+
+| modulefile                    | back-end(s)                         |
++===============================+=====================================+
+| sensei/2.1.1-catalyst-shared  | ParaView Catalyst, ADIOS, Python    |
++-------------------------------+-------------------------------------+
+| sensei/2.1.1-libsim-shared    | VisIt Libsim, ADIOS, Python         |
++-------------------------------+-------------------------------------+
+| sensei/2.1.1-vtk-shared       | VTK-m, ADIOS, Python                |
++-------------------------------+-------------------------------------+
+
NERSC Cori
~~~~~~~~~~
SENSEI is deployed at NERSC on Cori. The NERSC deployment includes the major
-back ends such as ParaView Catalyst, VisIt Libsim, and Python.
+back ends such as ADIOS, ParaView Catalyst, VisIt Libsim, and Python.
+
+The SENSEI installs use modules to manage the build and run environment. Load the
+SENSEI modulefile for the back-end you wish to use. The following table
+describes the available installs and which back-ends are supported in each.
+
++-------------------------------+-------------------------------------+
+| modulefile                    | back-end(s)                         |
++===============================+=====================================+
+| sensei/2.1.0-catalyst-shared  | ParaView Catalyst, ADIOS, Python    |
++-------------------------------+-------------------------------------+
+| sensei/2.1.0-libsim-shared    | VisIt Libsim, ADIOS, Python         |
++-------------------------------+-------------------------------------+
+| sensei/2.1.0-vtk-shared       | VTK-m, ADIOS, Python                |
++-------------------------------+-------------------------------------+
+
+
+To access the SENSEI modulefiles on Cori, first add the SENSEI install to the search path:
+
+.. code-block:: bash
+
+ module use /usr/common/software/sensei/modulefiles
+
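+Then load the modulefile for the desired back end, for example:
+
+.. code-block:: bash
+
+    module load sensei/2.1.0-libsim-shared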
+
+3D LPA Example
+--------------
+This section shows an example of using SENSEI and three different back ends on
+a 3D LPA simulation. The instructions are specifically for NERSC Cori, but they
+also work with the SENSEI VM. The primary difference between working through
+the examples on Cori and on the VM is that different versions of the software
+are installed.
+
+
+Rendering with VisIt Libsim
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+First, log into Cori and clone the git repos.
+
+.. code-block:: bash
+
+ cd $SCRATCH
+ mkdir warpx
+ cd warpx/
+ git clone https://github.com/ECP-WarpX/WarpX.git WarpX-libsim
+ git clone https://github.com/AMReX-Codes/amrex
+ git clone https://bitbucket.org/berkeleylab/picsar.git
+ cd amrex/
+ git checkout development
+ cd ../WarpX-libsim
+ git checkout dev
+ vim GNUmakefile
+
+Next, edit the makefile to turn the SENSEI features on.
+
+.. code-block:: make
+
+ USE_SENSEI_INSITU=TRUE
+
+Then, load the SENSEI VisIt module, bring SENSEI's build requirements into the
+environment, and compile WarpX.
+
+.. code-block:: bash
+
+ module use /usr/common/software/sensei/modulefiles/
+ module load sensei/2.1.0-libsim-shared
+ source sensei_config
+ make -j8
+
+Download the WarpX input deck, SENSEI XML configuration, and VisIt session
+files. The inputs file configures WarpX, the XML file configures SENSEI, and
+the session file configures VisIt. The inputs and XML files are written by
+hand, while the session file is generated in the VisIt GUI on a representative
+dataset.
+
+.. code-block:: bash
+
+ wget https://data.kitware.com/api/v1/item/5c05d48e8d777f2179d22f20/download -O inputs.3d
+ wget https://data.kitware.com/api/v1/item/5c05d4588d777f2179d22f16/download -O beam_j_pin.xml
+ wget https://data.kitware.com/api/v1/item/5c05d4588d777f2179d22f0e/download -O beam_j_pin.session
+
+To run the demo, submit an interactive job to the batch queue, and launch WarpX.
+
+.. code-block:: bash
+
+ salloc -C haswell -N 1 -t 00:30:00 -q debug
+ ./Bin/main3d.gnu.TPROF.MPI.OMP.ex inputs.3d
+
+
+Rendering with ParaView Catalyst
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+First, log into Cori and clone the git repos.
+
+.. code-block:: bash
+
+ cd $SCRATCH
+ mkdir warpx
+ cd warpx/
+ git clone https://github.com/ECP-WarpX/WarpX.git WarpX-catalyst
+ git clone https://github.com/AMReX-Codes/amrex
+ git clone https://bitbucket.org/berkeleylab/picsar.git
+ cd amrex/
+ git checkout development
+ cd ../WarpX-catalyst
+ git checkout dev
+ vim GNUmakefile
+
+Next, edit the makefile to turn the SENSEI features on.
+
+.. code-block:: make
+
+ USE_SENSEI_INSITU=TRUE
+
+Then, load the SENSEI ParaView module, bring SENSEI's build requirements into the
+environment, and compile WarpX.
+
+.. code-block:: bash
+
+ module use /usr/common/software/sensei/modulefiles/
+ module load sensei/2.1.0-catalyst-shared
+ source sensei_config
+ make -j8
+
+Download the WarpX input deck, SENSEI XML configuration, and ParaView session
+files. The inputs file configures WarpX, the XML file configures SENSEI, and
+the session file configures ParaView. The inputs and XML files are written by
+hand, while the session file is generated in the ParaView GUI on a
+representative dataset.
+
+.. code-block:: bash
+
+ wget https://data.kitware.com/api/v1/item/5c05b3fd8d777f2179d2067d/download -O inputs.3d
+ wget https://data.kitware.com/api/v1/item/5c05b3fd8d777f2179d20675/download -O beam_j.xml
+ wget https://data.kitware.com/api/v1/item/5c05b3fc8d777f2179d2066d/download -O beam_j.py
+
+To run the demo, submit an interactive job to the batch queue, and launch WarpX.
+
+.. code-block:: bash
+
+ salloc -C haswell -N 1 -t 00:30:00 -q debug
+ ./Bin/main3d.gnu.TPROF.MPI.OMP.ex inputs.3d
+
+In situ Calculation with Python
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+SENSEI's Python back-end loads a user-provided script file containing callbacks
+for the :code:`Initialize`, :code:`Execute`, and :code:`Finalize` phases of the
+run. During the execute phase the simulation pushes data through SENSEI, which
+forwards it to the user-provided Python functions. SENSEI's MPI communicator is
+made available to the user's functions via a global variable :code:`comm`.
+
+Here is a template for the user-provided Python code.
+
+.. code-block:: python
+
+    # YOUR IMPORTS HERE
+
+    # SET DEFAULTS OF GLOBAL VARIABLES THAT INFLUENCE RUNTIME BEHAVIOR HERE
+
+    def Initialize():
+        """ Initialization code """
+        # YOUR CODE HERE
+        return
+
+    def Execute(dataAdaptor):
+        """ Use sensei::DataAdaptor instance passed in
+        dataAdaptor to access and process simulation data """
+        # YOUR CODE HERE
+        return
+
+    def Finalize():
+        """ Finalization code """
+        # YOUR CODE HERE
+        return
+
+:code:`Initialize` and :code:`Finalize` are optional and will be called if
+they are provided. :code:`Execute` is required. SENSEI's DataAdaptor API
+is used to obtain data and metadata from the simulation. Data is passed in
+the form of VTK objects; in WarpX, the vtkOverlappingAMR dataset is used.
+
+The following script shows a simple integration of a scalar quantity
+over the valid cells of the mesh. The result is saved in CSV format.
+
+.. code-block:: python
+
+    import numpy as np, matplotlib.pyplot as plt
+    from vtk.util.numpy_support import *
+    from vtk import vtkDataObject
+    from mpi4py import MPI  # needed for MPI.SUM in the reduction below
+    import sys
+
+    # default values of control parameters
+    array = ''
+    out_file = ''
+
+    def Initialize():
+        # rank zero writes the result
+        if comm.Get_rank() == 0:
+            fn = out_file if out_file else 'integrate_%s.csv'%(array)
+            f = open(fn, 'w')
+            f.write('# time, %s\n'%(array))
+            f.close()
+        return
+
+    def Execute(adaptor):
+        # get the mesh and arrays we need
+        dobj = adaptor.GetMesh('mesh', False)
+        adaptor.AddArray(dobj, 'mesh', vtkDataObject.CELL, array)
+        adaptor.AddGhostCellsArray(dobj, 'mesh')
+        time = adaptor.GetDataTime()
+
+        # integrate over the local blocks
+        varint = 0.
+        it = dobj.NewIterator()
+        while not it.IsDoneWithTraversal():
+            # get the local data block and its props
+            blk = it.GetCurrentDataObject()
+
+            # get the array container
+            atts = blk.GetCellData()
+
+            # get the data array
+            var = vtk_to_numpy(atts.GetArray(array))
+
+            # get ghost cell mask
+            ghost = vtk_to_numpy(atts.GetArray('vtkGhostType'))
+            ii = np.where(ghost == 0)[0]
+
+            # integrate over valid cells, accumulating across blocks
+            varint += np.sum(var[ii])*np.prod(blk.GetSpacing())
+
+            it.GoToNextItem()
+
+        # reduce integral to rank 0
+        varint = comm.reduce(varint, root=0, op=MPI.SUM)
+
+        # rank zero writes the result
+        if comm.Get_rank() == 0:
+            fn = out_file if out_file else 'integrate_%s.csv'%(array)
+            f = open(fn, 'a+')
+            f.write('%s, %s\n'%(time, varint))
+            f.close()
+        return
+
+The following XML configures SENSEI's Python back-end.
+
+.. code-block:: xml
+
+ <sensei>
+ <analysis type="python" script_file="./integrate.py" enabled="1">
+ <initialize_source>
+ array='rho'
+ out_file='rho.csv'
+ </initialize_source>
+ </analysis>
+ </sensei>
+
+The :code:`script_file` attribute sets the file path to load the user's Python
+code from, and the :code:`initialize_source` element contains Python code that
+controls runtime behavior specific to each user-provided script.
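+
+To drive this script from WarpX, one might point :code:`insitu.config` at the
+XML shown above (the file name :code:`integrate_rho.xml` here is illustrative):
+
+.. code-block:: python
+
+    insitu.int = 1
+    insitu.config = integrate_rho.xml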
diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini
index 3fcdfa047..49f363d8b 100644
--- a/Regression/WarpX-tests.ini
+++ b/Regression/WarpX-tests.ini
@@ -60,7 +60,7 @@ branch = master
[pml_x_yee]
buildDir = .
inputFile = Examples/Tests/PML/inputs2d
-runtime_params = warpx.do_dynamic_scheduling=0 warpx.maxwell_fdtd_solver=yee
+runtime_params = warpx.do_dynamic_scheduling=0 algo.maxwell_fdtd_solver=yee
dim = 2
addToCompileString =
restartTest = 0
@@ -75,7 +75,7 @@ analysisRoutine = Examples/Tests/PML/analysis_pml.py
[pml_x_ckc]
buildDir = .
inputFile = Examples/Tests/PML/inputs2d
-runtime_params = warpx.do_dynamic_scheduling=0 warpx.maxwell_fdtd_solver=ckc
+runtime_params = warpx.do_dynamic_scheduling=0 algo.maxwell_fdtd_solver=ckc
dim = 2
addToCompileString =
restartTest = 0
diff --git a/Source/WarpX.H b/Source/WarpX.H
index 5678ac79f..08fe657b4 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -354,10 +354,18 @@ private:
const std::array<const amrex::MultiFab*, 3>& B,
const std::array<amrex::Real,3>& dx);
+ static void ComputeDivB (amrex::MultiFab& divB, int dcomp,
+ const std::array<const amrex::MultiFab*, 3>& B,
+ const std::array<amrex::Real,3>& dx, int ngrow);
+
static void ComputeDivE (amrex::MultiFab& divE, int dcomp,
const std::array<const amrex::MultiFab*, 3>& B,
const std::array<amrex::Real,3>& dx);
+ static void ComputeDivE (amrex::MultiFab& divE, int dcomp,
+                             const std::array<const amrex::MultiFab*, 3>& E,
+ const std::array<amrex::Real,3>& dx, int ngrow);
+
void SyncCurrent (const std::array<const amrex::MultiFab*,3>& fine,
const std::array< amrex::MultiFab*,3>& crse,
int ref_ratio);
@@ -593,6 +601,7 @@ private:
int insitu_int;
int insitu_start;
std::string insitu_config;
+ int insitu_pin_mesh;
};
#endif
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index 181da6360..1828ebcec 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -463,11 +463,13 @@ WarpX::ReadParameters ()
insitu_start = 0;
insitu_int = 0;
insitu_config = "";
+ insitu_pin_mesh = 0;
ParmParse pp("insitu");
pp.query("int", insitu_int);
pp.query("start", insitu_start);
pp.query("config", insitu_config);
+ pp.query("pin_mesh", insitu_pin_mesh);
}
}
@@ -843,6 +845,26 @@ WarpX::ComputeDivB (MultiFab& divB, int dcomp,
}
void
+WarpX::ComputeDivB (MultiFab& divB, int dcomp,
+ const std::array<const MultiFab*, 3>& B,
+ const std::array<Real,3>& dx, int ngrow)
+{
+#ifdef _OPENMP
+#pragma omp parallel
+#endif
+ for (MFIter mfi(divB, true); mfi.isValid(); ++mfi)
+ {
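+        // grow the tile box by ngrow so divB is also computed in the ghost cells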
+ Box bx = mfi.growntilebox(ngrow);
+ WRPX_COMPUTE_DIVB(bx.loVect(), bx.hiVect(),
+ BL_TO_FORTRAN_N_ANYD(divB[mfi],dcomp),
+ BL_TO_FORTRAN_ANYD((*B[0])[mfi]),
+ BL_TO_FORTRAN_ANYD((*B[1])[mfi]),
+ BL_TO_FORTRAN_ANYD((*B[2])[mfi]),
+ dx.data());
+ }
+}
+
+void
WarpX::ComputeDivE (MultiFab& divE, int dcomp,
const std::array<const MultiFab*, 3>& E,
const std::array<Real,3>& dx)
@@ -863,6 +885,26 @@ WarpX::ComputeDivE (MultiFab& divE, int dcomp,
}
void
+WarpX::ComputeDivE (MultiFab& divE, int dcomp,
+ const std::array<const MultiFab*, 3>& E,
+ const std::array<Real,3>& dx, int ngrow)
+{
+#ifdef _OPENMP
+#pragma omp parallel
+#endif
+ for (MFIter mfi(divE, true); mfi.isValid(); ++mfi)
+ {
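+        // grow the tile box by ngrow so divE is also computed in the ghost cells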
+ Box bx = mfi.growntilebox(ngrow);
+ WRPX_COMPUTE_DIVE(bx.loVect(), bx.hiVect(),
+ BL_TO_FORTRAN_N_ANYD(divE[mfi],dcomp),
+ BL_TO_FORTRAN_ANYD((*E[0])[mfi]),
+ BL_TO_FORTRAN_ANYD((*E[1])[mfi]),
+ BL_TO_FORTRAN_ANYD((*E[2])[mfi]),
+ dx.data());
+ }
+}
+
+void
WarpX::applyFilter (MultiFab& dstmf, const MultiFab& srcmf, int scomp, int dcomp, int ncomp)
{
ncomp = std::min(ncomp, srcmf.nComp());
diff --git a/Source/WarpXIO.cpp b/Source/WarpXIO.cpp
index 70985ac7e..93a359d7c 100644
--- a/Source/WarpXIO.cpp
+++ b/Source/WarpXIO.cpp
@@ -488,11 +488,11 @@ WarpX::UpdateInSitu () const
Vector<const MultiFab*> srcmf(AMREX_SPACEDIM);
PackPlotDataPtrs(srcmf, current_fp[lev]);
int dcomp = 0;
- amrex::average_edge_to_cellcenter(mf[lev], dcomp, srcmf);
+ amrex::average_edge_to_cellcenter(mf[lev], dcomp, srcmf, ngrow);
#if (AMREX_SPACEDIM == 2)
MultiFab::Copy(mf[lev], mf[lev], dcomp+1, dcomp+2, 1, ngrow);
- amrex::average_node_to_cellcenter(mf[lev], dcomp+1, *current_fp[lev][1], 0, 1);
+ amrex::average_node_to_cellcenter(mf[lev], dcomp+1, *current_fp[lev][1], 0, 1, ngrow);
#endif
if (lev == 0)
{
@@ -503,10 +503,10 @@ WarpX::UpdateInSitu () const
dcomp += 3;
PackPlotDataPtrs(srcmf, Efield_aux[lev]);
- amrex::average_edge_to_cellcenter(mf[lev], dcomp, srcmf);
+ amrex::average_edge_to_cellcenter(mf[lev], dcomp, srcmf, ngrow);
#if (AMREX_SPACEDIM == 2)
MultiFab::Copy(mf[lev], mf[lev], dcomp+1, dcomp+2, 1, ngrow);
- amrex::average_node_to_cellcenter(mf[lev], dcomp+1, *Efield_aux[lev][1], 0, 1);
+ amrex::average_node_to_cellcenter(mf[lev], dcomp+1, *Efield_aux[lev][1], 0, 1, ngrow);
#endif
if (lev == 0)
{
@@ -517,10 +517,10 @@ WarpX::UpdateInSitu () const
dcomp += 3;
PackPlotDataPtrs(srcmf, Bfield_aux[lev]);
- amrex::average_face_to_cellcenter(mf[lev], dcomp, srcmf);
+ amrex::average_face_to_cellcenter(mf[lev], dcomp, srcmf, ngrow);
#if (AMREX_SPACEDIM == 2)
MultiFab::Copy(mf[lev], mf[lev], dcomp+1, dcomp+2, 1, ngrow);
- MultiFab::Copy(mf[lev], *Bfield_aux[lev][1], 0, dcomp+1, 1, ngrow);
+ MultiFab::Copy(mf[lev], *Bfield_aux[lev][1], 0, dcomp+1, 1, ngrow);
#endif
if (lev == 0)
{
@@ -532,16 +532,14 @@ WarpX::UpdateInSitu () const
if (plot_part_per_cell)
{
- MultiFab temp_dat(grids[lev],mf[lev].DistributionMap(),1,0);
- temp_dat.setVal(0);
+ MultiFab temp_dat(grids[lev], mf[lev].DistributionMap(), 1, ngrow);
+ temp_dat.setVal(0, ngrow);
// MultiFab containing number of particles in each cell
mypc->Increment(temp_dat, lev);
- MultiFab::Copy(mf[lev], temp_dat, 0, dcomp, 1, 0);
+ MultiFab::Copy(mf[lev], temp_dat, 0, dcomp, 1, ngrow);
if (lev == 0)
- {
varnames.push_back("part_per_cell");
- }
dcomp += 1;
}
@@ -554,14 +552,13 @@ WarpX::UpdateInSitu () const
#ifdef _OPENMP
#pragma omp parallel
#endif
- for (MFIter mfi(mf[lev]); mfi.isValid(); ++mfi) {
- (mf[lev])[mfi].setVal(static_cast<Real>(npart_in_grid[mfi.index()]), dcomp);
- }
- if (lev == 0)
- {
- varnames.push_back("part_per_grid");
- }
- dcomp += 1;
+ for (MFIter mfi(mf[lev]); mfi.isValid(); ++mfi)
+ (mf[lev])[mfi].setVal(static_cast<Real>(npart_in_grid[mfi.index()]), dcomp);
+
+ if (lev == 0)
+ varnames.push_back("part_per_grid");
+
+ dcomp += 1;
}
if (plot_part_per_proc)
@@ -570,30 +567,30 @@ WarpX::UpdateInSitu () const
#ifdef _OPENMP
#pragma omp parallel reduction(+:n_per_proc)
#endif
- for (MFIter mfi(mf[lev]); mfi.isValid(); ++mfi) {
+ for (MFIter mfi(mf[lev]); mfi.isValid(); ++mfi)
n_per_proc += npart_in_grid[mfi.index()];
- }
- mf[lev].setVal(static_cast<Real>(n_per_proc), dcomp,1);
+
+    mf[lev].setVal(static_cast<Real>(n_per_proc), dcomp, 1, ngrow); // 1 component, ngrow ghosts
+
if (lev == 0)
- {
varnames.push_back("part_per_proc");
- }
+
dcomp += 1;
}
}
if (plot_proc_number)
{
+ Real procid = static_cast<Real>(ParallelDescriptor::MyProc());
#ifdef _OPENMP
#pragma omp parallel
#endif
- for (MFIter mfi(mf[lev]); mfi.isValid(); ++mfi) {
- (mf[lev])[mfi].setVal(static_cast<Real>(ParallelDescriptor::MyProc()), dcomp);
- }
+ for (MFIter mfi(mf[lev]); mfi.isValid(); ++mfi)
+ (mf[lev])[mfi].setVal(procid, dcomp);
+
if (lev == 0)
- {
varnames.push_back("proc_number");
- }
+
dcomp += 1;
}
@@ -601,56 +598,56 @@ WarpX::UpdateInSitu () const
{
ComputeDivB(mf[lev], dcomp,
{Bfield_aux[lev][0].get(),Bfield_aux[lev][1].get(),Bfield_aux[lev][2].get()},
- WarpX::CellSize(lev));
+ WarpX::CellSize(lev), ngrow);
if (lev == 0)
- {
varnames.push_back("divB");
- }
+
dcomp += 1;
}
if (plot_dive)
{
const BoxArray& ba = amrex::convert(boxArray(lev),IntVect::TheUnitVector());
- MultiFab dive(ba,DistributionMap(lev),1,0);
+ MultiFab dive(ba, DistributionMap(lev), 1, ngrow);
+
ComputeDivE(dive, 0,
{Efield_aux[lev][0].get(), Efield_aux[lev][1].get(), Efield_aux[lev][2].get()},
- WarpX::CellSize(lev));
- amrex::average_node_to_cellcenter(mf[lev], dcomp, dive, 0, 1);
+ WarpX::CellSize(lev), ngrow);
+
+ amrex::average_node_to_cellcenter(mf[lev], dcomp, dive, 0, 1, ngrow);
+
if (lev == 0)
- {
varnames.push_back("divE");
- }
+
dcomp += 1;
}
if (plot_rho)
{
- amrex::average_node_to_cellcenter(mf[lev], dcomp, *rho_fp[lev], 0, 1);
+ amrex::average_node_to_cellcenter(mf[lev], dcomp, *rho_fp[lev], 0, 1, ngrow);
if (lev == 0)
- {
varnames.push_back("rho");
- }
+
dcomp += 1;
}
if (plot_F)
{
- amrex::average_node_to_cellcenter(mf[lev], dcomp, *F_fp[lev], 0, 1);
+ amrex::average_node_to_cellcenter(mf[lev], dcomp, *F_fp[lev], 0, 1, ngrow);
+
if (lev == 0)
- {
varnames.push_back("F");
- }
+
dcomp += 1;
}
if (plot_finepatch)
{
PackPlotDataPtrs(srcmf, Efield_fp[lev]);
- amrex::average_edge_to_cellcenter(mf[lev], dcomp, srcmf);
+ amrex::average_edge_to_cellcenter(mf[lev], dcomp, srcmf, ngrow);
#if (AMREX_SPACEDIM == 2)
MultiFab::Copy(mf[lev], mf[lev], dcomp+1, dcomp+2, 1, ngrow);
- amrex::average_node_to_cellcenter(mf[lev], dcomp+1, *Efield_fp[lev][1], 0, 1);
+ amrex::average_node_to_cellcenter(mf[lev], dcomp+1, *Efield_fp[lev][1], 0, 1, ngrow);
#endif
if (lev == 0)
{
@@ -661,7 +658,7 @@ WarpX::UpdateInSitu () const
dcomp += 3;
PackPlotDataPtrs(srcmf, Bfield_fp[lev]);
- amrex::average_face_to_cellcenter(mf[lev], dcomp, srcmf);
+ amrex::average_face_to_cellcenter(mf[lev], dcomp, srcmf, ngrow);
#if (AMREX_SPACEDIM == 2)
MultiFab::Copy(mf[lev], mf[lev], dcomp+1, dcomp+2, 1, ngrow);
MultiFab::Copy(mf[lev], *Bfield_fp[lev][1], 0, dcomp+1, 1, ngrow);
@@ -686,10 +683,10 @@ WarpX::UpdateInSitu () const
{
std::array<std::unique_ptr<MultiFab>, 3> E = getInterpolatedE(lev);
PackPlotDataPtrs(srcmf, E);
- amrex::average_edge_to_cellcenter(mf[lev], dcomp, srcmf);
+ amrex::average_edge_to_cellcenter(mf[lev], dcomp, srcmf, ngrow);
#if (AMREX_SPACEDIM == 2)
MultiFab::Copy(mf[lev], mf[lev], dcomp+1, dcomp+2, 1, ngrow);
- amrex::average_node_to_cellcenter(mf[lev], dcomp+1, *E[1], 0, 1);
+ amrex::average_node_to_cellcenter(mf[lev], dcomp+1, *E[1], 0, 1, ngrow);
#endif
}
if (lev == 0)
@@ -709,7 +706,7 @@ WarpX::UpdateInSitu () const
{
std::array<std::unique_ptr<MultiFab>, 3> B = getInterpolatedB(lev);
PackPlotDataPtrs(srcmf, B);
- amrex::average_face_to_cellcenter(mf[lev], dcomp, srcmf);
+ amrex::average_face_to_cellcenter(mf[lev], dcomp, srcmf, ngrow);
#if (AMREX_SPACEDIM == 2)
MultiFab::Copy(mf[lev], mf[lev], dcomp+1, dcomp+2, 1, ngrow);
MultiFab::Copy(mf[lev], *B[1], 0, dcomp+1, 1, ngrow);
diff --git a/Source/WarpXInitData.cpp b/Source/WarpXInitData.cpp
index 101331e38..496b14e7a 100644
--- a/Source/WarpXInitData.cpp
+++ b/Source/WarpXInitData.cpp
@@ -50,6 +50,7 @@ WarpX::InitData ()
insitu_bridge = new amrex::AmrMeshInSituBridge;
insitu_bridge->setEnabled(insitu_int > 0 ? 1 : 0);
insitu_bridge->setConfig(insitu_config);
+ insitu_bridge->setPinMesh(insitu_pin_mesh);
if (insitu_bridge->initialize())
{
amrex::ErrorStream()
diff --git a/Source/WarpXUtil.cpp b/Source/WarpXUtil.cpp
index eb0eec92b..4a884330a 100644
--- a/Source/WarpXUtil.cpp
+++ b/Source/WarpXUtil.cpp
@@ -38,9 +38,8 @@ void ReadBoostedFrameParameters(Real& gamma_boost, Real& beta_boost,
void ConvertLabParamsToBoost()
{
-
- Real gamma_boost, beta_boost;
- int max_level;
+ Real gamma_boost = 1., beta_boost = 0.;
+ int max_level = 0;
Vector<int> boost_direction {0,0,0};
ReadBoostedFrameParameters(gamma_boost, beta_boost, boost_direction);