aboutsummaryrefslogtreecommitdiff
path: root/Source
diff options
context:
space:
mode:
Diffstat (limited to 'Source')
-rw-r--r--Source/Diagnostics/FieldIO.cpp238
-rw-r--r--Source/WarpX.H12
-rw-r--r--Source/WarpX.cpp50
3 files changed, 146 insertions, 154 deletions
diff --git a/Source/Diagnostics/FieldIO.cpp b/Source/Diagnostics/FieldIO.cpp
index b1181f22f..5c523804e 100644
--- a/Source/Diagnostics/FieldIO.cpp
+++ b/Source/Diagnostics/FieldIO.cpp
@@ -10,6 +10,21 @@
using namespace amrex;
+namespace
+{
+ // Return true if any element in elems is in vect, false otherwise
+ static bool is_in_vector(std::vector<std::string> vect,
+ std::vector<std::string> elems)
+ {
+ bool value = false;
+ for (std::string elem : elems){
+ if (std::find(vect.begin(), vect.end(), elem) != vect.end())
+ value = true;
+ }
+ return value;
+ }
+}
+
#ifdef WARPX_USE_OPENPMD
/** \brief For a given field that is to be written to an openPMD file,
* set the metadata that indicates the physical unit.
@@ -296,152 +311,130 @@ AverageAndPackScalarField( MultiFab& mf_avg,
*/
void
WarpX::AverageAndPackFields ( Vector<std::string>& varnames,
- amrex::Vector<MultiFab>& mf_avg, const int ngrow) const
+ amrex::Vector<MultiFab>& mf_avg, const int ngrow) const
{
// Count how many different fields should be written (ncomp)
- const int ncomp = 0
- + static_cast<int>(plot_E_field)*3
- + static_cast<int>(plot_B_field)*3
- + static_cast<int>(plot_J_field)*3
- + static_cast<int>(plot_part_per_cell)
- + static_cast<int>(plot_part_per_grid)
- + static_cast<int>(plot_part_per_proc)
- + static_cast<int>(plot_proc_number)
- + static_cast<int>(plot_divb)
- + static_cast<int>(plot_dive)
- + static_cast<int>(plot_rho)
- + static_cast<int>(plot_F)
+ const int ncomp = fields_to_plot.size()
+ static_cast<int>(plot_finepatch)*6
+ static_cast<int>(plot_crsepatch)*6
+ static_cast<int>(costs[0] != nullptr);
-
+
// Loop over levels of refinement
for (int lev = 0; lev <= finest_level; ++lev)
{
// Allocate pointers to the `ncomp` fields that will be added
mf_avg.push_back( MultiFab(grids[lev], dmap[lev], ncomp, ngrow));
- // Go through the different fields, pack them into mf_avg[lev],
- // add the corresponding names to `varnames` and increment dcomp
- int dcomp = 0;
- if (plot_J_field) {
- AverageAndPackVectorField(mf_avg[lev], current_fp[lev], dcomp, ngrow);
- if(lev==0) for(auto name:{"jx","jy","jz"}) varnames.push_back(name);
- dcomp += 3;
+ // For E, B and J, if at least one component is requested,
+ // build cell-centered temporary MultiFab with 3 comps
+ MultiFab mf_tmp_E, mf_tmp_B, mf_tmp_J;
+        // Build mf_tmp_E if at least one component of E is requested
+ if (is_in_vector(fields_to_plot, {"Ex", "Ey", "Ez"} )){
+ // Allocate temp MultiFab with 3 components
+ mf_tmp_E = MultiFab(grids[lev], dmap[lev], 3, ngrow);
+ // Fill MultiFab mf_tmp_E with averaged E
+ AverageAndPackVectorField(mf_tmp_E, Efield_aux[lev], 0, ngrow);
}
- if (plot_E_field) {
- AverageAndPackVectorField(mf_avg[lev], Efield_aux[lev], dcomp, ngrow);
- if(lev==0) for(auto name:{"Ex","Ey","Ez"}) varnames.push_back(name);
- dcomp += 3;
+ // Same for B
+ if (is_in_vector(fields_to_plot, {"Bx", "By", "Bz"} )){
+ mf_tmp_B = MultiFab(grids[lev], dmap[lev], 3, ngrow);
+ AverageAndPackVectorField(mf_tmp_B, Bfield_aux[lev], 0, ngrow);
}
- if (plot_B_field) {
- AverageAndPackVectorField(mf_avg[lev], Bfield_aux[lev], dcomp, ngrow);
- if(lev==0) for(auto name:{"Bx","By","Bz"}) varnames.push_back(name);
- dcomp += 3;
+ // Same for J
+ if (is_in_vector(fields_to_plot, {"jx", "jy", "jz"} )){
+ mf_tmp_J = MultiFab(grids[lev], dmap[lev], 3, ngrow);
+ AverageAndPackVectorField(mf_tmp_J, current_fp[lev], 0, ngrow);
}
- if (plot_part_per_cell)
- {
- MultiFab temp_dat(grids[lev],mf_avg[lev].DistributionMap(),1,0);
- temp_dat.setVal(0);
-
- // MultiFab containing number of particles in each cell
- mypc->Increment(temp_dat, lev);
- AverageAndPackScalarField( mf_avg[lev], temp_dat, dcomp, ngrow );
- if(lev==0) varnames.push_back("part_per_cell");
- dcomp += 1;
- }
-
- if (plot_part_per_grid)
- {
- const Vector<long>& npart_in_grid = mypc->NumberOfParticlesInGrid(lev);
- // MultiFab containing number of particles per grid
- // (stored as constant for all cells in each grid)
+ int dcomp;
+        // Go through the different fields in fields_to_plot, pack them into
+        // mf_avg[lev], and add the corresponding names to `varnames`.
+ // plot_fine_patch and plot_coarse_patch are treated separately
+ // (after this for loop).
+ for (dcomp=0; dcomp<fields_to_plot.size(); dcomp++){
+ std::string fieldname = fields_to_plot[dcomp];
+ if(lev==0) varnames.push_back(fieldname);
+ if (fieldname == "Ex"){
+ MultiFab::Copy( mf_avg[lev], mf_tmp_E, 0, dcomp, 1, ngrow);
+ } else if (fieldname == "Ey"){
+ MultiFab::Copy( mf_avg[lev], mf_tmp_E, 1, dcomp, 1, ngrow);
+ } else if (fieldname == "Ez"){
+ MultiFab::Copy( mf_avg[lev], mf_tmp_E, 2, dcomp, 1, ngrow);
+ } else if (fieldname == "Bx"){
+ MultiFab::Copy( mf_avg[lev], mf_tmp_B, 0, dcomp, 1, ngrow);
+ } else if (fieldname == "By"){
+ MultiFab::Copy( mf_avg[lev], mf_tmp_B, 1, dcomp, 1, ngrow);
+ } else if (fieldname == "Bz"){
+ MultiFab::Copy( mf_avg[lev], mf_tmp_B, 2, dcomp, 1, ngrow);
+ } else if (fieldname == "jx"){
+ MultiFab::Copy( mf_avg[lev], mf_tmp_J, 0, dcomp, 1, ngrow);
+ } else if (fieldname == "jy"){
+ MultiFab::Copy( mf_avg[lev], mf_tmp_J, 1, dcomp, 1, ngrow);
+ } else if (fieldname == "jz"){
+ MultiFab::Copy( mf_avg[lev], mf_tmp_J, 2, dcomp, 1, ngrow);
+ } else if (fieldname == "rho"){
+ AverageAndPackScalarField( mf_avg[lev], *rho_fp[lev], dcomp, ngrow );
+ } else if (fieldname == "F"){
+ AverageAndPackScalarField( mf_avg[lev], *F_fp[lev], dcomp, ngrow);
+ } else if (fieldname == "part_per_cell") {
+ MultiFab temp_dat(grids[lev],mf_avg[lev].DistributionMap(),1,0);
+ temp_dat.setVal(0);
+ // MultiFab containing number of particles in each cell
+ mypc->Increment(temp_dat, lev);
+ AverageAndPackScalarField( mf_avg[lev], temp_dat, dcomp, ngrow );
+ } else if (fieldname == "part_per_grid"){
+ const Vector<long>& npart_in_grid = mypc->NumberOfParticlesInGrid(lev);
+ // MultiFab containing number of particles per grid
+ // (stored as constant for all cells in each grid)
#ifdef _OPENMP
#pragma omp parallel
#endif
- for (MFIter mfi(mf_avg[lev]); mfi.isValid(); ++mfi) {
- (mf_avg[lev])[mfi].setVal(static_cast<Real>(npart_in_grid[mfi.index()]),
- dcomp);
- }
- if(lev==0) varnames.push_back("part_per_grid");
- dcomp += 1;
- }
-
- if (plot_part_per_proc)
- {
- const Vector<long>& npart_in_grid = mypc->NumberOfParticlesInGrid(lev);
- // MultiFab containing number of particles per process
- // (stored as constant for all cells in each grid)
- long n_per_proc = 0;
+ for (MFIter mfi(mf_avg[lev]); mfi.isValid(); ++mfi) {
+ (mf_avg[lev])[mfi].setVal(static_cast<Real>(npart_in_grid[mfi.index()]),
+ dcomp);
+ }
+ } else if (fieldname == "part_per_proc"){
+ const Vector<long>& npart_in_grid = mypc->NumberOfParticlesInGrid(lev);
+ // MultiFab containing number of particles per process
+ // (stored as constant for all cells in each grid)
+ long n_per_proc = 0;
#ifdef _OPENMP
#pragma omp parallel reduction(+:n_per_proc)
#endif
- for (MFIter mfi(mf_avg[lev]); mfi.isValid(); ++mfi) {
- n_per_proc += npart_in_grid[mfi.index()];
- }
- mf_avg[lev].setVal(static_cast<Real>(n_per_proc), dcomp,1);
- if(lev==0) varnames.push_back("part_per_proc");
- dcomp += 1;
- }
-
- if (plot_proc_number)
- {
- // MultiFab containing the Processor ID
+ for (MFIter mfi(mf_avg[lev]); mfi.isValid(); ++mfi) {
+ n_per_proc += npart_in_grid[mfi.index()];
+ }
+ mf_avg[lev].setVal(static_cast<Real>(n_per_proc), dcomp,1);
+ } else if (fieldname == "proc_number"){
+ // MultiFab containing the Processor ID
#ifdef _OPENMP
#pragma omp parallel
#endif
- for (MFIter mfi(mf_avg[lev]); mfi.isValid(); ++mfi) {
- (mf_avg[lev])[mfi].setVal(static_cast<Real>(ParallelDescriptor::MyProc()),
- dcomp);
+ for (MFIter mfi(mf_avg[lev]); mfi.isValid(); ++mfi) {
+ (mf_avg[lev])[mfi].setVal(static_cast<Real>(ParallelDescriptor::MyProc()),
+ dcomp);
+ }
+ } else if (fieldname == "divB"){
+ if (do_nodal) amrex::Abort("TODO: do_nodal && plot divb");
+ ComputeDivB(mf_avg[lev], dcomp,
+ {Bfield_aux[lev][0].get(),
+ Bfield_aux[lev][1].get(),
+ Bfield_aux[lev][2].get()},
+ WarpX::CellSize(lev) );
+ } else if (fieldname == "divE"){
+ if (do_nodal) amrex::Abort("TODO: do_nodal && plot dive");
+ const BoxArray& ba = amrex::convert(boxArray(lev),IntVect::TheUnitVector());
+ MultiFab dive(ba,DistributionMap(lev),1,0);
+ ComputeDivE( dive, 0,
+ {Efield_aux[lev][0].get(),
+ Efield_aux[lev][1].get(),
+ Efield_aux[lev][2].get()},
+ WarpX::CellSize(lev) );
+ AverageAndPackScalarField( mf_avg[lev], dive, dcomp, ngrow );
+ } else {
+ amrex::Abort("unknown field in fields_to_plot: " + fieldname);
}
- if(lev==0) varnames.push_back("proc_number");
- dcomp += 1;
- }
-
- if (plot_divb)
- {
- if (do_nodal) amrex::Abort("TODO: do_nodal && plot_divb");
- ComputeDivB(mf_avg[lev], dcomp,
- {Bfield_aux[lev][0].get(),
- Bfield_aux[lev][1].get(),
- Bfield_aux[lev][2].get()},
- WarpX::CellSize(lev)
- );
- if(lev == 0) varnames.push_back("divB");
- dcomp += 1;
}
-
- if (plot_dive)
- {
- if (do_nodal) amrex::Abort("TODO: do_nodal && plot_dive");
- const BoxArray& ba = amrex::convert(boxArray(lev),IntVect::TheUnitVector());
- MultiFab dive(ba,DistributionMap(lev),1,0);
- ComputeDivE( dive, 0,
- {Efield_aux[lev][0].get(),
- Efield_aux[lev][1].get(),
- Efield_aux[lev][2].get()},
- WarpX::CellSize(lev)
- );
- AverageAndPackScalarField( mf_avg[lev], dive, dcomp, ngrow );
- if(lev == 0) varnames.push_back("divE");
- dcomp += 1;
- }
-
- if (plot_rho)
- {
- AverageAndPackScalarField( mf_avg[lev], *rho_fp[lev], dcomp, ngrow );
- if(lev == 0) varnames.push_back("rho");
- dcomp += 1;
- }
-
- if (plot_F)
- {
- AverageAndPackScalarField( mf_avg[lev], *F_fp[lev], dcomp, ngrow);
- if(lev == 0) varnames.push_back("F");
- dcomp += 1;
- }
-
if (plot_finepatch)
{
AverageAndPackVectorField( mf_avg[lev], Efield_fp[lev], dcomp, ngrow );
@@ -490,9 +483,8 @@ WarpX::AverageAndPackFields ( Vector<std::string>& varnames,
dcomp += 1;
}
- BL_ASSERT(dcomp == ncomp);
-
- } // end loop over levels of refinement
+ BL_ASSERT(dcomp == ncomp);
+ } // end loop over levels of refinement
};
diff --git a/Source/WarpX.H b/Source/WarpX.H
index 35de5c88d..a25eef9e4 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -536,25 +536,15 @@ private:
bool dump_plotfiles = true;
bool dump_openpmd = false;
#endif
- bool plot_E_field = true;
- bool plot_B_field = true;
- bool plot_J_field = true;
- bool plot_part_per_cell = true;
- bool plot_part_per_grid = false;
- bool plot_part_per_proc = false;
- bool plot_proc_number = false;
- bool plot_dive = false;
- bool plot_divb = false;
bool plot_rho = false;
- bool plot_F = false;
bool plot_finepatch = false;
bool plot_crsepatch = false;
bool plot_raw_fields = false;
bool plot_raw_fields_guards = false;
+ amrex::Vector<std::string> fields_to_plot;
int plot_coarsening_ratio = 1;
amrex::VisMF::Header::Version checkpoint_headerversion = amrex::VisMF::Header::NoFabHeader_v1;
-// amrex::VisMF::Header::Version plotfile_headerversion = amrex::VisMF::Header::NoFabHeader_v1;
amrex::VisMF::Header::Version plotfile_headerversion = amrex::VisMF::Header::Version_v1;
amrex::VisMF::Header::Version slice_plotfile_headerversion = amrex::VisMF::Header::Version_v1;
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index 1c0d01ce5..1f5ade13a 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -387,36 +387,46 @@ WarpX::ReadParameters ()
pp.query("dump_plotfiles", dump_plotfiles);
pp.query("plot_raw_fields", plot_raw_fields);
pp.query("plot_raw_fields_guards", plot_raw_fields_guards);
- if (ParallelDescriptor::NProcs() == 1) {
- plot_proc_number = false;
- }
- // Fields to dump into plotfiles
- pp.query("plot_E_field" , plot_E_field);
- pp.query("plot_B_field" , plot_B_field);
- pp.query("plot_J_field" , plot_J_field);
- pp.query("plot_part_per_cell", plot_part_per_cell);
- pp.query("plot_part_per_grid", plot_part_per_grid);
- pp.query("plot_part_per_proc", plot_part_per_proc);
- pp.query("plot_proc_number" , plot_proc_number);
- pp.query("plot_dive" , plot_dive);
- pp.query("plot_divb" , plot_divb);
- pp.query("plot_rho" , plot_rho);
- pp.query("plot_F" , plot_F);
pp.query("plot_coarsening_ratio", plot_coarsening_ratio);
+ bool user_fields_to_plot;
+ user_fields_to_plot = pp.queryarr("fields_to_plot", fields_to_plot);
+ if (not user_fields_to_plot){
+ // If not specified, set default values
+ fields_to_plot = {"Ex", "Ey", "Ez", "Bx", "By",
+ "Bz", "jx", "jy", "jz",
+ "part_per_cell"};
+ }
+    // Set plot_rho to true if the user requests it, so that
+    // rho is computed at each iteration.
+ if (std::find(fields_to_plot.begin(), fields_to_plot.end(), "rho")
+ != fields_to_plot.end()){
+ plot_rho = true;
+ }
+ // Sanity check if user requests to plot F
+ if (std::find(fields_to_plot.begin(), fields_to_plot.end(), "F")
+ != fields_to_plot.end()){
+ AMREX_ALWAYS_ASSERT_WITH_MESSAGE(do_dive_cleaning,
+ "plot F only works if warpx.do_dive_cleaning = 1");
+ }
+ // If user requests to plot proc_number for a serial run,
+ // delete proc_number from fields_to_plot
+ if (ParallelDescriptor::NProcs() == 1){
+ fields_to_plot.erase(std::remove(fields_to_plot.begin(),
+ fields_to_plot.end(),
+ "proc_number"),
+ fields_to_plot.end());
+ }
// Check that the coarsening_ratio can divide the blocking factor
for (int lev=0; lev<maxLevel(); lev++){
for (int comp=0; comp<AMREX_SPACEDIM; comp++){
if ( blockingFactor(lev)[comp] % plot_coarsening_ratio != 0 ){
- amrex::Abort("plot_coarsening_ratio should be an integer divisor of the blocking factor.");
+ amrex::Abort("plot_coarsening_ratio should be an integer "
+ "divisor of the blocking factor.");
}
}
}
- if (plot_F){
- AMREX_ALWAYS_ASSERT_WITH_MESSAGE(do_dive_cleaning,
- "plot_F only works if warpx.do_dive_cleaning = 1");
- }
pp.query("plot_finepatch", plot_finepatch);
if (maxLevel() > 0) {
pp.query("plot_crsepatch", plot_crsepatch);