-rw-r--r--   .azure-pipelines.yml                                               3
-rw-r--r--   .zenodo.json                                                       5
-rw-r--r--   Docs/source/install/dependencies.rst                               8
-rw-r--r--   Docs/source/install/hpc/summit.rst                                 1
-rw-r--r--   Docs/source/usage/parameters.rst                                  25
-rwxr-xr-x   Examples/Tests/FieldProbe/analysis_field_probe.py                 57
-rw-r--r--   Examples/Tests/FieldProbe/inputs_2d                               83
-rw-r--r--   Examples/Tests/reduced_diags/inputs                               29
-rw-r--r--   Regression/WarpX-tests.ini                                        17
-rw-r--r--   Regression/requirements.txt                                        1
-rw-r--r--   Source/Diagnostics/ReducedDiags/FieldProbe.H                      40
-rw-r--r--   Source/Diagnostics/ReducedDiags/FieldProbe.cpp                   364
-rw-r--r--   Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp    3
-rw-r--r--   Source/Evolve/WarpXEvolve.cpp                                      1
-rw-r--r--   requirements.txt                                                   1
15 files changed, 533 insertions, 105 deletions
diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml
index 807a6ac66..e220c7aff 100644
--- a/.azure-pipelines.yml
+++ b/.azure-pipelines.yml
@@ -76,10 +76,11 @@ jobs:
set -eu -o pipefail
cat /proc/cpuinfo | grep "model name" | sort -u
df -h
+ sudo apt update
sudo apt install -y ccache curl gcc gfortran git g++ ninja-build \
openmpi-bin libopenmpi-dev \
libfftw3-dev libfftw3-mpi-dev libhdf5-openmpi-dev pkg-config make \
- python3 python3-pip python3-venv python3-setuptools libblas-dev liblapack-dev
+ python3 python3-pandas python3-pip python3-venv python3-setuptools libblas-dev liblapack-dev
ccache --set-config=max_size=10.0G
python3 -m pip install --upgrade pip
python3 -m pip install --upgrade setuptools
diff --git a/.zenodo.json b/.zenodo.json
index ba4cae71e..4185c4566 100644
--- a/.zenodo.json
+++ b/.zenodo.json
@@ -105,6 +105,11 @@
},
{
"affiliation": "Lawrence Berkeley National Laboratory",
+ "name": "Rheaume, Tiberius",
+ "orcid": "0000-0002-6710-0650"
+ },
+ {
+ "affiliation": "Lawrence Berkeley National Laboratory",
"name": "Rowan, Michael E.",
"orcid": "0000-0003-2406-1273"
},
diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst
index d2ff50898..0c28b14ff 100644
--- a/Docs/source/install/dependencies.rst
+++ b/Docs/source/install/dependencies.rst
@@ -74,7 +74,7 @@ If you also want to run runtime tests and added Python (``spack add python`` and
.. code-block:: bash
- python3 -m pip install matplotlib yt scipy numpy openpmd-api virtualenv
+ python3 -m pip install matplotlib yt scipy pandas numpy openpmd-api virtualenv
If you want to run the ``./run_test.sh`` :ref:`test script <developers-testing>`, which uses our legacy GNUmake build system, you need to set the following environment hints after ``spack env activate warpx-dev`` for dependent software:
@@ -125,7 +125,7 @@ Without MPI:
.. code-block:: bash
- conda create -n warpx-dev -c conda-forge blaspp ccache cmake compilers git lapackpp openpmd-api python numpy scipy yt fftw matplotlib mamba ninja pip virtualenv
+ conda create -n warpx-dev -c conda-forge blaspp ccache cmake compilers git lapackpp openpmd-api python numpy pandas scipy yt fftw matplotlib mamba ninja pip virtualenv
source activate warpx-dev
# compile WarpX with -DWarpX_MPI=OFF
@@ -134,7 +134,7 @@ With MPI (only Linux/macOS):
.. code-block:: bash
- conda create -n warpx-dev -c conda-forge blaspp ccache cmake compilers git lapackpp "openpmd-api=*=mpi_openmpi*" python numpy scipy yt "fftw=*=mpi_openmpi*" matplotlib mamba ninja openmpi pip virtualenv
+ conda create -n warpx-dev -c conda-forge blaspp ccache cmake compilers git lapackpp "openpmd-api=*=mpi_openmpi*" python numpy pandas scipy yt "fftw=*=mpi_openmpi*" matplotlib mamba ninja openmpi pip virtualenv
source activate warpx-dev
For legacy ``GNUmake`` builds, after each ``source activate warpx-dev``, you also need to set:
@@ -152,7 +152,7 @@ Apt (Debian/Ubuntu)
.. code-block:: bash
sudo apt update
- sudo apt install build-essential ccache cmake g++ git libfftw3-mpi-dev libfftw3-dev libhdf5-openmpi-dev libopenmpi-dev pkg-config python3 python3-matplotlib python3-numpy python3-pip python3-scipy python3-venv
+ sudo apt install build-essential ccache cmake g++ git libfftw3-mpi-dev libfftw3-dev libhdf5-openmpi-dev libopenmpi-dev pkg-config python3 python3-matplotlib python3-numpy python3-pandas python3-pip python3-scipy python3-venv
# optional:
# for CUDA, either install
diff --git a/Docs/source/install/hpc/summit.rst b/Docs/source/install/hpc/summit.rst
index 7b5a67f31..c6cbd081e 100644
--- a/Docs/source/install/hpc/summit.rst
+++ b/Docs/source/install/hpc/summit.rst
@@ -130,6 +130,7 @@ Optionally, download and install Python packages for :ref:`PICMI <usage-picmi>`
python3 -m pip install --upgrade wheel
python3 -m pip install --upgrade cython
python3 -m pip install --upgrade numpy
+ python3 -m pip install --upgrade pandas
python3 -m pip install --upgrade scipy
python3 -m pip install --upgrade mpi4py --no-binary mpi4py
python3 -m pip install --upgrade openpmd-api
diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst
index 176a2b6ed..0039007ce 100644
--- a/Docs/source/usage/parameters.rst
+++ b/Docs/source/usage/parameters.rst
@@ -2132,10 +2132,26 @@ Reduced Diagnostics
* ``FieldProbe``
This type computes the value of each component of the electric and magnetic fields
- and of the Poynting vector (a measure of electromagnetic flux) at a point in the domain.
- The point where the fields are measured is specified through the input parameters
- ``<reduced_diags_name>.x_probe``, ``<reduced_diags_name>.y_probe`` and
- ``<reduced_diags_name>.z_probe``.
+ and of the Poynting vector (a measure of electromagnetic flux) at points in the domain.
+
+ Multiple geometries for point probes can be specified via ``<reduced_diags_name>.probe_geometry = ...``:
+
+ * ``Point`` (default): a single point
+ * ``Line``: a line of points with equal spacing
+ * ``Plane``: a plane of points with equal spacing
+
+ **Point**: The point where the fields are measured is specified through the input parameters ``<reduced_diags_name>.x_probe``, ``<reduced_diags_name>.y_probe`` and ``<reduced_diags_name>.z_probe``.
+
+ **Line**: probes a one-dimensional line of points, creating a line detector.
+ Initial input parameters ``x_probe``, ``y_probe``, and ``z_probe`` designate one end of the line detector, while the far end is specified via ``<reduced_diags_name>.x1_probe``, ``<reduced_diags_name>.y1_probe``, ``<reduced_diags_name>.z1_probe``.
+ Additionally, ``<reduced_diags_name>.resolution`` must be defined to give the number of detector points along the line (equally spaced) to probe.
+
+ **Plane**: probes a two-dimensional plane of points, creating a square plane detector.
+ Initial input parameters ``x_probe``, ``y_probe``, and ``z_probe`` designate the center of the detector.
+ The detector plane is normal to a vector specified by ``<reduced_diags_name>.target_normal_x``, ``<reduced_diags_name>.target_normal_y``, and ``<reduced_diags_name>.target_normal_z``.
+ The top of the plane is perpendicular to an "up" vector denoted by ``<reduced_diags_name>.target_up_x``, ``<reduced_diags_name>.target_up_y``, and ``<reduced_diags_name>.target_up_z``.
+ The detector has a square "radius" (half the side length) given by ``<reduced_diags_name>.detector_radius``.
+ Similarly to the line detector, the plane detector requires a resolution ``<reduced_diags_name>.resolution``, which denotes the number of detector particles along each side of the square detector.
The output columns are
the value of the :math:`E_x` field,
@@ -2157,7 +2173,6 @@ Reduced Diagnostics
Integrated electric and magnetic field components can instead be obtained by specifying
``<reduced_diags_name>.integrate == true``.
-
* ``RhoMaximum``
This type computes the maximum and minimum values of the total charge density as well as
the maximum absolute value of the charge density of each charged species.
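
For post-processing, the FieldProbe output described above is a plain-text table that pandas can read directly. A minimal sketch, assuming the default space separator and the ``part_*`` column labels this diagnostic writes (file path as used by the new CI test):

    import pandas as pd

    # one row per probe particle per output step
    df = pd.read_csv("diags/reducedfiles/FP_line.txt", sep=" ")

    # select positions and Poynting flux at a given step
    sel = df["[0]step()"] == 500
    x = df.loc[sel, "[2]part_x_lev0-(m)"]
    S = df.loc[sel, "[11]part_S_lev0-(W*s/m^2)"]
    print(x.min(), x.max(), S.max())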
diff --git a/Examples/Tests/FieldProbe/analysis_field_probe.py b/Examples/Tests/FieldProbe/analysis_field_probe.py
new file mode 100755
index 000000000..7d7eaa0eb
--- /dev/null
+++ b/Examples/Tests/FieldProbe/analysis_field_probe.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+#
+# Copyright 2021-2022 Tiberius Rheaume
+#
+# This file is part of WarpX.
+#
+# License: BSD-3-Clause-LBNL
+
+"""
+This script tests the accuracy of the FieldProbe diagnostic by observing a plane
+wave undergoing single-slit diffraction. The input file inputs_2d is used. This
+file defines the simulation box, laser pulse, embedded boundary with a single slit,
+and line of detector points. The plane wave is initialized near the negative z end
+of the simulation box, interacts with the embedded boundary at z = 0, and
+undergoes diffraction at the slit. The electromagnetic flux is calculated
+at the line detector, which is placed perpendicular to z beyond the slit. The
+test checks that the detected EM flux matches the expected values,
+which can be computed analytically.
+"""
+import numpy as np
+import pandas as pd
+
+filename = "diags/reducedfiles/FP_line.txt"
+
+# Open data file
+df = pd.read_csv(filename, sep=' ')
+df = df.sort_values(by=['[2]part_x_lev0-(m)'])
+
+# Select position and Intensity of timestep 500
+x = df.query('`[0]step()` == 500')['[2]part_x_lev0-(m)']
+S = df.query('`[0]step()` == 500')['[11]part_S_lev0-(W*s/m^2)']
+xvals = x.to_numpy()
+svals = S.to_numpy()
+
+# Default intensity is highest measured value for plane
+# wave interacting with single slit
+I_0 = np.max(S)
+def I_envelope (x, lam = 0.2e-6, a = 0.3e-6, D = 1.7e-6):
+ arg = np.pi * a / lam * np.sin(np.arctan(x / D))
+ return np.sinc( arg / np.pi )**2
+
+# Count non-outlier values away from simulation boundaries
+counter = np.arange(60, 140, 2)
+
+# Count average error from expected values
+error = 0
+for a in counter:
+ b = I_0 * I_envelope(xvals[a])
+ c = svals[a]
+ error += abs((c-b)/b) * 100.0
+averror = error / len(counter)
+
+# average error threshold set at 2.5%
+if averror > 2.5:
+ print('Average error greater than 2.5%')
+
+assert averror < 2.5
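
If appended to the script above (so that xvals, svals, I_0, and I_envelope are in scope), a short matplotlib sketch can visualize the agreement; this is illustrative only and not part of the CI check:

    import matplotlib
    matplotlib.use("Agg")  # headless backend for CI machines
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot(xvals, svals, label="FieldProbe (measured)")
    ax.plot(xvals, I_0 * I_envelope(xvals), "--", label="analytic envelope")
    ax.set_xlabel("x (m)")
    ax.set_ylabel("S (W*s/m^2)")
    ax.legend()
    fig.savefig("single_slit_envelope.png")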
diff --git a/Examples/Tests/FieldProbe/inputs_2d b/Examples/Tests/FieldProbe/inputs_2d
new file mode 100644
index 000000000..92189d278
--- /dev/null
+++ b/Examples/Tests/FieldProbe/inputs_2d
@@ -0,0 +1,83 @@
+#################################
+# Domain, Resolution & Numerics
+#
+
+# time-scale
+stop_time = 0.016e-12 # [s]
+
+# Coarse resolution with cell size = lambda/16 allows for quick CI tests
+amr.n_cell = 320 192
+
+# simulation box, no MR
+# note: increase z (space & cells) for converging ion energy
+amr.max_level = 0
+geometry.dims = 2
+geometry.prob_lo = -2e-6 -.4e-6 # [m]
+geometry.prob_hi = 2e-6 2e-6
+
+# Boundary condition
+boundary.field_lo = absorbing_silver_mueller absorbing_silver_mueller
+boundary.field_hi = absorbing_silver_mueller absorbing_silver_mueller
+
+# Order of particle shape factors
+algo.particle_shape = 1
+
+# numerical tuning
+warpx.cfl = 0.999
+warpx.use_filter = 1 # bilinear current/charge filter
+
+# field solver: yee (default), ckc, or psatd (fft)
+algo.maxwell_solver = yee
+
+##################################
+## Embedded Boundary
+
+my_constants.sheetRadius = 6.25e-9 # [m] 1/2 cell
+my_constants.slitWidth = 0.3e-6 # [m] width of slit = lambda * 2
+warpx.eb_implicit_function = "(
+ if(abs(z)<=sheetRadius and abs(x)>=(slitWidth/2.0), 1.0, -1.0) )"
+
+#################################
+## Laser Pulse Profile
+#
+# Note: we make the beam really wide so it behaves like a plane wave
+##
+lasers.names = laser1
+laser1.position = 0. 0. -.35e-6 # position of the laser plane (antenna)
+laser1.direction = 0. 0. 1. # the plane's (antenna's) normal direction
+laser1.polarization = 1. 0. 0. # the main polarization vector
+laser1.a0 = 0.001 # maximum normalized amplitude of the laser field (dimensionless)
+laser1.wavelength = .2e-6 # central wavelength of the laser pulse [m]
+laser1.profile = Gaussian
+laser1.profile_waist = 2.2e-5 # beam waist (E(w_0)=E_0/e) [m]
+laser1.profile_duration = 5.e-15 # pulse length (E(tau)=E_0/e; tau=tau_E=FWHM_I/1.17741) [s]
+laser1.profile_t_peak = 5.e-15 # time until peak intensity reached at the laser plane [s]
+laser1.profile_focal_distance = .35e-6 # focal distance from the antenna [m]
+
+#################################
+## Diagnostics
+##
+diagnostics.diags_names = diag1
+diag1.intervals = 100
+diag1.diag_type = Full
+#diag1.format = openpmd
+diag1.fields_to_plot = Ex Ey Ez Bx By Bz
+
+#################################
+## Reduced Diagnostics
+##
+#
+warpx.reduced_diags_names = FP_line
+FP_line.type = FieldProbe
+FP_line.intervals = 100
+FP_line.integrate = 1
+FP_line.probe_geometry = Line
+FP_line.x_probe = -1.5e-6
+FP_line.y_probe = 0.
+FP_line.z_probe = 1.7e-6
+FP_line.x1_probe = 1.5e-6
+FP_line.y1_probe = 0.
+FP_line.z1_probe = 1.7e-6
+FP_line.resolution = 201
+
+authors = "Tiberius Rheaume <tiberiusrheaume@lbl.gov>, Axel Huebl <axelhuebl@lbl.gov>"
diff --git a/Examples/Tests/reduced_diags/inputs b/Examples/Tests/reduced_diags/inputs
index 293115f1e..9b8e45d3e 100644
--- a/Examples/Tests/reduced_diags/inputs
+++ b/Examples/Tests/reduced_diags/inputs
@@ -68,7 +68,7 @@ photons.uz_th = 0.2
#################################
###### REDUCED DIAGS ############
#################################
-warpx.reduced_diags_names = EP NP EF PP PF MF FP FP_integrate MR FR_Max FR_Min FR_Integral
+warpx.reduced_diags_names = EP NP EF PP PF MF MR FP FP_integrate FP_line FP_plane FR_Max FR_Min FR_Integral
EP.type = ParticleEnergy
EP.intervals = 200
EF.type = FieldEnergy
@@ -86,11 +86,36 @@ FP.x_probe = 0.53125
FP.y_probe = 0.53125
FP.z_probe = 0.53125
FP_integrate.type = FieldProbe
-FP_integrate.intervals = 200
+FP_integrate.intervals = 20
+FP_integrate.probe_geometry = Point
FP_integrate.x_probe = 0.53125
FP_integrate.y_probe = 0.53125
FP_integrate.z_probe = 0.53125
FP_integrate.integrate = 1
+FP_line.type = FieldProbe
+FP_line.intervals = 200
+FP_line.probe_geometry = Line
+FP_line.x_probe = 0.53125
+FP_line.y_probe = 0.53125
+FP_line.z_probe = 0.53125
+FP_line.x1_probe = 0.70225
+FP_line.y1_probe = 0.70225
+FP_line.z1_probe = 0.70225
+FP_line.resolution = 100
+FP_plane.type = FieldProbe
+FP_plane.intervals = 200
+FP_plane.probe_geometry = Plane
+FP_plane.x_probe = 0.5
+FP_plane.y_probe = 0.5
+FP_plane.z_probe = 0.5
+FP_plane.target_normal_x = 0
+FP_plane.target_normal_y = 0
+FP_plane.target_normal_z = 1
+FP_plane.target_up_x = 0
+FP_plane.target_up_y = 1
+FP_plane.target_up_z = 0
+FP_plane.detector_radius = 0.25
+FP_plane.resolution = 10
MR.type = RhoMaximum
MR.intervals = 200
NP.type = ParticleNumber
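
For orientation, the FP_plane parameters above translate into detector particle positions as follows -- a numpy sketch mirroring the point-generation logic this patch adds to FieldProbe::InitData():

    import numpy as np

    # FP_plane parameters from the inputs block above
    center = np.array([0.5, 0.5, 0.5])
    normal = np.array([0.0, 0.0, 1.0])
    up = np.array([0.0, 1.0, 0.0])
    radius = 0.25
    resolution = 10

    # span the square detector between two opposite corners
    ortho = np.cross(normal, up)        # in-plane "horizontal" axis
    direction = ortho - up
    upperleft = center - direction * radius
    lowerright = center + direction * radius
    step = (lowerright - upperleft) / (resolution - 1)

    # outer loop steps x, inner loop steps y and z, as in the C++ code
    points = [(upperleft[0] + step[0] * i,
               upperleft[1] + step[1] * j,
               upperleft[2] + step[2] * j)
              for i in range(resolution)
              for j in range(resolution)]
    print(len(points))  # resolution**2 = 100 detector particles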
diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini
index 04a579a20..6fc233892 100644
--- a/Regression/WarpX-tests.ini
+++ b/Regression/WarpX-tests.ini
@@ -2527,6 +2527,23 @@ doVis = 0
compareParticles = 0
analysisRoutine = Examples/Tests/ElectrostaticSphere/analysis_electrostatic_sphere.py
+[FieldProbe]
+buildDir = .
+inputFile = Examples/Tests/FieldProbe/inputs_2d
+runtime_params =
+dim = 2
+addToCompileString = USE_EB=TRUE
+cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_EB=ON
+restartTest = 0
+useMPI = 1
+numprocs = 2
+useOMP = 1
+numthreads = 1
+compileTest = 0
+doVis = 0
+compareParticles = 0
+analysisRoutine = Examples/Tests/FieldProbe/analysis_field_probe.py
+
[embedded_circle]
buildDir = .
inputFile = Examples/Tests/embedded_circle/inputs_2d
diff --git a/Regression/requirements.txt b/Regression/requirements.txt
index a281ad2b2..6ae75e268 100644
--- a/Regression/requirements.txt
+++ b/Regression/requirements.txt
@@ -2,5 +2,6 @@ matplotlib
mpi4py
numpy
openpmd-api
+pandas
scipy
yt
diff --git a/Source/Diagnostics/ReducedDiags/FieldProbe.H b/Source/Diagnostics/ReducedDiags/FieldProbe.H
index 8028d987d..15c5d18f2 100644
--- a/Source/Diagnostics/ReducedDiags/FieldProbe.H
+++ b/Source/Diagnostics/ReducedDiags/FieldProbe.H
@@ -17,6 +17,17 @@
#include <unordered_map>
#include <string>
+#include <vector>
+
+/**
+ * Enumeration of the detector geometry used to distribute probe points (point vs. line vs. plane)
+ */
+enum struct DetectorGeometry
+{
+ Point = 0,
+ Line,
+ Plane
+};
/**
* This class mainly contains a function that computes the value of each component
@@ -52,18 +63,41 @@ public:
* Define constants used throughout FieldProbe
*/
- //! noutputs is 7 (Ex, Ey, Ez, Bx, By, Bz, S)
- static constexpr int noutputs = FieldProbePIdx::nattribs;
+ //! noutputs is 10 (x, y, z, Ex, Ey, Ez, Bx, By, Bz, S)
+ static constexpr int noutputs = FieldProbePIdx::nattribs + 3;
private:
amrex::Real x_probe, y_probe, z_probe;
+ amrex::Real x1_probe, y1_probe, z1_probe;
+ amrex::Real target_normal_x, target_normal_y, target_normal_z;
+ amrex::Real target_up_x, target_up_y, target_up_z;
+ amrex::Real detector_radius;
+
+ //! counts number of particles for all MPI ranks
+ long m_valid_particles {0};
+
+ //! determines geometry of detector point distribution
+ DetectorGeometry m_probe_geometry = DetectorGeometry::Point;
+
+ //! determines the number of particles placed for non-point geometries
+ int m_resolution = 0;
+
+ //! vector to which each rank pushes its probe data
+ amrex::Vector<amrex::Real> m_data;
+
+ //! array used by the IOProcessor rank to store and output the gathered data
+ amrex::Vector<amrex::Real> m_data_out;
//! this is the particle container in which probe particles are stored
FieldProbeParticleContainer m_probe;
- //! if true, integrate values over time instead of probing instantaneous values
+ //! if true, integrate values over time instead of probing instantaneous values
bool m_field_probe_integrate = false;
+
+ //! particle shape used for field gather
int interp_order = 1;
+
+ //! determines whether to gather raw fields or interpolated data
bool raw_fields = false;
/**
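
With the three position columns prepended, each probe particle now contributes a flat record of noutputs = 10 reals in the order [x, y, z, Ex, Ey, Ez, Bx, By, Bz, S]. A small numpy sketch of how a flat buffer like m_data_out maps back to per-particle rows (buffer contents hypothetical):

    import numpy as np

    NOUTPUTS = 10  # x, y, z, Ex, Ey, Ez, Bx, By, Bz, S

    # stand-in for m_data_out: three particles' worth of values
    flat = np.arange(3 * NOUTPUTS, dtype=float)

    records = flat.reshape(-1, NOUTPUTS)   # one row per probe particle
    x, y, z = records[:, 0], records[:, 1], records[:, 2]
    S = records[:, 9]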
diff --git a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp
index d29795c35..3abf83347 100644
--- a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp
+++ b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp
@@ -31,9 +31,13 @@
#include <AMReX_Vector.H>
#include <cmath>
+#include <cstddef>
+#include <cstdlib>
+#include <iostream>
#include <ostream>
#include <string>
#include <unordered_map>
+#include <vector>
using namespace amrex;
@@ -72,18 +76,55 @@ FieldProbe::FieldProbe (std::string rd_name)
* Define whether or not to integrate fields
*/
amrex::ParmParse pp_rd_name(rd_name);
- getWithParser(pp_rd_name, "x_probe", x_probe);
- getWithParser(pp_rd_name, "y_probe", y_probe);
- getWithParser(pp_rd_name, "z_probe", z_probe);
+ std::string m_probe_geometry_str = "Point";
+ pp_rd_name.query("probe_geometry", m_probe_geometry_str);
+
+ if (m_probe_geometry_str == "Point")
+ {
+ m_probe_geometry = DetectorGeometry::Point;
+ getWithParser(pp_rd_name, "x_probe", x_probe);
+ getWithParser(pp_rd_name, "y_probe", y_probe);
+ getWithParser(pp_rd_name, "z_probe", z_probe);
+ }
+ else if (m_probe_geometry_str == "Line")
+ {
+ m_probe_geometry = DetectorGeometry::Line;
+ getWithParser(pp_rd_name, "x_probe", x_probe);
+ getWithParser(pp_rd_name, "y_probe", y_probe);
+ getWithParser(pp_rd_name, "z_probe", z_probe);
+ getWithParser(pp_rd_name, "x1_probe", x1_probe);
+ getWithParser(pp_rd_name, "y1_probe", y1_probe);
+ getWithParser(pp_rd_name, "z1_probe", z1_probe);
+ getWithParser(pp_rd_name, "resolution", m_resolution);
+ }
+ else if (m_probe_geometry_str == "Plane")
+ {
+ m_probe_geometry = DetectorGeometry::Plane;
+ getWithParser(pp_rd_name, "x_probe", x_probe);
+ getWithParser(pp_rd_name, "y_probe", y_probe);
+ getWithParser(pp_rd_name, "z_probe", z_probe);
+ getWithParser(pp_rd_name, "target_normal_x", target_normal_x);
+ getWithParser(pp_rd_name, "target_normal_y", target_normal_y);
+ getWithParser(pp_rd_name, "target_normal_z", target_normal_z);
+ getWithParser(pp_rd_name, "target_up_x", target_up_x);
+ getWithParser(pp_rd_name, "target_up_y", target_up_y);
+ getWithParser(pp_rd_name, "target_up_z", target_up_z);
+ getWithParser(pp_rd_name, "detector_radius", detector_radius);
+ getWithParser(pp_rd_name, "resolution", m_resolution);
+ }
+ else
+ {
+ std::string err_str = "ERROR: Invalid probe geometry '";
+ err_str.append(m_probe_geometry_str);
+ err_str.append("'. Valid geometries are Point, Line or Plane.");
+ amrex::Abort(err_str);
+ }
pp_rd_name.query("integrate", m_field_probe_integrate);
pp_rd_name.query("raw_fields", raw_fields);
pp_rd_name.query("interp_order", interp_order);
AMREX_ALWAYS_ASSERT_WITH_MESSAGE(interp_order <= WarpX::nox ,
"Field probe interp_order should be less than or equal to algo.particle_shape");
- // resize data array
- m_data.resize(noutputs, 0.0_rt);
-
if (ParallelDescriptor::IOProcessor())
{
if ( m_IsNotRestart )
@@ -93,7 +134,6 @@ FieldProbe::FieldProbe (std::string rd_name)
// write header row
int c = 0;
- ofs << "#";
ofs << "[" << c++ << "]step()";
ofs << m_sep;
ofs << "[" << c++ << "]time(s)";
@@ -104,44 +144,50 @@ FieldProbe::FieldProbe (std::string rd_name)
{
u_map =
{
- {FieldProbePIdx::Ex , " (V*s/m) "},
- {FieldProbePIdx::Ey , " (V*s/m) "},
- {FieldProbePIdx::Ez , " (V*s/m) "},
- {FieldProbePIdx::Bx , " (T*s) "},
- {FieldProbePIdx::By , " (T*s) "},
- {FieldProbePIdx::Bz , " (T*s) "},
- {FieldProbePIdx::S , " (W*s/m^2) "}
+ {FieldProbePIdx::Ex , "-(V*s/m)"},
+ {FieldProbePIdx::Ey , "-(V*s/m)"},
+ {FieldProbePIdx::Ez , "-(V*s/m)"},
+ {FieldProbePIdx::Bx , "-(T*s)"},
+ {FieldProbePIdx::By , "-(T*s)"},
+ {FieldProbePIdx::Bz , "-(T*s)"},
+ {FieldProbePIdx::S , "-(W*s/m^2)"}
};
}
else
{
u_map =
{
- {FieldProbePIdx::Ex , " (V/m) "},
- {FieldProbePIdx::Ey , " (V/m) "},
- {FieldProbePIdx::Ez , " (V/m) "},
- {FieldProbePIdx::Bx , " (T) "},
- {FieldProbePIdx::By , " (T) "},
- {FieldProbePIdx::Bz , " (T) "},
- {FieldProbePIdx::S , " (W/m^2) "}
+ {FieldProbePIdx::Ex , "-(V/m)"},
+ {FieldProbePIdx::Ey , "-(V/m)"},
+ {FieldProbePIdx::Ez , "-(V/m)"},
+ {FieldProbePIdx::Bx , "-(T)"},
+ {FieldProbePIdx::By , "-(T)"},
+ {FieldProbePIdx::Bz , "-(T)"},
+ {FieldProbePIdx::S , "-(W/m^2)"}
};
}
for (int lev = 0; lev < nLevel; ++lev)
{
ofs << m_sep;
- ofs << "[" << c++ << "]probe_Ex_lev" + std::to_string(lev) + u_map[FieldProbePIdx::Ex];
+ ofs << "[" << c++ << "]part_x_lev" + std::to_string(lev) + "-(m)";
+ ofs << m_sep;
+ ofs << "[" << c++ << "]part_y_lev" + std::to_string(lev) + "-(m)";
+ ofs << m_sep;
+ ofs << "[" << c++ << "]part_z_lev" + std::to_string(lev) + "-(m)";
+ ofs << m_sep;
+ ofs << "[" << c++ << "]part_Ex_lev" + std::to_string(lev) + u_map[FieldProbePIdx::Ex];
ofs << m_sep;
- ofs << "[" << c++ << "]probe_Ey_lev" + std::to_string(lev) + u_map[FieldProbePIdx::Ey];
+ ofs << "[" << c++ << "]part_Ey_lev" + std::to_string(lev) + u_map[FieldProbePIdx::Ey];
ofs << m_sep;
- ofs << "[" << c++ << "]probe_Ez_lev" + std::to_string(lev) + u_map[FieldProbePIdx::Ez];
+ ofs << "[" << c++ << "]part_Ez_lev" + std::to_string(lev) + u_map[FieldProbePIdx::Ez];
ofs << m_sep;
- ofs << "[" << c++ << "]probe_Bx_lev" + std::to_string(lev) + u_map[FieldProbePIdx::Bx];
+ ofs << "[" << c++ << "]part_Bx_lev" + std::to_string(lev) + u_map[FieldProbePIdx::Bx];
ofs << m_sep;
- ofs << "[" << c++ << "]probe_By_lev" + std::to_string(lev) + u_map[FieldProbePIdx::By];
+ ofs << "[" << c++ << "]part_By_lev" + std::to_string(lev) + u_map[FieldProbePIdx::By];
ofs << m_sep;
- ofs << "[" << c++ << "]probe_Bz_lev" + std::to_string(lev) + u_map[FieldProbePIdx::Bz];
+ ofs << "[" << c++ << "]part_Bz_lev" + std::to_string(lev) + u_map[FieldProbePIdx::Bz];
ofs << m_sep;
- ofs << "[" << c++ << "]probe_S_lev" + std::to_string(lev) + u_map[FieldProbePIdx::S]; //update all units if integrating (might be energy / m^2)
+ ofs << "[" << c++ << "]part_S_lev" + std::to_string(lev) + u_map[FieldProbePIdx::S];
}
ofs << std::endl;
@@ -153,24 +199,111 @@ FieldProbe::FieldProbe (std::string rd_name)
void FieldProbe::InitData ()
{
- //create 1D array for X, Y, and Z of particles
- amrex::Vector<amrex::ParticleReal> xpos;
- amrex::Vector<amrex::ParticleReal> ypos;
- amrex::Vector<amrex::ParticleReal> zpos;
+ if (m_probe_geometry == DetectorGeometry::Point)
+ {
- // for now, only one MPI rank adds a probe particle
- if (ParallelDescriptor::IOProcessor())
+ // create 1D vector for X, Y, and Z of particles
+ amrex::Vector<amrex::ParticleReal> xpos;
+ amrex::Vector<amrex::ParticleReal> ypos;
+ amrex::Vector<amrex::ParticleReal> zpos;
+
+ // for now, only one MPI rank adds a probe particle
+ if (ParallelDescriptor::IOProcessor())
+ {
+ xpos.push_back(x_probe);
+ ypos.push_back(y_probe);
+ zpos.push_back(z_probe);
+ }
+
+ // add particles on lev 0 to m_probe
+ m_probe.AddNParticles(0, xpos, ypos, zpos);
+ }
+ else if (m_probe_geometry == DetectorGeometry::Line)
{
- xpos.push_back(x_probe);
- ypos.push_back(y_probe);
- zpos.push_back(z_probe);
+ amrex::Vector<amrex::ParticleReal> xpos;
+ amrex::Vector<amrex::ParticleReal> ypos;
+ amrex::Vector<amrex::ParticleReal> zpos;
+ if (ParallelDescriptor::IOProcessor())
+ {
+ xpos.reserve(m_resolution);
+ ypos.reserve(m_resolution);
+ zpos.reserve(m_resolution);
+
+ // (final - initial) / steps; array contains dx, dy, dz
+ amrex::Real DetLineStepSize[3]{
+ (x1_probe - x_probe) / (m_resolution - 1),
+ (y1_probe - y_probe) / (m_resolution - 1),
+ (z1_probe - z_probe) / (m_resolution - 1)};
+ for ( int step = 0; step < m_resolution; step++)
+ {
+ xpos.push_back(x_probe + (DetLineStepSize[0] * step));
+ ypos.push_back(y_probe + (DetLineStepSize[1] * step));
+ zpos.push_back(z_probe + (DetLineStepSize[2] * step));
+ }
+ }
+ m_probe.AddNParticles(0, xpos, ypos, zpos);
+ }
+ else if (m_probe_geometry == DetectorGeometry::Plane)
+ {
+ amrex::Vector<amrex::ParticleReal> xpos;
+ amrex::Vector<amrex::ParticleReal> ypos;
+ amrex::Vector<amrex::ParticleReal> zpos;
+ if (ParallelDescriptor::IOProcessor())
+ {
+ std::size_t const res2 = std::size_t(m_resolution) * std::size_t(m_resolution);
+ xpos.reserve(res2);
+ ypos.reserve(res2);
+ zpos.reserve(res2);
+
+ // create vector orthonormal to input vectors
+ amrex::Real orthotarget[3]{
+ target_normal_y * target_up_z - target_normal_z * target_up_y,
+ target_normal_z * target_up_x - target_normal_x * target_up_z,
+ target_normal_x * target_up_y - target_normal_y * target_up_x};
+ // find upper left and lower right bounds of detector
+ amrex::Real direction[3]{
+ orthotarget[0] - target_up_x,
+ orthotarget[1] - target_up_y,
+ orthotarget[2] - target_up_z};
+ amrex::Real upperleft[3]{
+ x_probe - (direction[0] * detector_radius),
+ y_probe - (direction[1] * detector_radius),
+ z_probe - (direction[2] * detector_radius)};
+ amrex::Real lowerright[3]{
+ x_probe + (direction[0] * detector_radius),
+ y_probe + (direction[1] * detector_radius),
+ z_probe + (direction[2] * detector_radius)};
+ // create array containing point-to-point step size
+ amrex::Real DetPlaneStepSize[3]{
+ (lowerright[0] - upperleft[0]) / (m_resolution - 1),
+ (lowerright[1] - upperleft[1]) / (m_resolution - 1),
+ (lowerright[2] - upperleft[2]) / (m_resolution - 1)};
+ amrex::Real temp_pos[3]{};
+ // Walk along the top edge of the plane (arbitrarily chosen to step in x);
+ // for each point along the top, fill in the y and z positions beneath it, then push back
+ for ( int step = 0; step < m_resolution; step++)
+ {
+ temp_pos[0] = upperleft[0] + (DetPlaneStepSize[0] * step);
+ for ( int yzstep = 0; yzstep < m_resolution; yzstep++)
+ {
+ temp_pos[1] = upperleft[1] + (DetPlaneStepSize[1] * yzstep);
+ temp_pos[2] = upperleft[2] + (DetPlaneStepSize[2] * yzstep);
+ xpos.push_back(temp_pos[0]);
+ ypos.push_back(temp_pos[1]);
+ zpos.push_back(temp_pos[2]);
+ }
+ }
+ }
+ m_probe.AddNParticles(0, xpos, ypos, zpos);
+ }
+ else
+ {
+ amrex::Abort("ERROR: Invalid probe geometry. Valid geometries are point (0) and line (1).");
}
-
- // add np particles on lev 0 to m_probe
- m_probe.AddNParticles(0, xpos, ypos, zpos);
}
-void FieldProbe::LoadBalance() {
+void FieldProbe::LoadBalance ()
+{
m_probe.Redistribute();
}
@@ -239,15 +372,28 @@ void FieldProbe::ComputeDiags (int step)
amrex::IndexType const Bytype = By.ixType();
amrex::IndexType const Bztype = Bz.ixType();
- //defined for use in determining which CPU contains the particles
- int probe_proc = -1;
-
// loop over each particle
// TODO: add OMP parallel as in PhysicalParticleContainer::Evolve
+ long numparticles = 0; // particles on this MPI rank
using MyParIter = FieldProbeParticleContainer::iterator;
for (MyParIter pti(m_probe, lev); pti.isValid(); ++pti)
{
+ // count particles on this MPI rank
+ numparticles += pti.numParticles();
+ }
+
+ if (m_intervals.contains(step+1))
+ {
+ // reset m_data vector to clear previously pushed values, then reserve space
+ m_data.clear();
+ m_data.shrink_to_fit();
+ m_data.reserve(numparticles * noutputs);
+ }
+
+ for (MyParIter pti(m_probe, lev); pti.isValid(); ++pti)
+ {
const auto getPosition = GetParticlePosition(pti);
+ auto const np = pti.numParticles();
if( ProbeInDomain() )
{
@@ -295,7 +441,6 @@ void FieldProbe::ComputeDiags (int step)
const amrex::GpuArray<amrex::Real, 3> xyzmin_arr = {xyzmin[0], xyzmin[1], xyzmin[2]};
const Dim3 lo = lbound(box);
- // Interpolating to the probe positions for each particle
// Temporarily defining modes and interp outside ParallelFor to avoid GPU compilation errors.
const int temp_modes = WarpX::n_rz_azimuthal_modes;
const int temp_interp_order = interp_order;
@@ -303,7 +448,7 @@ void FieldProbe::ComputeDiags (int step)
const bool temp_field_probe_integrate = m_field_probe_integrate;
amrex::Real const dt = WarpX::GetInstance().getdt(lev);
- long const np = pti.numParticles();
+ // Interpolating to the probe positions for each particle
amrex::ParallelFor( np, [=] AMREX_GPU_DEVICE (long ip)
{
amrex::ParticleReal xp, yp, zp;
@@ -354,7 +499,6 @@ void FieldProbe::ComputeDiags (int step)
}
else
{
- // Either save the interpolated fields or the raw fields depending on the raw_fields flag
part_Ex[ip] = Exp; //remember to add lorentz transform
part_Ey[ip] = Eyp; //remember to add lorentz transform
part_Ez[ip] = Ezp; //remember to add lorentz transform
@@ -364,63 +508,107 @@ void FieldProbe::ComputeDiags (int step)
part_S[ip] = S; //remember to add lorentz transform
}
});// ParallelFor Close
-
// this check is here because for m_field_probe_integrate == True, we always compute
// but we only write when we truly are in an output interval step
- if (m_intervals.contains(step+1)) {
- for (int ip = 0; ip < np; ip++) {
- // Fill output array
- m_data[ip * noutputs + FieldProbePIdx::Ex] = part_Ex[ip];
- m_data[ip * noutputs + FieldProbePIdx::Ey] = part_Ey[ip];
- m_data[ip * noutputs + FieldProbePIdx::Ez] = part_Ez[ip];
- m_data[ip * noutputs + FieldProbePIdx::Bx] = part_Bx[ip];
- m_data[ip * noutputs + FieldProbePIdx::By] = part_By[ip];
- m_data[ip * noutputs + FieldProbePIdx::Bz] = part_Bz[ip];
- m_data[ip * noutputs + FieldProbePIdx::S] = part_S[ip];
+ if (m_intervals.contains(step+1))
+ {
+ for (auto ip=0; ip < np; ip++)
+ {
+ amrex::ParticleReal xp, yp, zp;
+ getPosition(ip, xp, yp, zp);
+
+ // push to output vector
+ m_data.push_back(xp);
+ m_data.push_back(yp);
+ m_data.push_back(zp);
+ m_data.push_back(part_Ex[ip]);
+ m_data.push_back(part_Ey[ip]);
+ m_data.push_back(part_Ez[ip]);
+ m_data.push_back(part_Bx[ip]);
+ m_data.push_back(part_By[ip]);
+ m_data.push_back(part_Bz[ip]);
+ m_data.push_back(part_S[ip]);
}
- /* m_data now contains up-to-date values for:
- * [Ex, Ey, Ez, Bx, By, Bz, and S] */
+ /* m_data now contains up-to-date values for:
+ * [x, y, z, Ex, Ey, Ez, Bx, By, Bz, and S] */
}
-
- // do we have the one and only probe particle on our MPI rank?
- if (np > 0)
- probe_proc = amrex::ParallelDescriptor::MyProc();
}
-
} // end particle iterator loop
-
- // make sure data is in m_data
Gpu::synchronize();
+ if (m_intervals.contains(step+1))
+ {
+ // store the total number of MPI ranks in mpisize
+ int mpisize = ParallelDescriptor::NProcs();
- // this check is here because for m_field_probe_integrate == True, we always compute
- // but we only write when we truly are in an output interval step
- if (m_intervals.contains(step+1)) {
- /*
- * All the processors have probe_proc = -1 except the one that contains the point, which
- * has probe_proc equal to a number >=0. Therefore, ReduceIntMax communicates to all the
- * processors the rank of the processor which contains the point
- */
- amrex::ParallelDescriptor::ReduceIntMax(probe_proc);
-
- if (probe_proc != amrex::ParallelDescriptor::IOProcessorNumber() and probe_proc != -1) {
- if (amrex::ParallelDescriptor::MyProc() == probe_proc) {
- amrex::ParallelDescriptor::Send(m_data.data(), noutputs,
- amrex::ParallelDescriptor::IOProcessorNumber(),
- 0);
- }
- if (amrex::ParallelDescriptor::MyProc()
- == amrex::ParallelDescriptor::IOProcessorNumber()) {
- amrex::ParallelDescriptor::Recv(m_data.data(), noutputs, probe_proc, 0);
+ // allocate space for length_vector, which will contain the size of m_data from each processor
+ amrex::Vector<int> length_vector;
+ amrex::Vector<int> localsize;
+
+ if (amrex::ParallelDescriptor::IOProcessor()) {
+ length_vector.resize(mpisize, 0);
+ }
+ localsize.resize(1,0);
+ localsize[0] = m_data.size();
+
+ // gather size of m_data from each processor
+ amrex::ParallelDescriptor::Gather(localsize.data(), 1,
+ length_vector.data(), 1,
+ amrex::ParallelDescriptor::IOProcessorNumber());
+
+ // The IO processor sums the values in length_vector to get the size of the total output array.
+ /* displs records the size of each m_data as well as the previous displs. This array
+ * tells Gatherv where in the m_data_out allocation to write the incoming data. */
+ long total_data_size = 0;
+ amrex::Vector<int> displs_vector;
+ if (amrex::ParallelDescriptor::IOProcessor()) {
+ displs_vector.resize(mpisize, 0);
+ displs_vector[0] = 0;
+ total_data_size += length_vector[0];
+ for (int i=1; i<mpisize; i++) {
+ displs_vector[i] = (displs_vector[i-1] + length_vector[i-1]);
+ total_data_size += length_vector[i];
}
+ // count valid particles (across all MPI ranks) so the output routine knows the output size
+ m_valid_particles = total_data_size / noutputs;
+ m_data_out.resize(total_data_size, 0);
}
- } // send to IO Processor
+ // m_data_out was resized above (and zero-initialized) to serve as the receive buffer
+ // gather m_data of varying lengths from all processors into m_data_out
+ amrex::ParallelDescriptor::Gatherv(m_data.data(), localsize[0],
+ m_data_out.data(), length_vector, displs_vector,
+ amrex::ParallelDescriptor::IOProcessorNumber());
+ }
}// end loop over refinement levels
+ // make sure the gathered data is in m_data_out on the IOProcessor
+ // TODO: In the future, we want to use a parallel I/O method instead (plotfiles or openPMD)
} // end void FieldProbe::ComputeDiags
void FieldProbe::WriteToFile (int step) const
{
if (ProbeInDomain() && amrex::ParallelDescriptor::IOProcessor())
{
- ReducedDiags::WriteToFile (step);
+ // open file
+ std::ofstream ofs{m_path + m_rd_name + "." + m_extension,
+ std::ofstream::out | std::ofstream::app};
+
+ // loop over num valid particles and write
+ for (int i = 0; i < m_valid_particles; i++)
+ {
+ ofs << std::fixed << std::defaultfloat;
+ ofs << step + 1;
+ ofs << m_sep;
+ ofs << std::fixed << std::setprecision(14) << std::scientific;
+ // write time
+ ofs << WarpX::GetInstance().gett_new(0);
+
+ for (int k = 0; k < noutputs; k++)
+ {
+ ofs << m_sep;
+ ofs << m_data_out[i * noutputs + k];
+ }
+ ofs << std::endl;
+ } // end loop over data size
+ // close file
+ ofs.close();
}
}
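
The variable-length Gatherv above hinges on the displacement bookkeeping; the same arithmetic in a standalone sketch (per-rank lengths hypothetical, no MPI required):

    # per-rank m_data sizes, as the IO processor receives them via Gather
    length_vector = [30, 0, 50, 20]  # hypothetical: rank 1 holds no probe particles

    # displs_vector[i] marks where rank i's data starts in the flat output buffer
    displs_vector = [0]
    for n in length_vector[:-1]:
        displs_vector.append(displs_vector[-1] + n)

    total_data_size = sum(length_vector)
    noutputs = 10
    m_valid_particles = total_data_size // noutputs
    print(displs_vector, total_data_size, m_valid_particles)  # [0, 30, 30, 80] 100 10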
diff --git a/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp b/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp
index c72362b6b..111e97e96 100644
--- a/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp
+++ b/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp
@@ -108,8 +108,7 @@ FieldProbeParticleContainer::AddNParticles (int lev,
#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
amrex::ignore_unused(y) ;
p.pos(0) = x[i];
- p.pos(1) = 0;
- p.pos(2) = z[i];
+ p.pos(1) = z[i];
#endif
// write position, cpu id, and particle id to particle
pinned_tile.push_back(p);
diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp
index 547f632e9..c3c297b56 100644
--- a/Source/Evolve/WarpXEvolve.cpp
+++ b/Source/Evolve/WarpXEvolve.cpp
@@ -335,6 +335,7 @@ WarpX::Evolve (int numsteps)
/// reduced diags
if (reduced_diags->m_plot_rd != 0)
{
+ reduced_diags->LoadBalance();
reduced_diags->ComputeDiags(step);
reduced_diags->WriteToFile(step);
}
diff --git a/requirements.txt b/requirements.txt
index d28aeb00c..360d50c47 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,3 +12,4 @@ picmistandard==0.0.19
# openpmd-api
# openpmd-viewer
# matplotlib
+# pandas