-rw-r--r--  .github/workflows/source.yml                    4
-rwxr-xr-x  .github/workflows/source/hasEOLwhiteSpace       4
-rw-r--r--  Source/Particles/MultiParticleContainer.cpp     2
-rw-r--r--  Tools/performance_tests/cori.py                52
-rw-r--r--  Tools/performance_tests/functions_perftest.py   4
-rw-r--r--  Tools/performance_tests/run_automated.py       10
-rw-r--r--  Tools/performance_tests/summit.py              10
7 files changed, 44 insertions, 42 deletions
diff --git a/.github/workflows/source.yml b/.github/workflows/source.yml
index db6dd1191..8e1dbec05 100644
--- a/.github/workflows/source.yml
+++ b/.github/workflows/source.yml
@@ -11,5 +11,5 @@ jobs:
     - uses: actions/checkout@v1
     - name: TABs
       run: .github/workflows/source/hasTabs
-    # - name: End-of-Line whitespaces
-    #   run: .github/workflows/source/hasEOLwhiteSpace
+    - name: End-of-Line whitespaces
+      run: .github/workflows/source/hasEOLwhiteSpace
diff --git a/.github/workflows/source/hasEOLwhiteSpace b/.github/workflows/source/hasEOLwhiteSpace
index 345786468..9c51dd716 100755
--- a/.github/workflows/source/hasEOLwhiteSpace
+++ b/.github/workflows/source/hasEOLwhiteSpace
@@ -40,9 +40,11 @@ then
   echo "Run the following command(s) on the above files to remove your"
   echo "end-of-line (EOL) white spaces:"
   echo ""
+  echo "GNU_SED=\$(sed --help >/dev/null 2>&1 && { echo 1; } || { echo 0; })"
+  echo "[[ \${GNU_SED} -eq 1 ]] && REPLACE=\"sed -i 's/[[:blank:]]\+$//'\" || REPLACE=\"sed -i '' -E 's/[[:blank:]]+$//'\""
   for i in ${files[@]}
   do
-    echo "sed -i 's/[[:blank:]]\+$//' $i"
+    echo "eval \${REPLACE} $i"
   done
 fi
diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp
index 9c9bdcec4..53ce0fbbc 100644
--- a/Source/Particles/MultiParticleContainer.cpp
+++ b/Source/Particles/MultiParticleContainer.cpp
@@ -106,7 +106,7 @@ MultiParticleContainer::ReadParameters ()
     for (auto const& name : photon_species) {
         auto it = std::find(species_names.begin(), species_names.end(), name);
         AMREX_ALWAYS_ASSERT_WITH_MESSAGE(
-            it != species_names.end(),
+            it != species_names.end(),
             "ERROR: species in particles.rigid_injected_species must be part of particles.species_names");
         int i = std::distance(species_names.begin(), it);
         species_types[i] = PCTypes::Photon;
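A note on the hasEOLwhiteSpace change above: GNU sed exits 0 on `sed --help`, while BSD sed (macOS) rejects the flag, which is what the generated `GNU_SED` probe keys on. The distinction matters twice over: GNU sed's `-i` takes no argument and its basic regex accepts `\+`, whereas BSD sed requires an explicit (here empty) backup suffix after `-i` and needs `-E` for the `+` quantifier. A sketch of the same probe in Python, for illustration only (the helper name is not from the repository):

    import subprocess

    def using_gnu_sed():
        # GNU sed exits 0 on `--help`; BSD sed rejects the flag with a
        # non-zero status, which is exactly what the shell probe checks.
        result = subprocess.run(['sed', '--help'],
                                stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)
        return result.returncode == 0

    if using_gnu_sed():
        replace = "sed -i 's/[[:blank:]]\\+$//'"        # GNU: bare -i, BRE \+
    else:
        replace = "sed -i '' -E 's/[[:blank:]]+$//'"    # BSD: -i '', ERE via -E
    print(replace)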
diff --git a/Tools/performance_tests/cori.py b/Tools/performance_tests/cori.py
index dbe3a1e2a..6e47f1175 100644
--- a/Tools/performance_tests/cori.py
+++ b/Tools/performance_tests/cori.py
@@ -31,8 +31,8 @@ def get_config_command(compiler, architecture):
         config_command += 'module load craype-haswell;'
     return config_command
 
-# This function runs a batch script with
-# dependencies to perform the analysis
+# This function runs a batch script with
+# dependencies to perform the analysis
 # after all performance tests are done.
 def process_analysis(automated, cwd, compiler, architecture, n_node_list, start_date):
     dependencies = ''
@@ -113,45 +113,45 @@ def get_test_list(n_repeat):
     test_list_unq = []
     # n_node is kept to None and passed in functions as an external argument
     # That way, several test_element_instance run with the same n_node on the same batch job
-    test_list_unq.append( test_element(input_file='automated_test_1_uniform_rest_32ppc',
-                                       n_mpi_per_node=8,
-                                       n_omp=8,
-                                       n_cell=[128, 128, 128],
+    test_list_unq.append( test_element(input_file='automated_test_1_uniform_rest_32ppc',
+                                       n_mpi_per_node=8,
+                                       n_omp=8,
+                                       n_cell=[128, 128, 128],
                                        max_grid_size=64,
                                        blocking_factor=32,
                                        n_step=10) )
-    test_list_unq.append( test_element(input_file='automated_test_2_uniform_rest_1ppc',
-                                       n_mpi_per_node=8,
-                                       n_omp=8,
-                                       n_cell=[256, 256, 512],
+    test_list_unq.append( test_element(input_file='automated_test_2_uniform_rest_1ppc',
+                                       n_mpi_per_node=8,
+                                       n_omp=8,
+                                       n_cell=[256, 256, 512],
                                        max_grid_size=64,
                                        blocking_factor=32,
                                        n_step=10) )
-    test_list_unq.append( test_element(input_file='automated_test_3_uniform_drift_4ppc',
-                                       n_mpi_per_node=8,
-                                       n_omp=8,
-                                       n_cell=[128, 128, 128],
+    test_list_unq.append( test_element(input_file='automated_test_3_uniform_drift_4ppc',
+                                       n_mpi_per_node=8,
+                                       n_omp=8,
+                                       n_cell=[128, 128, 128],
                                        max_grid_size=64,
                                        blocking_factor=32,
                                        n_step=10) )
-    test_list_unq.append( test_element(input_file='automated_test_4_labdiags_2ppc',
-                                       n_mpi_per_node=8,
-                                       n_omp=8,
-                                       n_cell=[64, 64, 128],
+    test_list_unq.append( test_element(input_file='automated_test_4_labdiags_2ppc',
+                                       n_mpi_per_node=8,
+                                       n_omp=8,
+                                       n_cell=[64, 64, 128],
                                        max_grid_size=64,
                                        blocking_factor=32,
                                        n_step=50) )
-    test_list_unq.append( test_element(input_file='automated_test_5_loadimbalance',
-                                       n_mpi_per_node=8,
-                                       n_omp=8,
-                                       n_cell=[128, 128, 128],
+    test_list_unq.append( test_element(input_file='automated_test_5_loadimbalance',
+                                       n_mpi_per_node=8,
+                                       n_omp=8,
+                                       n_cell=[128, 128, 128],
                                        max_grid_size=64,
                                        blocking_factor=32,
                                        n_step=10) )
-    test_list_unq.append( test_element(input_file='automated_test_6_output_2ppc',
-                                       n_mpi_per_node=8,
-                                       n_omp=8,
-                                       n_cell=[128, 256, 256],
+    test_list_unq.append( test_element(input_file='automated_test_6_output_2ppc',
+                                       n_mpi_per_node=8,
+                                       n_omp=8,
+                                       n_cell=[128, 256, 256],
                                        max_grid_size=64,
                                        blocking_factor=32,
                                        n_step=0) )
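As the in-code comment in the hunk above explains, each `test_element` is built with `n_node=None`, and the node count is applied later so that all six tests sharing the same `n_node` can be packed into one batch job. A self-contained sketch of that pattern: the constructor signature matches the functions_perftest.py diff below, but the attribute assignments and driver loop are illustrative assumptions, not the repository's exact code.

    # Illustrative sketch only; see functions_perftest.py for the real class.
    class test_element():
        def __init__(self, input_file=None, n_node=None, n_mpi_per_node=None,
                     n_omp=None, n_cell=None, n_step=None, max_grid_size=None,
                     blocking_factor=None):
            self.input_file = input_file
            self.n_node = n_node          # left as None until a batch job is built
            self.n_mpi_per_node = n_mpi_per_node
            self.n_omp = n_omp
            self.n_cell = n_cell
            self.n_step = n_step
            self.max_grid_size = max_grid_size
            self.blocking_factor = blocking_factor

    test_list_unq = [
        test_element(input_file='automated_test_1_uniform_rest_32ppc',
                     n_mpi_per_node=8, n_omp=8, n_cell=[128, 128, 128],
                     max_grid_size=64, blocking_factor=32, n_step=10),
    ]

    # One n_node value is applied to the whole list, so every test in the
    # list can run inside the same batch job.
    for n_node in [1, 2, 8]:              # hypothetical node counts
        for test in test_list_unq:
            test.n_node = n_node
            print(test.input_file, '->', n_node * test.n_mpi_per_node,
                  'MPI ranks,', test.n_omp, 'OpenMP threads each')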
diff --git a/Tools/performance_tests/functions_perftest.py b/Tools/performance_tests/functions_perftest.py
index 67622317a..8bc1000d7 100644
--- a/Tools/performance_tests/functions_perftest.py
+++ b/Tools/performance_tests/functions_perftest.py
@@ -7,7 +7,7 @@ import git
 
 # Each instance of this class contains information for a single test.
 class test_element():
-    def __init__(self, input_file=None, n_node=None, n_mpi_per_node=None,
+    def __init__(self, input_file=None, n_node=None, n_mpi_per_node=None,
                  n_omp=None, n_cell=None, n_step=None, max_grid_size=None,
                  blocking_factor=None):
         self.input_file = input_file
@@ -103,7 +103,7 @@ def run_batch_nnode(test_list, res_dir, bin_name, config_command, batch_string,
     bin_dir = cwd + 'Bin/'
     shutil.copy(bin_dir + bin_name, res_dir)
     os.chdir(res_dir)
-
+
     for count, current_test in enumerate(test_list):
         shutil.copy(cwd + current_test.input_file, res_dir)
         batch_file = 'batch_script.sh'
diff --git a/Tools/performance_tests/run_automated.py b/Tools/performance_tests/run_automated.py
index fd771faac..a6a05fb54 100644
--- a/Tools/performance_tests/run_automated.py
+++ b/Tools/performance_tests/run_automated.py
@@ -4,7 +4,7 @@ import pandas as pd
 from functions_perftest import store_git_hash, get_file_content, \
     run_batch_nnode, extract_dataframe
 
-# Get name of supercomputer and import configuration functions from
+# Get name of supercomputer and import configuration functions from
 # machine-specific file
 if os.getenv("LMOD_SYSTEM_NAME") == 'summit':
     machine = 'summit'
@@ -23,7 +23,7 @@ if os.getenv("NERSC_HOST") == 'cori':
 # requirements:
 # - python packages: gitpython and pandas
-# - AUTOMATED_PERF_TESTS: environment variables where warpx,
+# - AUTOMATED_PERF_TESTS: environment variables where warpx,
 #   amrex and picsar are installed ($AUTOMATED_PERF_TESTS/warpx etc.)
 # - SCRATCH: environment variable where performance results are written.
 #   This script will create folder $SCRATCH/performance_warpx/
@@ -99,7 +99,7 @@ if args.automated == True:
     push_on_perf_log_repo = False
     pull_3_repos = True
     recompile = True
-    if machine == 'summit':
+    if machine == 'summit':
         compiler = 'pgi'
         architecture = 'gpu'
@@ -154,7 +154,7 @@ if args.mode == 'run':
         git_repo.pull()
         git_repo = git.cmd.Git( warpx_dir )
         git_repo.pull()
-
+
     # Copy WarpX/GNUmakefile to current directory and recompile
     # with specific options for automated performance tests.
     # This way, performance test compilation does not mess with user's
@@ -207,7 +207,7 @@ if args.mode == 'run':
             run_string = get_run_string(current_run, architecture, n_node, count, bin_name, runtime_param_string)
             batch_string += run_string
         batch_string += 'rm -rf plotfiles lab_frame_data diags\n'
-
+
         submit_job_command = get_submit_job_command()
         # Run the simulations.
         run_batch_nnode(test_list_n_node, res_dir, bin_name, config_command, batch_string, submit_job_command)
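The first run_automated.py hunk above touches the machine-detection block: the script keys off environment variables that each site's module system sets (`LMOD_SYSTEM_NAME` on Summit, `NERSC_HOST` on Cori) and then uses the matching machine-specific module. A minimal sketch of that dispatch, assuming only the two variables shown in the diff:

    import os

    # Pick the machine from site-specific environment variables, mirroring
    # the checks in run_automated.py; the fallback value is an assumption.
    machine = 'unknown'
    if os.getenv("LMOD_SYSTEM_NAME") == 'summit':
        machine = 'summit'
    if os.getenv("NERSC_HOST") == 'cori':
        machine = 'cori'

    # The machine-specific module (summit.py or cori.py) supplies functions
    # such as get_config_command and process_analysis for that system.
    print('Detected machine:', machine)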
diff --git a/Tools/performance_tests/summit.py b/Tools/performance_tests/summit.py
index 69598f1fd..33b9e0981 100644
--- a/Tools/performance_tests/summit.py
+++ b/Tools/performance_tests/summit.py
@@ -13,8 +13,8 @@ def get_config_command(compiler, architecture):
     config_command += 'module load cuda;'
     return config_command
 
-# This function runs a batch script with
-# dependencies to perform the analysis
+# This function runs a batch script with
+# dependencies to perform the analysis
 # after all performance tests are done.
 def process_analysis(automated, cwd, compiler, architecture, n_node_list, start_date):
@@ -44,7 +44,7 @@ def process_analysis(automated, cwd, compiler, architecture, n_node_list, start_
     f_exe.write(batch_string)
     f_exe.close()
     os.system('chmod 700 ' + batch_file)
-
+
     print( 'process_analysis line: ' + 'bsub ' + batch_file)
     os.system('bsub ' + batch_file)
@@ -66,8 +66,8 @@ def get_batch_string(test_list, job_time_min, Cname, n_node):
     batch_string += '#BSUB -nnodes ' + str(n_node) + '\n'
     batch_string += '#BSUB -J ' + test_list[0].input_file + '\n'
     batch_string += '#BSUB -e error.txt\n'
-    batch_string += 'module load pgi\n'
-    batch_string += 'module load cuda\n'
+    batch_string += 'module load pgi\n'
+    batch_string += 'module load cuda\n'
     return batch_string
 
 def get_run_string(current_test, architecture, n_node, count, bin_name, runtime_param_string):
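For context on the summit.py hunks: `get_batch_string` accumulates an LSF (`#BSUB`) job header line by line, and `process_analysis` writes the result to a file, marks it executable, and submits it with `bsub`. A standalone sketch of that flow; the project name and walltime are placeholders, not values from the repository:

    import os

    # Build a minimal LSF batch script the way summit.py assembles batch_string.
    batch_string = '#!/bin/bash\n'
    batch_string += '#BSUB -P ABC123\n'            # placeholder project allocation
    batch_string += '#BSUB -W 30\n'                # placeholder walltime (minutes)
    batch_string += '#BSUB -nnodes ' + str(2) + '\n'
    batch_string += '#BSUB -J automated_test_1_uniform_rest_32ppc\n'
    batch_string += '#BSUB -e error.txt\n'
    batch_string += 'module load pgi\n'
    batch_string += 'module load cuda\n'

    # Write the script and make it executable, as process_analysis does.
    batch_file = 'batch_script.sh'
    with open(batch_file, 'w') as f_exe:
        f_exe.write(batch_string)
    os.system('chmod 700 ' + batch_file)
    # os.system('bsub ' + batch_file)   # submission line, Summit only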