blob: ad79ca969527fda26f039913d5c31ef7e707a858 (
plain) (
blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
|
#! /usr/bin/env python
import sys
import yt
import numpy as np
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
# Compare the plotfile written after a checkpoint/restart run against the
# plotfile from the original (uninterrupted) run, field by field, then run
# the WarpX checksum regression analysis on the restart output.
#
# Usage: analysis script is invoked with the restart plotfile path as argv[1];
# the benchmark plotfile is expected alongside it with an 'orig_' prefix.

# Load output data generated after restart
filename = sys.argv[1]
ds_restart = yt.load(filename)
ds_restart.force_periodicity()
# Covering grid gives a uniform array view of the full level-0 domain
ad_restart = ds_restart.covering_grid(level=0,
                                      left_edge=ds_restart.domain_left_edge,
                                      dims=ds_restart.domain_dimensions)

# Load output data generated from initial run
benchmark = 'orig_' + filename
ds_benchmark = yt.load(benchmark)
ds_benchmark.force_periodicity()
ad_benchmark = ds_benchmark.covering_grid(level=0,
                                          left_edge=ds_benchmark.domain_left_edge,
                                          dims=ds_benchmark.domain_dimensions)

# Loop over all fields (all particle species, all particle attributes, all grid fields)
# and compare output data generated from initial run with output data generated after restart
tolerance = 1e-12
print(f'\ntolerance = {tolerance:g}')
print()
for field in ds_benchmark.field_list:
    dr = ad_restart[field].squeeze().v
    db = ad_benchmark[field].squeeze().v
    # Max absolute difference, normalized to a relative error when the
    # benchmark field is not identically zero
    error = np.amax(np.abs(dr - db))
    if np.amax(np.abs(db)) != 0.:
        error /= np.amax(np.abs(db))
    print(f'field: {field}; error = {error:g}')
    assert error < tolerance, \
        f'field {field}: error {error:g} exceeds tolerance {tolerance:g}'
print()

# Check-sum analysis
# Derive the test name by stripping the plotfile suffix from the filename
# (assumes a 9-character suffix such as '_plt00010' — TODO confirm).
# Could also be os.path.split(os.getcwd())[1]
test_name = filename[:-9]
checksumAPI.evaluate_checksum(test_name, filename)
|