aboutsummaryrefslogtreecommitdiff
path: root/Tools/read_raw_data.py
blob: 7dc36520dc18abb1f9d33e0ae0b7c762844ac58b (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
from glob import glob
import numpy as np
from collections import namedtuple

# Metadata parsed from the first four lines of an AMReX fab header file:
# file format version, how the data was written, number of components,
# and number of ghost cells.
HeaderInfo = namedtuple('HeaderInfo', ['version', 'how', 'ncomp', 'nghost'])

# Order in which the field components are stored inside each slice file
# (used to label the per-component arrays read back in _read_slice).
_component_names = ['Ex', 'Ey', 'Ez', 'Bx', 'By', 'Bz', 'jx', 'jy', 'jz', 'rho']

def read_data(plt_file):
    '''

    Read the raw (i.e. not averaged to cell centers) data from a WarpX
    plt file. The plt file must have been written with the
    plot_raw_fields option turned on, so that it contains a raw_data
    sub-directory. This is only really useful for single-level data.

    Arguments:

        plt_file : An AMReX plt_file file. Must contain a raw_data directory.

    Returns:

        A list of dictionaries, one per refinement level, mapping field
        name strings to numpy arrays.

    Example:

        >>> data = read_data("plt00016")
        >>> print(data[0].keys())
        >>> print(data[0]['Ex'].shape)

    '''
    all_data = []
    for raw_file in glob(plt_file + "/raw_fields/Level_*/"):
        level_data = {name: _read_field(raw_file, name)
                      for name in _get_field_names(raw_file)}
        all_data.append(level_data)
    return all_data


def read_lab_snapshot(snapshot):
    '''

    This reads the data from one of the lab frame snapshots generated when
    WarpX is run with boosted frame diagnostics turned on. It returns a
    dictionary of numpy arrays, where each key corresponds to one of the
    data fields ("Ex", "By,", etc... ). These values are cell-centered.

    Arguments:

        snapshot : path to a lab-frame snapshot directory containing a
                   Level_0 sub-directory with slice?????_H header files.

    Returns:

        A dict mapping component names to numpy arrays; the last axis
        indexes the slices (one per header file).

    '''

    hdrs = glob(snapshot + "/Level_0/slice?????_H")
    hdrs.sort()

    # All slices share the same transverse domain; size it from the first.
    boxes, file_names, offsets, header = _read_header(hdrs[0])
    dom_lo, dom_hi = _combine_boxes(boxes)
    domain_size = dom_hi - dom_lo + 1

    space_dim = len(dom_lo)

    data = {}
    for i in range(header.ncomp):
        # Bug fix: was `component_names`, which is undefined — the module
        # constant is `_component_names`.
        name = _component_names[i]
        if space_dim == 3:
            data[name] = np.zeros((domain_size[0], domain_size[1], len(hdrs)))
        elif space_dim == 2:
            data[name] = np.zeros((domain_size[0], len(hdrs)))

    # Stack each slice along the last axis. (The old `if data is None`
    # branch was dead code: data is always a dict at this point.)
    for i, hdr in enumerate(hdrs):
        slice_data = _read_slice(snapshot, hdr)
        for k, v in slice_data.items():
            if space_dim == 3:
                data[k][:, :, i] = v[:, :, 0]
            elif space_dim == 2:
                data[k][:, i] = v[:, 0]

    return data


def _get_field_names(raw_file):
    header_files = glob(raw_file + "*_H")
    return [hf.split("/")[-1][:-2] for hf in header_files]


def _string_to_numpy_array(s):
    return np.array([int(v) for v in s[1:-1].split(",")], dtype=np.int64)


def _line_to_numpy_arrays(line):
    lo_corner = _string_to_numpy_array(line[0][1:])
    hi_corner = _string_to_numpy_array(line[1][:])
    node_type = _string_to_numpy_array(line[2][:-1]) 
    return lo_corner, hi_corner, node_type


def _read_header(header_file):
    """Parse an AMReX fab header file.

    Returns a tuple (boxes, file_names, offsets, header) where:
        boxes      : list of (lo_corner, hi_corner, node_type) numpy-array
                     triples, with the lo/hi corners already grown by the
                     ghost-cell count so they cover the data as stored on disk
        file_names : on-disk data file name for each box, in order
        offsets    : byte offset of each box's data within its file
        header     : HeaderInfo namedtuple (version, how, ncomp, nghost)
    """
    with open(header_file, "r") as f:

        # First four lines: version, how-written flag, component count,
        # ghost-cell count.
        version = int(f.readline())
        how = int(f.readline())
        ncomp = int(f.readline())
        nghost = int(f.readline())

        header = HeaderInfo(version, how, ncomp, nghost)

        # skip the next line
        f.readline()

        # read boxes
        boxes = []
        for line in f:
            clean_line = line.strip().split()
            # A lone ')' closes the box list section.
            if clean_line == [')']:
                break
            lo_corner, hi_corner, node_type = _line_to_numpy_arrays(clean_line)
            # Grow each box by the ghost cells so the corners match the
            # data layout on disk.
            boxes.append((lo_corner - nghost,
                          hi_corner + nghost,
                          node_type))

        # read the file and offset position for the corresponding box
        file_names = []
        offsets = []
        for line in f:
            if line.startswith("FabOnDisk:"):
                clean_line = line.strip().split()
                file_names.append(clean_line[1])
                offsets.append(int(clean_line[2]))

    return boxes, file_names, offsets, header


def _combine_boxes(boxes):
    lo_corners, hi_corners = zip(*[(box[0], box[1]) for box in boxes])
    domain_lo = np.min(lo_corners, axis=0)
    domain_hi = np.max(hi_corners, axis=0)
    return domain_lo, domain_hi


def _read_field(raw_file, field_name):
    """Read one raw field from a plt file's raw_fields level directory.

    Arguments:

        raw_file   : path (with trailing slash) to the Level_* directory
        field_name : name of the field, e.g. "Ex"

    Returns:

        A numpy array covering the union of all the boxes (ghost cells
        included).
    """
    # Bug fix: was `raw_file + field + "_H"` — `field` is undefined here;
    # the parameter is `field_name`.
    header_file = raw_file + field_name + "_H"
    boxes, file_names, offsets, header = _read_header(header_file)

    ng = header.nghost
    dom_lo, dom_hi = _combine_boxes(boxes)
    data = np.zeros(dom_hi - dom_lo + 1)

    for box, fn, offset in zip(boxes, file_names, offsets):
        lo = box[0]
        hi = box[1]
        shape = hi - lo + 1
        with open(raw_file + fn, "rb") as f:
            f.seek(offset)
            f.readline()  # always skip the first line (the fab header)
            # np.product was removed in NumPy 2.0; np.prod is the portable spell.
            arr = np.fromfile(f, 'float64', np.prod(shape))
            arr = arr.reshape(shape, order='F')
            # NOTE(review): the `+ ng` offset assumes the combined domain's
            # lower corner sits at -ng (i.e. the valid region starts at 0) —
            # TODO confirm for multi-box headers.
            # Indexing with a tuple of slices — a list of slices is rejected
            # by modern NumPy.
            data[tuple(slice(l, h + 1) for l, h in zip(lo + ng, hi + ng))] = arr

    return data



def _read_slice(snapshot, header_fn):
    """Read all components of one lab-frame slice.

    Arguments:

        snapshot  : path to the snapshot directory
        header_fn : path to the slice's fab header file

    Returns:

        A dict mapping component names to numpy arrays covering the
        slice's combined domain.
    """
    boxes, file_names, offsets, header = _read_header(header_fn)

    ng = header.nghost
    dom_lo, dom_hi = _combine_boxes(boxes)

    all_data = {}
    for i in range(header.ncomp):
        all_data[_component_names[i]] = np.zeros(dom_hi - dom_lo + 1)

    for box, fn, offset in zip(boxes, file_names, offsets):
        lo = box[0] - dom_lo
        hi = box[1] - dom_lo
        shape = hi - lo + 1
        # np.product was removed in NumPy 2.0; np.prod is the portable spell.
        size = np.prod(shape)
        with open(snapshot + "/Level_0/" + fn, "rb") as f:
            f.seek(offset)
            f.readline()  # always skip the first line (the fab header)
            # Components are stored contiguously, one after another.
            arr = np.fromfile(f, 'float64', header.ncomp*size)
            for i in range(header.ncomp):
                comp_data = arr[i*size:(i+1)*size].reshape(shape, order='F')
                data = all_data[_component_names[i]]
                # Indexing with a tuple of slices — a list of slices is
                # rejected by modern NumPy.
                data[tuple(slice(l, h + 1) for l, h in zip(lo, hi))] = comp_data
                all_data[_component_names[i]] = data
    return all_data


if __name__ == "__main__":
    import matplotlib
    matplotlib.use('Agg')
    from matplotlib import pyplot as plt

    data = read_data("plt00040")

    # Report the shape and value range of every field on levels 0 and 1.
    for lev in range(2):
        for field_name, vals in data[lev].items():
            print(lev, field_name, vals.shape, vals.min(), vals.max())

    # Projection of the 'jx' field along the z-axis for level 0.
    plt.pcolormesh(data[0]['jx'].sum(axis=2))
    plt.savefig('jx')

    # Projection of the 'Bx_cp' field along the x-axis for level 1.
    plt.pcolormesh(data[1]['Bx_cp'].sum(axis=0))
    plt.savefig('Bx_cp')