1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
|
const std = @import("std");
const unicode = std.unicode;
pub const JavascriptString = []u16;
/// Converts a comptime UTF-8 string literal into a UTF-16LE code-unit string
/// (`JavascriptString` = `[]u16`) at compile time.
/// NOTE(review): `utf8ToUtf16LeStringLiteral` yields a pointer to a const,
/// sentinel-terminated array; confirm the coercion to the mutable `[]u16`
/// slice type is intended (callers must not write through the result).
pub fn newJavascriptString(comptime text: []const u8) JavascriptString {
    return unicode.utf8ToUtf16LeStringLiteral(text);
}
/// Index of an AST node.
pub const NodeIndex = u32;
/// Sentinel meaning "no node".
/// NOTE(review): this is maxInt(u32) - 2 (4294967293), not maxInt(u32) —
/// confirm the offset of 2 is intentional (e.g. other sentinels reserved).
pub const NodeIndexNone = 4294967293;
// TODO: figure out if we actually need this
// -- original comment --
// Files are parsed in parallel for speed. We want to allow each parser to
// generate symbol IDs that won't conflict with each other. We also want to be
// able to quickly merge symbol tables from all files into one giant symbol
// table.
//
// We can accomplish both goals by giving each symbol ID two parts: a source
// index that is unique to the parser goroutine, and an inner index that
// increments as the parser generates new symbol IDs. Then a symbol map can
// be an array of arrays indexed first by source index, then by inner index.
// The maps can be merged quickly by creating a single outer array containing
// all inner arrays from all parsed files.
/// Hash-map context for `Ref` keys using a 32-bit hash
/// (the shape expected by array-hash-map style containers).
pub const RefHashCtx = struct {
    /// Hashes the ref's full bit pattern down to 32 bits.
    pub fn hash(_: @This(), key: Ref) u32 {
        return Ref.hash(key);
    }

    /// Two refs are equal iff their packed bit patterns are identical.
    pub fn eql(_: @This(), ref: Ref, b: Ref) bool {
        return Ref.asU64(ref) == Ref.asU64(b);
    }
};
/// Hash-map context for `Ref` keys using a 64-bit hash
/// (the shape expected by `std.HashMap`-style containers).
pub const RefCtx = struct {
    /// Hashes the ref's full bit pattern to 64 bits.
    pub fn hash(_: @This(), key: Ref) u64 {
        return Ref.hash64(key);
    }

    /// Two refs are equal iff their packed bit patterns are identical.
    pub fn eql(_: @This(), ref: Ref, b: Ref) bool {
        return Ref.asU64(ref) == Ref.asU64(b);
    }
};
/// Returns `target` with the bit range starting at `start_bit`, up to and excluding
/// `start_bit + number_of_bits`, replaced by the low `number_of_bits` bits of `value`;
/// all other bits of `target` are preserved. If any bits of `value` above the low
/// `number_of_bits` are set, this function panics (when runtime safety is enabled).
///
/// ```zig
/// // Replace bits 2..6 of 0b10000000 with 0b1101:
/// const result = setBits(u8, 0b10000000, 2, 4, 0b00001101);
/// try testing.expectEqual(@as(u8, 0b10110100), result);
/// ```
///
/// ## Panics
/// With runtime safety enabled, panics if `value` has bits set outside the low
/// `number_of_bits` bits. All type/range misuse is a compile error instead.
pub fn setBits(
    comptime TargetType: type,
    target: TargetType,
    comptime start_bit: comptime_int,
    comptime number_of_bits: comptime_int,
    value: TargetType,
) TargetType {
    // First bit index past the range being written (exclusive upper bound).
    const end_bit = start_bit + number_of_bits;
    comptime {
        // All validation happens at compile time; only unsigned runtime ints are accepted.
        if (number_of_bits == 0) @compileError("non-zero number_of_bits must be provided");
        if (@typeInfo(TargetType) == .Int) {
            if (@typeInfo(TargetType).Int.signedness != .unsigned) {
                @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
            }
            if (start_bit >= @bitSizeOf(TargetType)) {
                @compileError("start_bit index is out of bounds of the bit field");
            }
            if (end_bit > @bitSizeOf(TargetType)) {
                @compileError("start_bit + number_of_bits is out of bounds of the bit field");
            }
        } else if (@typeInfo(TargetType) == .ComptimeInt) {
            @compileError("comptime_int is unsupported");
        } else {
            @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
        }
    }
    if (comptime std.debug.runtime_safety) {
        // `value` must fit entirely within the destination range; reading back its
        // low (end_bit - start_bit) bits must reproduce it unchanged.
        if (getBits(TargetType, value, 0, (end_bit - start_bit)) != value) @panic("value exceeds bit range");
    }
    // Mask with zeros in [start_bit, end_bit) and ones everywhere else, built by
    // shifting out the bits above end_bit and below start_bit, then inverting.
    const bitmask: TargetType = comptime blk: {
        var bitmask = ~@as(TargetType, 0);
        bitmask <<= (@bitSizeOf(TargetType) - end_bit);
        bitmask >>= (@bitSizeOf(TargetType) - end_bit);
        bitmask >>= start_bit;
        bitmask <<= start_bit;
        break :blk ~bitmask;
    };
    // Clear the destination range in `target`, then OR in the shifted value.
    return (target & bitmask) | (value << start_bit);
}
/// Extracts the `number_of_bits`-wide bit range of `target` starting at `start_bit`,
/// shifted down so the range's lowest bit becomes bit 0 of the result, truncated to
/// `TargetType`. Accepts any unsigned runtime integer or a non-negative comptime_int.
/// NOTE(review): bounds are validated against `@bitSizeOf(TargetType)`, not
/// `@TypeOf(target)` — confirm callers always pass a `target` of (at least)
/// `TargetType`'s width.
pub fn getBits(comptime TargetType: type, target: anytype, comptime start_bit: comptime_int, comptime number_of_bits: comptime_int) TargetType {
    comptime {
        // All validation happens at compile time; the runtime body is a single shift + truncate.
        if (number_of_bits == 0) @compileError("non-zero number_of_bits must be provided");
        if (@typeInfo(TargetType) == .Int) {
            if (@typeInfo(TargetType).Int.signedness != .unsigned) {
                @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
            }
            if (start_bit >= @bitSizeOf(TargetType)) {
                @compileError("start_bit index is out of bounds of the bit field");
            }
            if (start_bit + number_of_bits > @bitSizeOf(TargetType)) {
                @compileError("start_bit + number_of_bits is out of bounds of the bit field");
            }
        } else if (@typeInfo(TargetType) == .ComptimeInt) {
            // comptime_int has no fixed width, so only the sign can be checked.
            if (target < 0) {
                @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
            }
        } else {
            @compileError("requires an unsigned integer, found " ++ @typeName(TargetType));
        }
    }
    return @truncate(TargetType, target >> start_bit);
}
/// A compact symbol reference packed into 62 bits.
///
/// Bit layout (low to high), as established by `init` and the offsets below:
///   bit 0        is_source_contents_slice flag
///   bits 1..30   source index (u30)
///   bits 31..60  inner index (u30)
/// The top bit of the u62 is unused. See the comment in `asU64` for why the
/// backing type is u62 rather than u64.
pub const Ref = enum(TotalSize) {
    /// All 62 bits set. Distinct from `None`, which leaves bit 0 and bit 61 clear.
    default = std.math.maxInt(TotalSize),
    _,
    pub const TotalSize = u62;
    /// Formats the decoded fields rather than the raw packed integer.
    pub fn format(ref: Ref, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
        try std.fmt.format(
            writer,
            "Ref: sourceIndex = {d}, innerIndex = {d}, is_source_contents_slice = {}",
            .{
                ref.sourceIndex(),
                ref.innerIndex(),
                ref.isSourceContentsSlice(),
            },
        );
    }
    // maxInt(u30); a raw source index equal to this is treated as "null"
    // (see isSourceIndexNull and the None sentinel below).
    const max_ref_int = std.math.maxInt(Ref.Int);
    /// Unsigned integer type exactly as wide as `Ref` (u62).
    pub const BitInt = std.meta.Int(.unsigned, @bitSizeOf(Ref));
    /// Reinterprets the ref as its raw backing integer.
    pub inline fn asBitInt(this: Ref) BitInt {
        return @bitCast(BitInt, this);
    }
    // 2 bits of padding for whatever is the parent
    pub const Int = u30;
    /// Sentinel "no ref": inner and source index both maxInt(u30), flag clear.
    pub const None = Ref.init(std.math.maxInt(u30), std.math.maxInt(u30), false);
    /// Sentinel for runtime-owned refs: inner index maxInt(u30),
    /// source index maxInt(u30) - 1, flag clear.
    pub const RuntimeRef = Ref.init(std.math.maxInt(u30), std.math.maxInt(u30) - 1, false);
    // Bit offsets of the two index fields; bit 0 holds the slice flag.
    const source_index_offset = 1;
    const inner_index_offset = 1 + 30;
    /// Decodes the 30-bit source index (bits 1..30).
    pub inline fn sourceIndex(this: Ref) Int {
        return @truncate(Int, getBits(TotalSize, @enumToInt(this), source_index_offset, 30));
    }
    /// Decodes the 30-bit inner index (bits 31..60).
    pub inline fn innerIndex(this: Ref) Int {
        return @truncate(Int, getBits(TotalSize, @enumToInt(this), inner_index_offset, 30));
    }
    /// Decodes the flag stored in bit 0.
    pub inline fn isSourceContentsSlice(this: Ref) bool {
        // `& 1` is redundant after a 1-bit getBits, but kept for clarity/safety.
        return (getBits(TotalSize, @enumToInt(this), 0, 1) & 1) != 0;
    }
    /// Builds a ref with only the inner index set; source index and flag are zero.
    pub fn atIndex(value: anytype) Ref {
        return @intToEnum(Ref, setBits(TotalSize, 0, inner_index_offset, 30, @truncate(Int, value)));
    }
    /// Packs the three fields into a single Ref. Note the parameter order:
    /// inner index first, then source index.
    pub fn init(inner_index: Int, source_index: Int, is_source_contents_slice: bool) Ref {
        return @intToEnum(
            Ref,
            setBits(
                TotalSize,
                0,
                0,
                1,
                @as(
                    TotalSize,
                    @boolToInt(is_source_contents_slice),
                ),
            ) | setBits(
                TotalSize,
                0,
                source_index_offset,
                30,
                source_index,
            ) | setBits(
                TotalSize,
                0,
                inner_index_offset,
                30,
                inner_index,
            ),
        );
    }
    // Field-struct form of a ref, for call sites that build refs by field name.
    // NOTE(review): presumably a transitional shape predating the packed enum — confirm.
    const Old = struct {
        inner_index: Int = 0,
        source_index: Int = std.math.maxInt(Int),
        is_source_contents_slice: bool = false,
    };
    /// Constructs a Ref from the field-struct form `Old`.
    pub fn initSourceEnd(old: Old) Ref {
        return init(old.inner_index, old.source_index, old.is_source_contents_slice);
    }
    /// Narrows any integer to the 30-bit index type (safety-checked cast).
    pub fn toInt(int: anytype) Int {
        return @intCast(Int, int);
    }
    /// 32-bit hash: the low half of `hash64`.
    pub fn hash(key: Ref) u32 {
        return @truncate(u32, key.hash64());
    }
    pub inline fn asU64(key: Ref) u64 {
        // This type isn't quite a u64 because it is used in a few other packed structs which have variables in them
        // But, there are some footguns with the stage1 implementation of packed structs
        // so it is safer to do comparisons as u64
        // but we want to ensure that the value of the unused bits in the u64 are 0
        // i have not looked at the assembly to verify that the unused bits default to 0
        // so we set it to u64 0 just to be sure
        return @as(u64, @enumToInt(key));
    }
    /// Wyhash (seed 0) over the 8 bytes of the zero-extended u64 bit pattern.
    pub inline fn hash64(key: Ref) u64 {
        return std.hash.Wyhash.hash(0, &@bitCast([8]u8, key.asU64()));
    }
    /// Full bit-pattern equality (indices and flag all compared at once).
    pub fn eql(ref: Ref, b: Ref) bool {
        return asU64(ref) == b.asU64();
    }
    /// True when this is exactly the `None` sentinel.
    pub inline fn isNull(self: Ref) bool {
        return self.eql(Ref.None);
    }
    /// True when a raw source-index integer equals the maxInt(u30) "null" sentinel.
    pub fn isSourceIndexNull(int: anytype) bool {
        return int == max_ref_int;
    }
    /// Serializes as a two-element array [sourceIndex, innerIndex];
    /// the slice flag is not emitted.
    pub fn jsonStringify(self: *const Ref, options: anytype, writer: anytype) !void {
        return try std.json.stringify([2]u32{ self.sourceIndex(), self.innerIndex() }, options, writer);
    }
};
test "Ref" {
    // Round-trip: packing fields through initSourceEnd and decoding them back
    // must reproduce the inputs exactly, for a spread of index values and both
    // flag states.
    const Expected = struct {
        inner_index: Ref.Int,
        source_index: Ref.Int,
        is_source_contents_slice: bool,
    };
    const cases = [_]Expected{
        .{ .inner_index = 0, .source_index = 1, .is_source_contents_slice = true },
        .{ .inner_index = 100, .source_index = 0, .is_source_contents_slice = true },
        .{ .inner_index = 20, .source_index = 100, .is_source_contents_slice = true },
        .{ .inner_index = 30, .source_index = 100, .is_source_contents_slice = false },
    };
    for (cases) |case| {
        const ref = Ref.initSourceEnd(.{
            .inner_index = case.inner_index,
            .source_index = case.source_index,
            .is_source_contents_slice = case.is_source_contents_slice,
        });
        try std.testing.expectEqual(case.inner_index, ref.innerIndex());
        try std.testing.expectEqual(case.source_index, ref.sourceIndex());
        try std.testing.expectEqual(case.is_source_contents_slice, ref.isSourceContentsSlice());
    }
}
// This is kind of the wrong place, but it's shared between files
/// Metadata describing how a required/imported module is accessed.
pub const RequireOrImportMeta = struct {
    // CommonJS files will return the "require_*" wrapper function and an invalid
    // exports object reference. Lazily-initialized ESM files will return the
    // "init_*" wrapper function and the exports object for that file.
    // Ref of the wrapper function described above; Ref.None when absent.
    wrapper_ref: Ref = Ref.None,
    // Ref of the module's exports object; Ref.None for the CommonJS case above.
    exports_ref: Ref = Ref.None,
    // Whether the wrapper function is async.
    is_wrapper_async: bool = false,
};
|