aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/javascript/jsc/javascript.zig253
-rw-r--r--src/sourcemap/sourcemap.zig403
-rw-r--r--src/sourcemap/vlq_bench.zig239
3 files changed, 646 insertions, 249 deletions
diff --git a/src/javascript/jsc/javascript.zig b/src/javascript/jsc/javascript.zig
index 828bad39f..852c53562 100644
--- a/src/javascript/jsc/javascript.zig
+++ b/src/javascript/jsc/javascript.zig
@@ -19,7 +19,7 @@ const NetworkThread = @import("http").NetworkThread;
pub fn zigCast(comptime Destination: type, value: anytype) *Destination {
return @ptrCast(*Destination, @alignCast(@alignOf(*Destination), value));
}
-
+const IdentityContext = @import("../../identity_context.zig").IdentityContext;
const Fs = @import("../../fs.zig");
const Resolver = @import("../../resolver/resolver.zig");
const ast = @import("../../import_record.zig");
@@ -1701,9 +1701,73 @@ pub const Task = TaggedPointerUnion(.{
// TimeoutTasklet,
});
+const SourceMap = @import("../../sourcemap/sourcemap.zig");
+
+pub const SavedSourceMap = struct {
+ // For bun.js, we store the number of mappings and how many bytes the final list is at the beginning of the array
+ // The first 8 bytes are the length of the array
+ // The second 8 bytes are the number of mappings
+ pub const SavedMappings = struct {
+ data: [*]u8,
+
+ pub fn vlq(this: SavedMappings) []u8 {
+ return this.data[16..this.len()];
+ }
+
+ pub inline fn len(this: SavedMappings) usize {
+ return @bitCast(u64, this.data[0..8].*);
+ }
+
+ pub fn deinit(this: SavedMappings) void {
+ default_allocator.free(this.data[0..this.len()]);
+ }
+ };
+
+ pub const Value = TaggedPointerUnion(.{ SourceMap, SavedMappings });
+ pub const HashTable = std.HashMap(u64, *anyopaque, IdentityContext(u64), 80);
+
+ map: HashTable,
+
+ pub fn putMappings(this: *SavedSourceMap, source: logger.Source, mappings: MutableString) !void {
+ var entry = try this.map.getOrPut(std.hash.Wyhash.hash(0, source.path.text));
+ if (entry.found_existing) {
+ var value = Value.from(entry.value_ptr.*);
+ if (value.get(SourceMap)) |source_map_| {
+ var source_map: *SourceMap = source_map_;
+ source_map.deinit();
+ } else if (value.get(SavedMappings)) |saved_mappings| {
+ var saved = SavedMappings{ .data = @ptrCast([*]u8, saved_mappings) };
+
+ saved.deinit();
+ }
+ }
+
+ entry.value_ptr.* = Value.init(@ptrCast(*SavedMappings, mappings.list.items.ptr)).ptr();
+ }
+
+ pub fn get(this: *SavedSourceMap, allocator: std.mem.Allocator, path: string) ?*SourceMap {
+ var mapping = this.map.getEntry(std.hash.Wyhash.hash(0, path)) orelse return null;
+ switch (Value.from(mapping.value_ptr.*).tag()) {
+ SourceMap => {
+ return Value.from(mapping.value_ptr.*).as(SourceMap);
+ },
+ SavedMappings => {
+ _ = allocator;
+ return null;
+ },
+ else => return null,
+ }
+ }
+};
+
// If you read JavascriptCore/API/JSVirtualMachine.mm - https://github.com/WebKit/WebKit/blob/acff93fb303baa670c055cb24c2bad08691a01a0/Source/JavaScriptCore/API/JSVirtualMachine.mm#L101
// We can see that it's sort of like std.mem.Allocator but for JSGlobalContextRef, to support Automatic Reference Counting
// Its unavailable on Linux
+
+// JavaScriptCore expects 1 VM per thread
+// However, there can be many JSGlobalObject
+// We currently assume a 1:1 correspondence between the two.
+// This is technically inaccurate
pub const VirtualMachine = struct {
global: *JSGlobalObject,
allocator: std.mem.Allocator,
@@ -1745,8 +1809,7 @@ pub const VirtualMachine = struct {
regular_event_loop: EventLoop = EventLoop{},
event_loop: *EventLoop = undefined,
- is_set_timeout_enabled: bool = false,
- is_set_interval_enabled: bool = false,
+ source_mappings: SavedSourceMap = undefined,
pub inline fn eventLoop(this: *VirtualMachine) *EventLoop {
return this.event_loop;
@@ -1940,7 +2003,7 @@ pub const VirtualMachine = struct {
.flush_list = std.ArrayList(string).init(allocator),
.blobs = if (_args.serve orelse false) try Blob.Group.init(allocator) else null,
.origin = bundler.options.origin,
-
+ .source_mappings = SavedSourceMap{ .map = SavedSourceMap.HashTable.init(allocator) },
.macros = MacroMap.init(allocator),
.macro_entry_points = @TypeOf(VirtualMachine.vm.macro_entry_points).init(allocator),
.origin_timer = std.time.Timer.start() catch @panic("Please don't mess with timers."),
@@ -1973,12 +2036,11 @@ pub const VirtualMachine = struct {
VirtualMachine.vm.regular_event_loop.global = VirtualMachine.vm.global;
VirtualMachine.vm_loaded = true;
- if (!source_code_printer_loaded) {
+ if (source_code_printer == null) {
var writer = try js_printer.BufferWriter.init(allocator);
- source_code_printer = js_printer.BufferPrinter.init(writer);
- source_code_printer.ctx.append_null_byte = false;
-
- source_code_printer_loaded = true;
+ source_code_printer = allocator.create(js_printer.BufferPrinter) catch unreachable;
+ source_code_printer.?.* = js_printer.BufferPrinter.init(writer);
+ source_code_printer.?.ctx.append_null_byte = false;
}
return VirtualMachine.vm;
@@ -1989,8 +2051,7 @@ pub const VirtualMachine = struct {
// }
- threadlocal var source_code_printer: js_printer.BufferPrinter = undefined;
- threadlocal var source_code_printer_loaded: bool = false;
+ threadlocal var source_code_printer: ?*js_printer.BufferPrinter = null;
pub fn preflush(this: *VirtualMachine) void {
// We flush on the next tick so that if there were any errors you can still see them
@@ -2014,15 +2075,16 @@ pub const VirtualMachine = struct {
log: *logger.Log,
) !ResolvedSource {
std.debug.assert(VirtualMachine.vm_loaded);
+ var jsc_vm = vm;
- if (vm.node_modules != null and strings.eqlComptime(_specifier, bun_file_import_path)) {
+ if (jsc_vm.node_modules != null and strings.eqlComptime(_specifier, bun_file_import_path)) {
// We kind of need an abstraction around this.
// Basically we should subclass JSC::SourceCode with:
// - hash
// - file descriptor for source input
// - file path + file descriptor for bytecode caching
// - separate bundles for server build vs browser build OR at least separate sections
- const code = try vm.node_modules.?.readCodeAsStringSlow(vm.allocator);
+ const code = try jsc_vm.node_modules.?.readCodeAsStringSlow(jsc_vm.allocator);
return ResolvedSource{
.allocator = null,
@@ -2031,7 +2093,7 @@ pub const VirtualMachine = struct {
.source_url = ZigString.init(bun_file_import_path[1..]),
.hash = 0, // TODO
};
- } else if (vm.node_modules == null and strings.eqlComptime(_specifier, Runtime.Runtime.Imports.Name)) {
+ } else if (jsc_vm.node_modules == null and strings.eqlComptime(_specifier, Runtime.Runtime.Imports.Name)) {
return ResolvedSource{
.allocator = null,
.source_code = ZigString.init(Runtime.Runtime.sourceContent(false)),
@@ -2043,17 +2105,17 @@ pub const VirtualMachine = struct {
// so it consistently handles bundled imports
// we can't take the shortcut of just directly importing the file, sadly.
} else if (strings.eqlComptime(_specifier, main_file_name)) {
- defer vm.transpiled_count += 1;
+ defer jsc_vm.transpiled_count += 1;
- var bundler = &vm.bundler;
- var old = vm.bundler.log;
- vm.bundler.log = log;
- vm.bundler.linker.log = log;
- vm.bundler.resolver.log = log;
+ var bundler = &jsc_vm.bundler;
+ var old = jsc_vm.bundler.log;
+ jsc_vm.bundler.log = log;
+ jsc_vm.bundler.linker.log = log;
+ jsc_vm.bundler.resolver.log = log;
defer {
- vm.bundler.log = old;
- vm.bundler.linker.log = old;
- vm.bundler.resolver.log = old;
+ jsc_vm.bundler.log = old;
+ jsc_vm.bundler.linker.log = old;
+ jsc_vm.bundler.resolver.log = old;
}
var jsx = bundler.options.jsx;
@@ -2066,30 +2128,33 @@ pub const VirtualMachine = struct {
opts.features.react_fast_refresh = false;
opts.filepath_hash_for_hmr = 0;
opts.warn_about_unbundled_modules = false;
- opts.macro_context = &vm.bundler.macro_context.?;
- const main_ast = (bundler.resolver.caches.js.parse(vm.allocator, opts, bundler.options.define, bundler.log, &vm.entry_point.source) catch null) orelse {
+ opts.macro_context = &jsc_vm.bundler.macro_context.?;
+ const main_ast = (bundler.resolver.caches.js.parse(jsc_vm.allocator, opts, bundler.options.define, bundler.log, &jsc_vm.entry_point.source) catch null) orelse {
return error.ParseError;
};
- var parse_result = ParseResult{ .source = vm.entry_point.source, .ast = main_ast, .loader = .js, .input_fd = null };
+ var parse_result = ParseResult{ .source = jsc_vm.entry_point.source, .ast = main_ast, .loader = .js, .input_fd = null };
var file_path = Fs.Path.init(bundler.fs.top_level_dir);
file_path.name.dir = bundler.fs.top_level_dir;
file_path.name.base = "bun:main";
try bundler.linker.link(
file_path,
&parse_result,
- vm.origin,
+ jsc_vm.origin,
.absolute_path,
false,
);
-
- source_code_printer.ctx.reset();
-
- var written = try vm.bundler.print(
- parse_result,
- @TypeOf(&source_code_printer),
- &source_code_printer,
- .esm_ascii,
- );
+ var printer = source_code_printer.?.*;
+ var written: usize = undefined;
+ printer.ctx.reset();
+ {
+ defer source_code_printer.?.* = printer;
+ written = try jsc_vm.bundler.print(
+ parse_result,
+ @TypeOf(&printer),
+ &printer,
+ .esm_ascii,
+ );
+ }
if (written == 0) {
return error.PrintingErrorWriteFailed;
@@ -2097,7 +2162,7 @@ pub const VirtualMachine = struct {
return ResolvedSource{
.allocator = null,
- .source_code = ZigString.init(vm.allocator.dupe(u8, source_code_printer.ctx.written) catch unreachable),
+ .source_code = ZigString.init(jsc_vm.allocator.dupe(u8, printer.ctx.written) catch unreachable),
.specifier = ZigString.init(std.mem.span(main_file_name)),
.source_url = ZigString.init(std.mem.span(main_file_name)),
.hash = 0,
@@ -2105,7 +2170,7 @@ pub const VirtualMachine = struct {
} else if (_specifier.len > js_ast.Macro.namespaceWithColon.len and
strings.eqlComptimeIgnoreLen(_specifier[0..js_ast.Macro.namespaceWithColon.len], js_ast.Macro.namespaceWithColon))
{
- if (vm.macro_entry_points.get(MacroEntryPoint.generateIDFromSpecifier(_specifier))) |entry| {
+ if (jsc_vm.macro_entry_points.get(MacroEntryPoint.generateIDFromSpecifier(_specifier))) |entry| {
return ResolvedSource{
.allocator = null,
.source_code = ZigString.init(entry.source.contents),
@@ -2137,20 +2202,20 @@ pub const VirtualMachine = struct {
std.debug.assert(std.fs.path.isAbsolute(specifier)); // if this crashes, it means the resolver was skipped.
const path = Fs.Path.init(specifier);
- const loader = vm.bundler.options.loaders.get(path.name.ext) orelse .file;
+ const loader = jsc_vm.bundler.options.loaders.get(path.name.ext) orelse .file;
switch (loader) {
.js, .jsx, .ts, .tsx, .json, .toml => {
- vm.transpiled_count += 1;
- vm.bundler.resetStore();
+ jsc_vm.transpiled_count += 1;
+ jsc_vm.bundler.resetStore();
const hash = http.Watcher.getHash(path.text);
- var allocator = if (vm.has_loaded) vm.arena.allocator() else vm.allocator;
+ var allocator = if (jsc_vm.has_loaded) jsc_vm.arena.allocator() else jsc_vm.allocator;
var fd: ?StoredFileDescriptorType = null;
var package_json: ?*PackageJSON = null;
- if (vm.watcher) |watcher| {
+ if (jsc_vm.watcher) |watcher| {
if (watcher.indexOf(hash)) |index| {
const _fd = watcher.watchlist.items(.fd)[index];
fd = if (_fd > 0) _fd else null;
@@ -2158,24 +2223,24 @@ pub const VirtualMachine = struct {
}
}
- var old = vm.bundler.log;
- vm.bundler.log = log;
- vm.bundler.linker.log = log;
- vm.bundler.resolver.log = log;
+ var old = jsc_vm.bundler.log;
+ jsc_vm.bundler.log = log;
+ jsc_vm.bundler.linker.log = log;
+ jsc_vm.bundler.resolver.log = log;
defer {
- vm.bundler.log = old;
- vm.bundler.linker.log = old;
- vm.bundler.resolver.log = old;
+ jsc_vm.bundler.log = old;
+ jsc_vm.bundler.linker.log = old;
+ jsc_vm.bundler.resolver.log = old;
}
// this should be a cheap lookup because 24 bytes == 8 * 3 so it's read 3 machine words
const is_node_override = specifier.len > "/bun-vfs/node_modules/".len and strings.eqlComptimeIgnoreLen(specifier[0.."/bun-vfs/node_modules/".len], "/bun-vfs/node_modules/");
- const macro_remappings = if (vm.macro_mode or !vm.has_any_macro_remappings or is_node_override)
+ const macro_remappings = if (jsc_vm.macro_mode or !jsc_vm.has_any_macro_remappings or is_node_override)
MacroRemap{}
else
- vm.bundler.options.macro_remap;
+ jsc_vm.bundler.options.macro_remap;
var fallback_source: logger.Source = undefined;
@@ -2187,7 +2252,7 @@ pub const VirtualMachine = struct {
.file_descriptor = fd,
.file_hash = hash,
.macro_remappings = macro_remappings,
- .jsx = vm.bundler.options.jsx,
+ .jsx = jsc_vm.bundler.options.jsx,
};
if (is_node_override) {
@@ -2198,57 +2263,61 @@ pub const VirtualMachine = struct {
}
}
- var parse_result = vm.bundler.parse(
+ var parse_result = jsc_vm.bundler.parse(
parse_options,
null,
) orelse {
return error.ParseError;
};
- const start_count = vm.bundler.linker.import_counter;
+ const start_count = jsc_vm.bundler.linker.import_counter;
// We _must_ link because:
// - node_modules bundle won't be properly
- try vm.bundler.linker.link(
+ try jsc_vm.bundler.linker.link(
path,
&parse_result,
- vm.origin,
+ jsc_vm.origin,
.absolute_path,
false,
);
- if (!vm.macro_mode)
- vm.resolved_count += vm.bundler.linker.import_counter - start_count;
- vm.bundler.linker.import_counter = 0;
+ if (!jsc_vm.macro_mode)
+ jsc_vm.resolved_count += jsc_vm.bundler.linker.import_counter - start_count;
+ jsc_vm.bundler.linker.import_counter = 0;
- source_code_printer.ctx.reset();
-
- var written = try vm.bundler.print(
- parse_result,
- @TypeOf(&source_code_printer),
- &source_code_printer,
- .esm_ascii,
- );
+ var printer = source_code_printer.?.*;
+ var written: usize = undefined;
+ printer.ctx.reset();
+ {
+ defer source_code_printer.?.* = printer;
+ written = try jsc_vm.bundler.print(
+ parse_result,
+ @TypeOf(&printer),
+ &printer,
+ .esm_ascii,
+ );
+ }
if (written == 0) {
return error.PrintingErrorWriteFailed;
}
return ResolvedSource{
- .allocator = if (vm.has_loaded) &vm.allocator else null,
- .source_code = ZigString.init(vm.allocator.dupe(u8, source_code_printer.ctx.written) catch unreachable),
+ .allocator = if (jsc_vm.has_loaded) &jsc_vm.allocator else null,
+ .source_code = ZigString.init(jsc_vm.allocator.dupe(u8, printer.ctx.written) catch unreachable),
.specifier = ZigString.init(specifier),
.source_url = ZigString.init(path.text),
.hash = 0,
};
},
// .wasm => {
- // vm.transpiled_count += 1;
+ // jsc_vm.transpiled_count += 1;
// var fd: ?StoredFileDescriptorType = null;
- // var allocator = if (vm.has_loaded) vm.arena.allocator() else vm.allocator;
+ // var allocator = if (jsc_vm.has_loaded) jsc_vm.arena.allocator() else jsc_vm.allocator;
// const hash = http.Watcher.getHash(path.text);
- // if (vm.watcher) |watcher| {
+ // if (jsc_vm.watcher) |watcher| {
// if (watcher.indexOf(hash)) |index| {
// const _fd = watcher.watchlist.items(.fd)[index];
// fd = if (_fd > 0) _fd else null;
@@ -2263,10 +2332,10 @@ pub const VirtualMachine = struct {
// .file_descriptor = fd,
// .file_hash = hash,
// .macro_remappings = MacroRemap{},
- // .jsx = vm.bundler.options.jsx,
+ // .jsx = jsc_vm.bundler.options.jsx,
// };
- // var parse_result = vm.bundler.parse(
+ // var parse_result = jsc_vm.bundler.parse(
// parse_options,
// null,
// ) orelse {
@@ -2274,8 +2343,8 @@ pub const VirtualMachine = struct {
// };
// return ResolvedSource{
- // .allocator = if (vm.has_loaded) &vm.allocator else null,
- // .source_code = ZigString.init(vm.allocator.dupe(u8, parse_result.source.contents) catch unreachable),
+ // .allocator = if (jsc_vm.has_loaded) &jsc_vm.allocator else null,
+ // .source_code = ZigString.init(jsc_vm.allocator.dupe(u8, parse_result.source.contents) catch unreachable),
// .specifier = ZigString.init(specifier),
// .source_url = ZigString.init(path.text),
// .hash = 0,
@@ -2285,7 +2354,7 @@ pub const VirtualMachine = struct {
else => {
return ResolvedSource{
.allocator = &vm.allocator,
- .source_code = ZigString.init(try strings.quotedAlloc(VirtualMachine.vm.allocator, path.pretty)),
+ .source_code = ZigString.init(try strings.quotedAlloc(jsc_vm.allocator, path.pretty)),
.specifier = ZigString.init(path.text),
.source_url = ZigString.init(path.text),
.hash = 0,
@@ -2300,16 +2369,20 @@ pub const VirtualMachine = struct {
fn _resolve(ret: *ResolveFunctionResult, _: *JSGlobalObject, specifier: string, source: string) !void {
std.debug.assert(VirtualMachine.vm_loaded);
+ // macOS threadlocal vars are very slow
+ // we won't change threads in this function
+ // so we can copy it here
+ var jsc_vm = vm;
- if (vm.node_modules == null and strings.eqlComptime(std.fs.path.basename(specifier), Runtime.Runtime.Imports.alt_name)) {
+ if (jsc_vm.node_modules == null and strings.eqlComptime(std.fs.path.basename(specifier), Runtime.Runtime.Imports.alt_name)) {
ret.path = Runtime.Runtime.Imports.Name;
return;
- } else if (vm.node_modules != null and strings.eqlComptime(specifier, bun_file_import_path)) {
+ } else if (jsc_vm.node_modules != null and strings.eqlComptime(specifier, bun_file_import_path)) {
ret.path = bun_file_import_path;
return;
} else if (strings.eqlComptime(specifier, main_file_name)) {
ret.result = null;
- ret.path = vm.entry_point.source.path.text;
+ ret.path = jsc_vm.entry_point.source.path.text;
return;
} else if (specifier.len > js_ast.Macro.namespaceWithColon.len and strings.eqlComptimeIgnoreLen(specifier[0..js_ast.Macro.namespaceWithColon.len], js_ast.Macro.namespaceWithColon)) {
ret.result = null;
@@ -2332,25 +2405,25 @@ pub const VirtualMachine = struct {
const is_special_source = strings.eqlComptime(source, main_file_name) or js_ast.Macro.isMacroPath(source);
- const result = try vm.bundler.resolver.resolve(
- if (!is_special_source) Fs.PathName.init(source).dirWithTrailingSlash() else VirtualMachine.vm.bundler.fs.top_level_dir,
+ const result = try jsc_vm.bundler.resolver.resolve(
+ if (!is_special_source) Fs.PathName.init(source).dirWithTrailingSlash() else jsc_vm.bundler.fs.top_level_dir,
specifier,
.stmt,
);
- if (!vm.macro_mode) {
- vm.has_any_macro_remappings = vm.has_any_macro_remappings or vm.bundler.options.macro_remap.count() > 0;
+ if (!jsc_vm.macro_mode) {
+ jsc_vm.has_any_macro_remappings = jsc_vm.has_any_macro_remappings or jsc_vm.bundler.options.macro_remap.count() > 0;
}
ret.result = result;
const result_path = result.pathConst() orelse return error.ModuleNotFound;
- vm.resolved_count += 1;
+ jsc_vm.resolved_count += 1;
- if (vm.node_modules != null and !strings.eqlComptime(result_path.namespace, "node") and result.isLikelyNodeModule()) {
- const node_modules_bundle = vm.node_modules.?;
+ if (jsc_vm.node_modules != null and !strings.eqlComptime(result_path.namespace, "node") and result.isLikelyNodeModule()) {
+ const node_modules_bundle = jsc_vm.node_modules.?;
node_module_checker: {
const package_json = result.package_json orelse brk: {
- if (vm.bundler.resolver.packageJSONForResolvedNodeModule(&result)) |pkg| {
+ if (jsc_vm.bundler.resolver.packageJSONForResolvedNodeModule(&result)) |pkg| {
break :brk pkg;
} else {
break :node_module_checker;
@@ -2374,7 +2447,7 @@ pub const VirtualMachine = struct {
std.debug.assert(strings.eql(node_modules_bundle.str(package.name), package_json.name));
}
- const package_relative_path = vm.bundler.fs.relative(
+ const package_relative_path = jsc_vm.bundler.fs.relative(
package_json.source.path.name.dirWithTrailingSlash(),
result_path.text,
);
diff --git a/src/sourcemap/sourcemap.zig b/src/sourcemap/sourcemap.zig
index e043a493b..dc8de4b43 100644
--- a/src/sourcemap/sourcemap.zig
+++ b/src/sourcemap/sourcemap.zig
@@ -9,7 +9,6 @@ const BabyList = JSAst.BabyList;
const Logger = @import("../logger.zig");
const strings = @import("../string_immutable.zig");
const MutableString = @import("../string_mutable.zig").MutableString;
-const base64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
const Joiner = @import("../string_joiner.zig");
const JSPrinter = @import("../js_printer.zig");
const URL = @import("../query_string_map.zig").URL;
@@ -17,18 +16,6 @@ const FileSystem = @import("../fs.zig").FileSystem;
const SourceMap = @This();
-const vlq_max_in_bytes = 8;
-pub const VLQ = struct {
- // We only need to worry about i32
- // That means the maximum VLQ-encoded value is 8 bytes
- // because there are only 4 bits of number inside each VLQ value
- // and it expects i32
- // therefore, it can never be more than 32 bits long
- // I believe the actual number is 7 bytes long, however we can add an extra byte to be more cautious
- bytes: [vlq_max_in_bytes]u8,
- len: u4 = 0,
-};
-
/// Coordinates in source maps are stored using relative offsets for size
/// reasons. When joining together chunks of a source map that were emitted
/// in parallel for different parts of a file, we need to fix up the first
@@ -55,7 +42,7 @@ pub const Mapping = struct {
original: LineColumnOffset,
source_index: i32,
- pub const List = std.MultiArrayList(Mapping);
+ pub const List = std.ArrayList(Mapping);
pub inline fn generatedLine(mapping: Mapping) i32 {
return mapping.generated.lines;
@@ -190,6 +177,18 @@ const vlq_lookup_table: [256]VLQ = brk: {
break :brk entries;
};
+const vlq_max_in_bytes = 8;
+pub const VLQ = struct {
+ // We only need to worry about i32
+ // That means the maximum VLQ-encoded value is 8 bytes
+ // because there are only 4 bits of number inside each VLQ value
+ // and it expects i32
+ // therefore, it can never be more than 32 bits long
+ // I believe the actual number is 7 bytes long, however we can add an extra byte to be more cautious
+ bytes: [vlq_max_in_bytes]u8,
+ len: u4 = 0,
+};
+
pub fn encodeVLQWithLookupTable(
value: i32,
) VLQ {
@@ -224,7 +223,6 @@ test "decodeVLQ" {
.{ -1, "D" },
.{ 123, "2H" },
.{ 123456789, "qxmvrH" },
- .{ 8, "Q" },
};
inline for (fixtures) |fixture| {
const result = decodeVLQ(fixture[1], 0);
@@ -258,8 +256,7 @@ pub fn encodeVLQ(
else
@bitCast(u32, (-value << 1) | 1);
- // The max amount of digits a VLQ value for sourcemaps can contain is 9
- // therefore, we can unroll the loop
+ // source mappings are limited to i32
comptime var i: usize = 0;
inline while (i < vlq_max_in_bytes) : (i += 1) {
var digit = vlq & 31;
@@ -287,6 +284,9 @@ pub const VLQResult = struct {
start: usize = 0,
};
+const base64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+// base64 stores values up to 7 bits
const base64_lut: [std.math.maxInt(u7)]u7 = brk: {
@setEvalBranchQuota(9999);
var bytes = [_]u7{std.math.maxInt(u7)} ** std.math.maxInt(u7);
@@ -302,12 +302,11 @@ pub fn decodeVLQ(encoded: []const u8, start: usize) VLQResult {
var shift: u8 = 0;
var vlq: u32 = 0;
- // it will never exceed 9
- // by doing it this way, we can hint to the compiler that it will not exceed 9
+ // hint to the compiler what the maximum value is
const encoded_ = encoded[start..][0..@minimum(encoded.len - start, comptime (vlq_max_in_bytes + 1))];
+ // inlining helps for the 1 or 2 byte case, hurts a little for larger
comptime var i: usize = 0;
-
inline while (i < vlq_max_in_bytes + 1) : (i += 1) {
const index = @as(u32, base64_lut[@truncate(u7, encoded_[i])]);
@@ -473,6 +472,7 @@ pub const LineOffsetTable = struct {
.byte_offset_to_first_non_ascii = byte_offset_to_first_non_ascii,
.columns_for_non_ascii = BabyList(i32).fromList(columns_list),
}) catch unreachable;
+
column = 0;
byte_offset_to_first_non_ascii = 0;
column_byte_offset = 0;
@@ -577,6 +577,8 @@ pub fn appendMappingToBuffer(buffer_: MutableString, last_byte: u8, prev_state:
pub const Chunk = struct {
buffer: MutableString,
+ mappings_count: usize = 0,
+
/// This end state will be used to rewrite the start of the following source
/// map chunk so that the delta-encoded VLQ numbers are preserved.
end_state: SourceMapState = .{},
@@ -625,171 +627,260 @@ pub const Chunk = struct {
return output;
}
- pub const Builder = struct {
- input_source_map: ?*SourceMap = null,
- source_map: MutableString,
- line_offset_tables: LineOffsetTable.List = .{},
- prev_state: SourceMapState = SourceMapState{},
- last_generated_update: u32 = 0,
- generated_column: i32 = 0,
- prev_loc: Logger.Loc = Logger.Loc.Empty,
- has_prev_state: bool = false,
-
- // This is a workaround for a bug in the popular "source-map" library:
- // https://github.com/mozilla/source-map/issues/261. The library will
- // sometimes return null when querying a source map unless every line
- // starts with a mapping at column zero.
- //
- // The workaround is to replicate the previous mapping if a line ends
- // up not starting with a mapping. This is done lazily because we want
- // to avoid replicating the previous mapping if we don't need to.
- line_starts_with_mapping: bool = false,
- cover_lines_without_mappings: bool = false,
-
- pub fn generateChunk(b: *Builder, output: []const u8) Chunk {
- b.updateGeneratedLineAndColumn(output);
- return Chunk{
- .buffer = b.source_map,
- .end_state = b.prev_state,
- .final_generated_column = b.generated_column,
- .should_ignore = !strings.containsAnyBesidesChar(b.source_map.list.items, ';'),
- };
- }
+ pub fn SourceMapFormat(comptime Type: type) type {
+ return struct {
+ ctx: Type,
+ const Format = @This();
- // Scan over the printed text since the last source mapping and update the
- // generated line and column numbers
- pub fn updateGeneratedLineAndColumn(b: *Builder, output: []const u8) void {
- const slice = output[b.last_generated_update..];
- var needs_mapping = b.cover_lines_without_mappings and !b.line_starts_with_mapping and b.has_prev_state;
+ pub fn init(allocator: std.mem.Allocator) Format {
+ return Format{ .ctx = Type.init(allocator) };
+ }
- var i: usize = 0;
- const n = @intCast(usize, slice.len);
- var c: i32 = 0;
- while (i < n) {
- const len = strings.wtf8ByteSequenceLength(slice[i]);
- c = strings.decodeWTF8RuneT(slice[i..].ptr[0..4], len, i32, strings.unicode_replacement);
- i += @as(usize, len);
+ pub inline fn appendLineSeparator(this: *Format) anyerror!void {
+ try this.ctx.appendLineSeparator();
+ }
- switch (c) {
- 14...127 => {
- if (strings.indexOfNewlineOrNonASCII(slice, @intCast(u32, i))) |j| {
- b.generated_column += @intCast(i32, (@as(usize, j) - i) + 1);
- i = j;
- continue;
- } else {
- b.generated_column += @intCast(i32, slice[i..].len);
- i = n;
- break;
- }
- },
- '\r', '\n', 0x2028, 0x2029 => {
- // windows newline
- if (c == '\r') {
- const newline_check = b.last_generated_update + i;
- if (newline_check < output.len and output[newline_check] == '\n') {
- continue;
- }
- }
+ pub inline fn append(this: *Format, current_state: SourceMapState, prev_state: SourceMapState) anyerror!void {
+ try this.ctx.append(current_state, prev_state);
+ }
- // If we're about to move to the next line and the previous line didn't have
- // any mappings, add a mapping at the start of the previous line.
- if (needs_mapping) {
- b.appendMappingWithoutRemapping(.{
- .generated_line = b.prev_state.generated_line,
- .generated_column = 0,
- .source_index = b.prev_state.source_index,
- .original_line = b.prev_state.original_line,
- .original_column = b.prev_state.original_column,
- });
- }
+ pub inline fn shouldIgnore(this: Format) bool {
+ return this.ctx.shouldIgnore();
+ }
+
+ pub inline fn getBuffer(this: Format) MutableString {
+ return this.ctx.getBuffer();
+ }
- b.prev_state.generated_line += 1;
- b.prev_state.generated_column = 0;
- b.generated_column = 0;
- b.source_map.appendChar(';') catch unreachable;
+ pub inline fn getCount(this: Format) usize {
+ return this.ctx.getCount();
+ }
+ };
+ }
- // This new line doesn't have a mapping yet
- b.line_starts_with_mapping = false;
+ pub const VLQSourceMap = struct {
+ data: MutableString,
+ count: usize = 0,
+ offset: usize = 0,
- needs_mapping = b.cover_lines_without_mappings and !b.line_starts_with_mapping and b.has_prev_state;
- },
+ pub const Format = SourceMapFormat(VLQSourceMap);
- else => {
- // Mozilla's "source-map" library counts columns using UTF-16 code units
- b.generated_column += @as(i32, @boolToInt(c > 0xFFFF)) + 1;
- },
- }
+ pub fn init(allocator: std.mem.Allocator, prepend_count: bool) VLQSourceMap {
+ var map = VLQSourceMap{
+ .data = MutableString.initEmpty(allocator),
+ };
+
+ // For bun.js, we store the number of mappings and how many bytes the final list is at the beginning of the array
+ if (prepend_count) {
+ map.offset = 16;
+ map.data.append(&[16]u8{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }) catch unreachable;
}
- b.last_generated_update = @truncate(u32, output.len);
+ return map;
}
- pub fn appendMapping(b: *Builder, current_state_: SourceMapState) void {
- var current_state = current_state_;
- // If the input file had a source map, map all the way back to the original
- if (b.input_source_map) |input| {
- if (input.find(current_state.original_line, current_state.original_column)) |mapping| {
- current_state.source_index = mapping.sourceIndex();
- current_state.original_line = mapping.originalLine();
- current_state.original_column = mapping.originalColumn();
- }
- }
-
- b.appendMappingWithoutRemapping(current_state);
+ pub fn appendLineSeparator(this: *VLQSourceMap) anyerror!void {
+ try this.data.appendChar(';');
}
- pub fn appendMappingWithoutRemapping(b: *Builder, current_state: SourceMapState) void {
- const last_byte: u8 = if (b.source_map.list.items.len > 0)
- b.source_map.list.items[b.source_map.list.items.len - 1]
+ pub fn append(this: *VLQSourceMap, current_state: SourceMapState, prev_state: SourceMapState) anyerror!void {
+ const last_byte: u8 = if (this.data.list.items.len > this.offset)
+ this.data.list.items[this.data.list.items.len - 1]
else
0;
- b.source_map = appendMappingToBuffer(b.source_map, last_byte, b.prev_state, current_state);
- b.prev_state = current_state;
- b.has_prev_state = true;
+ this.data = appendMappingToBuffer(this.data, last_byte, prev_state, current_state);
+ this.count += 1;
+ }
+
+ pub fn shouldIgnore(this: VLQSourceMap) bool {
+ return this.count == 0;
}
- pub fn addSourceMapping(b: *Builder, loc: Logger.Loc, output: []const u8) void {
- // exclude generated code from source
- if (b.prev_loc.eql(loc) or loc.start == Logger.Loc.Empty.start) {
- return;
+ pub fn getBuffer(this: VLQSourceMap) MutableString {
+ return this.data;
+ }
+
+ pub fn getCount(this: VLQSourceMap) usize {
+ return this.count;
+ }
+ };
+
+ pub fn NewBuilder(comptime SourceMapFormatType: type) type {
+ return struct {
+ const ThisBuilder = @This();
+ input_source_map: ?*SourceMap = null,
+ source_map: SourceMapper,
+ line_offset_tables: LineOffsetTable.List = .{},
+ prev_state: SourceMapState = SourceMapState{},
+ last_generated_update: u32 = 0,
+ generated_column: i32 = 0,
+ prev_loc: Logger.Loc = Logger.Loc.Empty,
+ has_prev_state: bool = false,
+
+ // This is a workaround for a bug in the popular "source-map" library:
+ // https://github.com/mozilla/source-map/issues/261. The library will
+ // sometimes return null when querying a source map unless every line
+ // starts with a mapping at column zero.
+ //
+ // The workaround is to replicate the previous mapping if a line ends
+ // up not starting with a mapping. This is done lazily because we want
+ // to avoid replicating the previous mapping if we don't need to.
+ line_starts_with_mapping: bool = false,
+ cover_lines_without_mappings: bool = false,
+
+ /// When generating sourcemappings for bun, we store a count of how many mappings there were
+ prepend_count: bool = false,
+
+ pub const SourceMapper = SourceMapFormat(SourceMapFormatType);
+
+ pub fn generateChunk(b: *ThisBuilder, output: []const u8) Chunk {
+ b.updateGeneratedLineAndColumn(output);
+ if (b.prepend_count) {
+ b.source_map.getBuffer().list.items[0..8].* = @bitCast([8]u8, b.source_map.getBuffer().list.items.len);
+ b.source_map.getBuffer().list.items[8..16].* = @bitCast([8]u8, b.source_map.getCount());
+ }
+ return Chunk{
+ .buffer = b.source_map.getBuffer(),
+ .mappings_count = b.source_map.getCount(),
+ .end_state = b.prev_state,
+ .final_generated_column = b.generated_column,
+ .should_ignore = b.source_map.shouldIgnore(),
+ };
}
- b.prev_loc = loc;
- const list = b.line_offset_tables;
- const original_line = LineOffsetTable.findLine(list, loc);
- const line = list.get(@intCast(usize, @maximum(original_line, 0)));
+ // Scan over the printed text since the last source mapping and update the
+ // generated line and column numbers
+ pub fn updateGeneratedLineAndColumn(b: *ThisBuilder, output: []const u8) void {
+ const slice = output[b.last_generated_update..];
+ var needs_mapping = b.cover_lines_without_mappings and !b.line_starts_with_mapping and b.has_prev_state;
+
+ var i: usize = 0;
+ const n = @intCast(usize, slice.len);
+ var c: i32 = 0;
+ while (i < n) {
+ const len = strings.wtf8ByteSequenceLength(slice[i]);
+ c = strings.decodeWTF8RuneT(slice[i..].ptr[0..4], len, i32, strings.unicode_replacement);
+ i += @as(usize, len);
+
+ switch (c) {
+ 14...127 => {
+ if (strings.indexOfNewlineOrNonASCII(slice, @intCast(u32, i))) |j| {
+ b.generated_column += @intCast(i32, (@as(usize, j) - i) + 1);
+ i = j;
+ continue;
+ } else {
+ b.generated_column += @intCast(i32, slice[i..].len);
+ i = n;
+ break;
+ }
+ },
+ '\r', '\n', 0x2028, 0x2029 => {
+ // windows newline
+ if (c == '\r') {
+ const newline_check = b.last_generated_update + i;
+ if (newline_check < output.len and output[newline_check] == '\n') {
+ continue;
+ }
+ }
+
+ // If we're about to move to the next line and the previous line didn't have
+ // any mappings, add a mapping at the start of the previous line.
+ if (needs_mapping) {
+ b.appendMappingWithoutRemapping(.{
+ .generated_line = b.prev_state.generated_line,
+ .generated_column = 0,
+ .source_index = b.prev_state.source_index,
+ .original_line = b.prev_state.original_line,
+ .original_column = b.prev_state.original_column,
+ });
+ }
+
+ b.prev_state.generated_line += 1;
+ b.prev_state.generated_column = 0;
+ b.generated_column = 0;
+ b.source_map.appendLineSeparator();
- // Use the line to compute the column
- var original_column = loc.start - @intCast(i32, line.byte_offset_to_start_of_line);
- if (line.columns_for_non_ascii.len > 0 and original_column >= @intCast(i32, line.byte_offset_to_first_non_ascii)) {
- original_column = line.columns_for_non_ascii.ptr[@intCast(u32, original_column) - line.byte_offset_to_first_non_ascii];
+ // This new line doesn't have a mapping yet
+ b.line_starts_with_mapping = false;
+
+ needs_mapping = b.cover_lines_without_mappings and !b.line_starts_with_mapping and b.has_prev_state;
+ },
+
+ else => {
+ // Mozilla's "source-map" library counts columns using UTF-16 code units
+ b.generated_column += @as(i32, @boolToInt(c > 0xFFFF)) + 1;
+ },
+ }
+ }
+
+ b.last_generated_update = @truncate(u32, output.len);
+ }
+
+ pub fn appendMapping(b: *ThisBuilder, current_state_: SourceMapState) void {
+ var current_state = current_state_;
+ // If the input file had a source map, map all the way back to the original
+ if (b.input_source_map) |input| {
+ if (input.find(current_state.original_line, current_state.original_column)) |mapping| {
+ current_state.source_index = mapping.sourceIndex();
+ current_state.original_line = mapping.originalLine();
+ current_state.original_column = mapping.originalColumn();
+ }
+ }
+
+ b.appendMappingWithoutRemapping(current_state);
}
- b.updateGeneratedLineAndColumn(output);
+ pub fn appendMappingWithoutRemapping(b: *ThisBuilder, current_state: SourceMapState) void {
+ try b.source_map.append(current_state, b.prev_state);
+ b.prev_state = current_state;
+ b.has_prev_state = true;
+ }
- // If this line doesn't start with a mapping and we're about to add a mapping
- // that's not at the start, insert a mapping first so the line starts with one.
- if (b.cover_lines_without_mappings and !b.line_starts_with_mapping and b.generated_column > 0 and b.has_prev_state) {
- b.appendMappingWithoutRemapping(.{
+ pub fn addSourceMapping(b: *ThisBuilder, loc: Logger.Loc, output: []const u8) void {
+ // exclude generated code from source
+ if (b.prev_loc.eql(loc) or loc.start == Logger.Loc.Empty.start) {
+ return;
+ }
+
+ b.prev_loc = loc;
+ const list = b.line_offset_tables;
+ const original_line = LineOffsetTable.findLine(list, loc);
+ const line = list.get(@intCast(usize, @maximum(original_line, 0)));
+
+ // Use the line to compute the column
+ var original_column = loc.start - @intCast(i32, line.byte_offset_to_start_of_line);
+ if (line.columns_for_non_ascii.len > 0 and original_column >= @intCast(i32, line.byte_offset_to_first_non_ascii)) {
+ original_column = line.columns_for_non_ascii.ptr[@intCast(u32, original_column) - line.byte_offset_to_first_non_ascii];
+ }
+
+ b.updateGeneratedLineAndColumn(output);
+
+ // If this line doesn't start with a mapping and we're about to add a mapping
+ // that's not at the start, insert a mapping first so the line starts with one.
+ if (b.cover_lines_without_mappings and !b.line_starts_with_mapping and b.generated_column > 0 and b.has_prev_state) {
+ b.appendMappingWithoutRemapping(.{
+ .generated_line = b.prev_state.generated_line,
+ .generated_column = 0,
+ .source_index = b.prev_state.source_index,
+ .original_line = b.prev_state.original_line,
+ .original_column = b.prev_state.original_column,
+ });
+ }
+
+ b.appendMapping(.{
.generated_line = b.prev_state.generated_line,
- .generated_column = 0,
+ .generated_column = b.generated_column,
.source_index = b.prev_state.source_index,
- .original_line = b.prev_state.original_line,
+ .original_line = original_line,
.original_column = b.prev_state.original_column,
});
- }
- b.appendMapping(.{
- .generated_line = b.prev_state.generated_line,
- .generated_column = b.generated_column,
- .source_index = b.prev_state.source_index,
- .original_line = original_line,
- .original_column = b.prev_state.original_column,
- });
+ // This line now has a mapping on it, so don't insert another one
+ b.line_starts_with_mapping = true;
+ }
+ };
+ }
- // This line now has a mapping on it, so don't insert another one
- b.line_starts_with_mapping = true;
- }
- };
+ pub const Builder = NewBuilder(VLQSourceMap);
};
diff --git a/src/sourcemap/vlq_bench.zig b/src/sourcemap/vlq_bench.zig
index 20f88a21d..e6ea2724f 100644
--- a/src/sourcemap/vlq_bench.zig
+++ b/src/sourcemap/vlq_bench.zig
@@ -1,6 +1,137 @@
const std = @import("std");
-const SourceMap = @import("./sourcemap.zig");
/// Self-contained copy of the VLQ encoder/decoder so the benchmark does not
/// depend on the rest of the project.
const SourceMap = struct {
    const base64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

    /// Precomputed encodings for 0..255 — most columns in real source maps
    /// stay within this range, so the lookup avoids re-encoding.
    const vlq_lookup_table: [256]VLQ = brk: {
        // 256 comptime calls into encodeVLQ exceed the default branch quota.
        @setEvalBranchQuota(9999);
        var entries: [256]VLQ = undefined;
        var i: usize = 0;
        var j: i32 = 0;
        while (i < 256) : (i += 1) {
            entries[i] = encodeVLQ(j);
            j += 1;
        }
        break :brk entries;
    };

    const vlq_max_in_bytes = 8;

    pub const VLQ = struct {
        // We only need to worry about i32
        // That means the maximum VLQ-encoded value is 8 bytes
        // because there are only 4 bits of number inside each VLQ value
        // and it expects i32
        // therefore, it can never be more than 32 bits long
        // I believe the actual number is 7 bytes long, however we can add an extra byte to be more cautious
        bytes: [vlq_max_in_bytes]u8,
        len: u4 = 0,
    };

    /// Encode `value`, using the precomputed table for the common 0..255 case.
    pub fn encodeVLQWithLookupTable(
        value: i32,
    ) VLQ {
        return if (value >= 0 and value <= 255)
            vlq_lookup_table[@intCast(usize, value)]
        else
            encodeVLQ(value);
    }

    // A single base 64 digit can contain 6 bits of data. For the base 64 variable
    // length quantities we use in the source map spec, the first bit is the sign,
    // the next four bits are the actual value, and the 6th bit is the continuation
    // bit. The continuation bit tells us whether there are more digits in this
    // value following this digit.
    //
    //   Continuation
    //   |    Sign
    //   |    |
    //   V    V
    //   101011
    //
    pub fn encodeVLQ(
        value: i32,
    ) VLQ {
        var len: u4 = 0;
        var bytes: [vlq_max_in_bytes]u8 = undefined;

        // Move the sign into the lowest bit; magnitude occupies the rest.
        // NOTE(review): `-value` traps for minInt(i32); callers are expected
        // to pass source-map-sized values well inside the i32 range.
        var vlq: u32 = if (value >= 0)
            @bitCast(u32, value << 1)
        else
            @bitCast(u32, (-value << 1) | 1);

        // source mappings are limited to i32
        comptime var i: usize = 0;
        inline while (i < vlq_max_in_bytes) : (i += 1) {
            var digit = vlq & 31;
            vlq >>= 5;

            // If there are still more digits in this value, we must make sure the
            // continuation bit is marked
            if (vlq != 0) {
                digit |= 32;
            }

            bytes[len] = base64[digit];
            len += 1;

            if (vlq == 0) {
                return VLQ{
                    .bytes = bytes,
                    .len = len,
                };
            }
        }

        // Unreachable in practice: a u32 payload always terminates within 7 digits.
        return .{ .bytes = bytes, .len = 0 };
    }

    pub const VLQResult = struct {
        value: i32 = 0,
        /// Index of the first byte AFTER the decoded quantity.
        start: usize = 0,
    };

    // base64 stores values up to 7 bits
    const base64_lut: [std.math.maxInt(u7)]u7 = brk: {
        @setEvalBranchQuota(9999);
        var bytes = [_]u7{std.math.maxInt(u7)} ** std.math.maxInt(u7);

        for (base64) |c, i| {
            bytes[c] = i;
        }

        break :brk bytes;
    };

    /// Decode one VLQ value from `encoded` beginning at `start`.
    /// Returns the value and the offset just past the consumed bytes.
    pub fn decodeVLQ(encoded: []const u8, start: usize) VLQResult {
        var shift: u8 = 0;
        var vlq: u32 = 0;

        // hint to the compiler what the maximum value is
        const encoded_ = encoded[start..][0..@minimum(encoded.len - start, comptime (vlq_max_in_bytes + 1))];

        // inlining helps for the 1 or 2 byte case, hurts a little for larger
        comptime var i: usize = 0;
        inline while (i < vlq_max_in_bytes + 1) : (i += 1) {
            // FIX: the original indexed `encoded_[i]` unconditionally, which
            // panics (index out of bounds) on truncated input whose last byte
            // still has the continuation bit set.
            if (i >= encoded_.len) {
                return VLQResult{ .start = start + encoded_.len, .value = 0 };
            }

            const index = @as(u32, base64_lut[@truncate(u7, encoded_[i])]);

            // decode a byte
            vlq |= (index & 31) << @truncate(u5, shift);
            shift += 5;

            // Stop if there's no continuation bit
            if ((index & 32) == 0) {
                return VLQResult{
                    // FIX: was `i + start`, which pointed AT the last consumed
                    // byte; the byte at offset i was consumed, so the next
                    // decode must begin one past it (matching the
                    // one-past-the-end convention of the fallthrough below).
                    .start = start + i + 1,
                    .value = if ((vlq & 1) == 0)
                        @intCast(i32, vlq >> 1)
                    else
                        -@intCast(i32, (vlq >> 1)),
                };
            }
        }

        return VLQResult{ .start = start + encoded_.len, .value = 0 };
    }
};
pub fn main() anyerror!void {
const args = try std.process.argsAlloc(std.heap.c_allocator);
@@ -8,6 +139,7 @@ pub fn main() anyerror!void {
var numbers = try std.heap.c_allocator.alloc(i32, how_many);
var results = try std.heap.c_allocator.alloc(SourceMap.VLQ, how_many);
+ var leb_buf = try std.heap.c_allocator.alloc(u8, how_many * 8);
const byte_size = std.mem.sliceAsBytes(numbers).len;
var rand = std.rand.DefaultPrng.init(0);
@@ -49,7 +181,29 @@ pub fn main() anyerror!void {
std.debug.print("[{d}] decode: {} in {}\n", .{ how_many, std.fmt.fmtIntSizeDec(byte_size), std.fmt.fmtDuration(elapsed) });
}
- std.debug.print("\nNumbers between 0 - 8096 (most columns won't exceed 255):\n\n", .{});
+ {
+ var timer = try std.time.Timer.start();
+ var stream = std.io.fixedBufferStream(leb_buf);
+ var writer = stream.writer();
+ for (numbers) |n| {
+ std.leb.writeILEB128(writer, n) catch unreachable;
+ }
+ const elapsed = timer.read();
+ std.debug.print("[{d}] ILEB128 encode: {} in {}\n", .{ how_many, std.fmt.fmtIntSizeDec(byte_size), std.fmt.fmtDuration(elapsed) });
+ }
+
+ {
+ var timer = try std.time.Timer.start();
+ var stream = std.io.fixedBufferStream(leb_buf);
+ var reader = stream.reader();
+ for (numbers) |_, i| {
+ numbers[i] = std.leb.readILEB128(i32, reader) catch unreachable;
+ }
+ const elapsed = timer.read();
+ std.debug.print("[{d}] ILEB128 decode: {} in {}\n", .{ how_many, std.fmt.fmtIntSizeDec(byte_size), std.fmt.fmtDuration(elapsed) });
+ }
+
+ std.debug.print("\nNumbers between 0 - 8096:\n\n", .{});
for (numbers) |_, i| {
numbers[i] = rand.random().intRangeAtMost(i32, 0, 8096);
@@ -86,7 +240,29 @@ pub fn main() anyerror!void {
std.debug.print("[{d}] decode: {} in {}\n", .{ how_many, std.fmt.fmtIntSizeDec(byte_size), std.fmt.fmtDuration(elapsed) });
}
- std.debug.print("\nNumbers between 0 - 255 (most columns won't exceed 255):\n\n", .{});
+ {
+ var timer = try std.time.Timer.start();
+ var stream = std.io.fixedBufferStream(leb_buf);
+ var writer = stream.writer();
+ for (numbers) |n| {
+ std.leb.writeILEB128(writer, n) catch unreachable;
+ }
+ const elapsed = timer.read();
+ std.debug.print("[{d}] ILEB128 encode: {} in {}\n", .{ how_many, std.fmt.fmtIntSizeDec(byte_size), std.fmt.fmtDuration(elapsed) });
+ }
+
+ {
+ var timer = try std.time.Timer.start();
+ var stream = std.io.fixedBufferStream(leb_buf);
+ var reader = stream.reader();
+ for (numbers) |_, i| {
+ numbers[i] = std.leb.readILEB128(i32, reader) catch unreachable;
+ }
+ const elapsed = timer.read();
+ std.debug.print("[{d}] ILEB128 decode: {} in {}\n", .{ how_many, std.fmt.fmtIntSizeDec(byte_size), std.fmt.fmtDuration(elapsed) });
+ }
+
+ std.debug.print("\nNumbers between 0 - 255:\n\n", .{});
for (numbers) |_, i| {
numbers[i] = rand.random().intRangeAtMost(i32, 0, 255);
@@ -122,4 +298,61 @@ pub fn main() anyerror!void {
const elapsed = timer.read();
std.debug.print("[{d}] decode: {} in {}\n", .{ how_many, std.fmt.fmtIntSizeDec(byte_size), std.fmt.fmtDuration(elapsed) });
}
+
+ {
+ var timer = try std.time.Timer.start();
+ var stream = std.io.fixedBufferStream(leb_buf);
+ var writer = stream.writer();
+ for (numbers) |n| {
+ std.leb.writeILEB128(writer, n) catch unreachable;
+ }
+ const elapsed = timer.read();
+ std.debug.print("[{d}] ILEB128 encode: {} in {}\n", .{ how_many, std.fmt.fmtIntSizeDec(byte_size), std.fmt.fmtDuration(elapsed) });
+ }
+
+ {
+ var timer = try std.time.Timer.start();
+ var stream = std.io.fixedBufferStream(leb_buf);
+ var reader = stream.reader();
+ for (numbers) |_, i| {
+ numbers[i] = std.leb.readILEB128(i32, reader) catch unreachable;
+ }
+ const elapsed = timer.read();
+ std.debug.print("[{d}] ILEB128 decode: {} in {}\n", .{ how_many, std.fmt.fmtIntSizeDec(byte_size), std.fmt.fmtDuration(elapsed) });
+ }
+}
+
test "encodeVLQ" {
    // Each input must encode to exactly the expected base64-VLQ text,
    // covering the extremes, zero, sign handling, and multi-byte values.
    const cases = .{
        .{ 2_147_483_647, "+/////D" },
        .{ -2_147_483_647, "//////D" },
        .{ 0, "A" },
        .{ 1, "C" },
        .{ -1, "D" },
        .{ 123, "2H" },
        .{ 123456789, "qxmvrH" },
    };
    inline for (cases) |case| {
        const encoded = SourceMap.encodeVLQ(case[0]);
        try std.testing.expectEqualStrings(case[1], encoded.bytes[0..encoded.len]);
    }
}
+
test "decodeVLQ" {
    // Inverse of the encodeVLQ fixtures: each base64-VLQ string must decode
    // back to the original integer.
    const fixtures = .{
        .{ 2_147_483_647, "+/////D" },
        .{ -2_147_483_647, "//////D" },
        .{ 0, "A" },
        .{ 1, "C" },
        .{ -1, "D" },
        .{ 123, "2H" },
        .{ 123456789, "qxmvrH" },
    };
    inline for (fixtures) |fixture| {
        const result = SourceMap.decodeVLQ(fixture[1], 0);
        // FIX: std.testing.expectEqual takes (expected, actual); the original
        // passed them swapped, which inverts the "expected X, found Y" failure
        // message. The @as cast lets the comptime-int fixture coerce to i32.
        try std.testing.expectEqual(
            @as(i32, fixture[0]),
            result.value,
        );
    }
}