author     Dylan Conway <35280289+dylan-conway@users.noreply.github.com>  2023-04-30 22:09:40 -0700
committer  GitHub <noreply@github.com>  2023-04-30 22:09:40 -0700
commit     0490bd3d2ee7b9b7fb2748be52dbbb6d49b21673 (patch)
tree       52792ea88e4a132ee1d5094a38ea4fd6b28ce711
parent     137dc6e19fedc31e32e25e1a8d7d04070f1a9a27 (diff)
Implement source maps (#2770)
* wip
* Begin computing source map tables
* source map progress
* external source maps
* fix merge
* remove `@as` coercion
* inline source maps and output source map shifts
* fix `codeWithSourceMapShifts()` after merge
* remove second call to `findReachableFiles()`
* use `worker.allocator`, remove comment
* don't reuse memory for source and sourceContents
* don't reuse `quote_buf`
* fix writing to stdout
* Add comment
* Don't include a sourcemap if the text was empty
* Make the parser faster
* +16% faster sourcemap generation

  @dylan-conway I'll need you to look this over to make sure I didn't mess anything up

  Though it currently doesn't generate the offsets in the right order...

* 30% performance improvement to Bun.build()
* Print `debugId` in source maps

  cc @mitsuhiko @notzeeg

---------

Co-authored-by: Jarred Sumner <709451+Jarred-Sumner@users.noreply.github.com>
-rw-r--r--  src/__global.zig                 1
-rw-r--r--  src/bun.js/api/JSBundler.zig     2
-rw-r--r--  src/bundler.zig                  1
-rw-r--r--  src/bundler/bundle_v2.zig      891
-rw-r--r--  src/cli/build_command.zig       34
-rw-r--r--  src/feature_flags.zig            5
-rw-r--r--  src/js_ast.zig                   6
-rw-r--r--  src/js_parser.zig               34
-rw-r--r--  src/js_printer.zig             126
-rw-r--r--  src/options.zig                  6
-rw-r--r--  src/renamer.zig                 15
-rw-r--r--  src/sourcemap/sourcemap.zig    181
-rw-r--r--  src/string_mutable.zig           4
-rw-r--r--  src/thread_pool.zig              4
14 files changed, 1100 insertions, 210 deletions
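As a rough sketch of the output format the commit message describes (assumptions only: the file name, code, id, and mappings below are placeholders, not produced by this commit), a chunk built with inline source maps ends with a debugId comment followed by a sourceMappingURL data URL, matching the string fragments in codeWithSourceMapShifts and generateSourceMapForChunk in the diff below:

    // ...bundled JavaScript for the chunk...
    console.log("hello");
    //# debugId=<id formatted from the chunk's isolated_hash>
    //# sourceMappingURL=data:application/json;base64,<base64 of the JSON below>

    // Decoding the data URL gives a v3 source map with the extra "debugId" field:
    // {
    //   "version": 3,
    //   "sources": ["../src/index.ts"],
    //   "sourcesContent": ["console.log(\"hello\");\n"],
    //   "mappings": "AAAA,...",
    //   "debugId": "<same id as the //# debugId comment>",
    //   "names": []
    // }

With the external setting, the same JSON is instead written beside the chunk as <final_rel_path>.map.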
diff --git a/src/__global.zig b/src/__global.zig
index 3dc4ab538..9e2887cce 100644
--- a/src/__global.zig
+++ b/src/__global.zig
@@ -61,6 +61,7 @@ pub fn setThreadName(name: StringTypes.stringZ) void {
}
}
+/// Flushes stdout and stderr and exits with the given code.
pub fn exit(code: u8) noreturn {
Output.flush();
std.os.exit(code);
diff --git a/src/bun.js/api/JSBundler.zig b/src/bun.js/api/JSBundler.zig
index 022c83cb4..87fc29efc 100644
--- a/src/bun.js/api/JSBundler.zig
+++ b/src/bun.js/api/JSBundler.zig
@@ -62,7 +62,7 @@ pub const JSBundler = struct {
names: Names = .{},
label: OwnedString = OwnedString.initEmpty(bun.default_allocator),
external: bun.StringSet = bun.StringSet.init(bun.default_allocator),
- sourcemap: options.SourceMapOption = .none,
+ source_map: options.SourceMapOption = .none,
public_path: OwnedString = OwnedString.initEmpty(bun.default_allocator),
pub const List = bun.StringArrayHashMapUnmanaged(Config);
diff --git a/src/bundler.zig b/src/bundler.zig
index b6f9eac26..fa3cca0d7 100644
--- a/src/bundler.zig
+++ b/src/bundler.zig
@@ -366,6 +366,7 @@ pub const Bundler = struct {
elapsed: u64 = 0,
needs_runtime: bool = false,
router: ?Router = null,
+ source_map: options.SourceMapOption = .none,
linker: Linker,
timer: SystemTimer = undefined,
diff --git a/src/bundler/bundle_v2.zig b/src/bundler/bundle_v2.zig
index ee5e6056b..661aafd9d 100644
--- a/src/bundler/bundle_v2.zig
+++ b/src/bundler/bundle_v2.zig
@@ -64,6 +64,9 @@ const json_parser = @import("../json_parser.zig");
const js_printer = @import("../js_printer.zig");
const js_ast = @import("../js_ast.zig");
const linker = @import("../linker.zig");
+const sourcemap = bun.sourcemap;
+const Joiner = bun.Joiner;
+const base64 = bun.base64;
const Ref = @import("../ast/base.zig").Ref;
const Define = @import("../defines.zig").Define;
const DebugOptions = @import("../cli.zig").Command.DebugOptions;
@@ -205,6 +208,9 @@ pub const ThreadPool = struct {
deinit_task: ThreadPoolLib.Task = .{ .callback = deinitCallback },
+ temporary_arena: std.heap.ArenaAllocator = undefined,
+ stmt_list: LinkerContext.StmtList = undefined,
+
pub fn deinitCallback(task: *ThreadPoolLib.Task) void {
debug("Worker.deinit()", .{});
var this = @fieldParentPtr(Worker, "deinit_task", task);
@@ -282,6 +288,8 @@ pub const ThreadPool = struct {
this.data.bundler.linker.resolver = &this.data.bundler.resolver;
this.data.bundler.macro_context = js_ast.Macro.MacroContext.init(&this.data.bundler);
this.data.macro_context = this.data.bundler.macro_context.?;
+ this.temporary_arena = std.heap.ArenaAllocator.init(this.allocator);
+ this.stmt_list = LinkerContext.StmtList.init(this.allocator);
const CacheSet = @import("../cache.zig");
@@ -663,6 +671,7 @@ pub const BundleV2 = struct {
generator.linker.options.minify_syntax = bundler.options.minify_syntax;
generator.linker.options.minify_identifiers = bundler.options.minify_identifiers;
generator.linker.options.minify_whitespace = bundler.options.minify_whitespace;
+ generator.linker.options.source_maps = bundler.options.source_map;
var pool = try generator.graph.allocator.create(ThreadPool);
if (enable_reloading) {
@@ -1322,7 +1331,7 @@ pub const BundleV2 = struct {
bundler.options.minify_whitespace = config.minify.whitespace;
bundler.options.minify_identifiers = config.minify.identifiers;
bundler.options.inlining = config.minify.syntax;
- bundler.options.sourcemap = config.sourcemap;
+ bundler.options.source_map = config.source_map;
try bundler.configureDefines();
bundler.configureLinker();
@@ -3087,6 +3096,8 @@ const LinkerGraph = struct {
/// may be "entryPointUserSpecified" instead of "entryPointDynamicImport".
entry_point_kind: EntryPoint.Kind = .none,
+ line_offset_table: bun.sourcemap.LineOffsetTable.List = .{},
+
pub fn isEntryPoint(this: *const File) bool {
return this.entry_point_kind.isEntryPoint();
}
@@ -3132,6 +3143,12 @@ const LinkerContext = struct {
/// string buffer containing prefix for each unique keys
unique_key_prefix: string = "",
+ source_maps: SourceMapData = .{},
+
+ /// This will eventually be used for reference-counting LinkerContext
+ /// to know whether or not we can free it safely.
+ pending_task_count: std.atomic.Atomic(u32) = std.atomic.Atomic(u32).init(0),
+
pub const LinkerOptions = struct {
output_format: options.OutputFormat = .esm,
ignore_dce_annotations: bool = false,
@@ -3139,6 +3156,7 @@ const LinkerContext = struct {
minify_whitespace: bool = false,
minify_syntax: bool = false,
minify_identifiers: bool = false,
+ source_maps: options.SourceMapOption = .none,
mode: Mode = Mode.bundle,
@@ -3150,6 +3168,43 @@ const LinkerContext = struct {
};
};
+ pub const SourceMapData = struct {
+ wait_group: sync.WaitGroup = undefined,
+ tasks: []Task = &.{},
+
+ pub const Task = struct {
+ ctx: *LinkerContext,
+ source_index: Index.Int,
+ thread_task: ThreadPoolLib.Task = .{ .callback = &run },
+
+ pub fn run(thread_task: *ThreadPoolLib.Task) void {
+ var task = @fieldParentPtr(Task, "thread_task", thread_task);
+ defer {
+ task.ctx.markPendingTaskDone();
+ task.ctx.source_maps.wait_group.finish();
+ }
+
+ SourceMapData.compute(task.ctx, ThreadPool.Worker.get(@fieldParentPtr(BundleV2, "linker", task.ctx)).allocator, task.source_index);
+ }
+ };
+
+ pub fn compute(this: *LinkerContext, allocator: std.mem.Allocator, source_index: Index.Int) void {
+ debug("Computing LineOffsetTable: {d}", .{source_index});
+ var line_offset_table: *bun.sourcemap.LineOffsetTable.List = &this.graph.files.items(.line_offset_table)[source_index];
+ const source: *const Logger.Source = &this.parse_graph.input_files.items(.source)[source_index];
+
+ const approximate_line_count = this.graph.ast.items(.approximate_newline_count)[source_index];
+
+ line_offset_table.* = bun.sourcemap.LineOffsetTable.generate(
+ allocator,
+ source.contents,
+
+ // We don't support sourcemaps for source files with more than 2^31 lines
+ @intCast(i32, @truncate(u31, approximate_line_count)),
+ );
+ }
+ };
+
fn isExternalDynamicImport(this: *LinkerContext, record: *const ImportRecord, source_index: u32) bool {
return this.graph.code_splitting and
record.kind == .dynamic and
@@ -3210,6 +3265,33 @@ const LinkerContext = struct {
this.cjs_runtime_ref = runtime_named_exports.get("__commonJS").?.ref;
}
+ pub fn computeDataForSourceMap(
+ this: *LinkerContext,
+ reachable: []const Index.Int,
+ ) void {
+ this.source_maps.wait_group.init();
+ this.source_maps.wait_group.counter = @truncate(u32, reachable.len);
+ this.source_maps.tasks = this.allocator.alloc(SourceMapData.Task, reachable.len) catch unreachable;
+ var batch = ThreadPoolLib.Batch{};
+ for (reachable, this.source_maps.tasks) |source_index, *task| {
+ task.* = .{
+ .ctx = this,
+ .source_index = source_index,
+ };
+ batch.push(ThreadPoolLib.Batch.from(&task.thread_task));
+ }
+ this.scheduleTasks(batch);
+ }
+
+ pub fn scheduleTasks(this: *LinkerContext, batch: ThreadPoolLib.Batch) void {
+ _ = this.pending_task_count.fetchAdd(@truncate(u32, batch.len), .Monotonic);
+ this.parse_graph.pool.pool.schedule(batch);
+ }
+
+ pub fn markPendingTaskDone(this: *LinkerContext) void {
+ _ = this.pending_task_count.fetchSub(1, .Monotonic);
+ }
+
pub noinline fn link(
this: *LinkerContext,
bundle: *BundleV2,
@@ -3225,6 +3307,10 @@ const LinkerContext = struct {
reachable,
);
+ if (this.options.source_maps != .none) {
+ this.computeDataForSourceMap(@ptrCast([]Index.Int, reachable));
+ }
+
if (comptime FeatureFlags.help_catch_memory_issues) {
this.checkForMemoryCorruption();
}
@@ -3305,6 +3391,7 @@ const LinkerContext = struct {
.content = .{
.javascript = .{},
},
+ .output_source_map = sourcemap.SourceMapPieces.init(this.allocator),
};
}
var file_entry_bits: []AutoBitSet = this.graph.files.items(.entry_bits);
@@ -3337,6 +3424,7 @@ const LinkerContext = struct {
.content = .{
.javascript = .{},
},
+ .output_source_map = sourcemap.SourceMapPieces.init(this.allocator),
};
}
@@ -5504,9 +5592,13 @@ const LinkerContext = struct {
wg: *sync.WaitGroup,
c: *LinkerContext,
chunks: []Chunk,
+ chunk: *Chunk,
};
fn generateChunkJS(ctx: GenerateChunkCtx, chunk: *Chunk, chunk_index: usize) void {
- generateChunkJS_(ctx, chunk, chunk_index) catch |err| Output.panic("TODO: handle error: {s}", .{@errorName(err)});
+ defer ctx.wg.finish();
+ const worker = ThreadPool.Worker.get(@fieldParentPtr(BundleV2, "linker", ctx.c));
+ defer worker.unget();
+ postProcessJSChunk(ctx, worker, chunk, chunk_index) catch |err| Output.panic("TODO: handle error: {s}", .{@errorName(err)});
}
// TODO: investigate if we need to parallelize this function
@@ -5759,17 +5851,36 @@ const LinkerContext = struct {
return r.toRenamer();
}
- fn generateChunkJS_(ctx: GenerateChunkCtx, chunk: *Chunk, chunk_index: usize) !void {
- _ = chunk_index;
+ fn generateJSRenamer(ctx: GenerateChunkCtx, chunk: *Chunk, chunk_index: usize) void {
defer ctx.wg.finish();
var worker = ThreadPool.Worker.get(@fieldParentPtr(BundleV2, "linker", ctx.c));
defer worker.unget();
+ generateJSRenamer_(ctx, worker, chunk, chunk_index);
+ }
- const allocator = worker.allocator;
- const c = ctx.c;
- std.debug.assert(chunk.content == .javascript);
+ fn generateJSRenamer_(ctx: GenerateChunkCtx, worker: *ThreadPool.Worker, chunk: *Chunk, chunk_index: usize) void {
+ _ = chunk_index;
+ chunk.renamer = ctx.c.renameSymbolsInChunk(
+ worker.allocator,
+ chunk,
+ chunk.content.javascript.files_in_chunk_order,
+ ) catch @panic("TODO: handle error");
+ }
- var repr = &chunk.content.javascript;
+ fn generateCompileResultForJSChunk(task: *ThreadPoolLib.Task) void {
+ const part_range: *const PendingPartRange = @fieldParentPtr(PendingPartRange, "task", task);
+ const ctx = part_range.ctx;
+ defer ctx.wg.finish();
+ var worker = ThreadPool.Worker.get(@fieldParentPtr(BundleV2, "linker", ctx.c));
+ defer worker.unget();
+ ctx.chunk.compile_results_for_chunk[part_range.i] = generateCompileResultForJSChunk_(worker, ctx.c, ctx.chunk, part_range.part_range);
+ }
+
+ fn generateCompileResultForJSChunk_(worker: *ThreadPool.Worker, c: *LinkerContext, chunk: *Chunk, part_range: PartRange) CompileResult {
+ var arena = &worker.temporary_arena;
+ var buffer_writer = js_printer.BufferWriter.init(worker.allocator) catch unreachable;
+ defer _ = arena.reset(.retain_capacity);
+ worker.stmt_list.reset();
var runtime_scope: *Scope = &c.graph.ast.items(.module_scope)[c.graph.files.items(.input_file)[Index.runtime.value].get()];
var runtime_members = &runtime_scope.members;
@@ -5777,71 +5888,52 @@ const LinkerContext = struct {
const toESMRef = c.graph.symbols.follow(runtime_members.get("__toESM").?.ref);
const runtimeRequireRef = c.graph.symbols.follow(runtime_members.get("__require").?.ref);
- var r = try c.renameSymbolsInChunk(allocator, chunk, repr.files_in_chunk_order);
- defer r.deinit();
- const part_ranges = repr.parts_in_chunk_in_order;
- var stmts = StmtList.init(allocator);
- defer stmts.deinit();
+ const result = c.generateCodeForFileInChunkJS(
+ &buffer_writer,
+ chunk.renamer,
+ chunk,
+ part_range,
+ toCommonJSRef,
+ toESMRef,
+ runtimeRequireRef,
+ &worker.stmt_list,
+ worker.allocator,
+ arena.allocator(),
+ );
- var arena = std.heap.ArenaAllocator.init(allocator);
- defer arena.deinit();
- var compile_results = std.ArrayList(CompileResult).initCapacity(allocator, part_ranges.len) catch unreachable;
- {
- defer _ = arena.reset(.retain_capacity);
+ return .{
+ .javascript = .{
+ .result = result,
+ .source_index = part_range.source_index.get(),
+ },
+ };
+ }
- var buffer_writer = js_printer.BufferWriter.init(allocator) catch unreachable;
+ // This runs after we've already populated the compile results
+ fn postProcessJSChunk(ctx: GenerateChunkCtx, worker: *ThreadPool.Worker, chunk: *Chunk, chunk_index: usize) !void {
+ _ = chunk_index;
+ const allocator = worker.allocator;
+ const c = ctx.c;
+ std.debug.assert(chunk.content == .javascript);
- for (part_ranges, 0..) |part_range, i| {
- if (i > 0) _ = arena.reset(.retain_capacity);
- const result = c.generateCodeForFileInChunkJS(
- &buffer_writer,
- r,
- chunk,
- part_range,
- toCommonJSRef,
- toESMRef,
- runtimeRequireRef,
- &stmts,
- allocator,
- arena.allocator(),
- );
+ js_ast.Expr.Data.Store.create(bun.default_allocator);
+ js_ast.Stmt.Data.Store.create(bun.default_allocator);
- if (i < part_ranges.len - 1) {
- compile_results.appendAssumeCapacity(
- // we reuse the memory buffer up until the final chunk to minimize reallocations
- .{
- .javascript = .{
- .result = result.clone(allocator) catch unreachable,
- .source_index = part_range.source_index.get(),
- },
- },
- );
- } else {
- if (comptime Environment.allow_assert) {
- if (result == .result) {
- if (buffer_writer.buffer.list.capacity > result.result.code.len) {
- // add a 0 to make it easier to view the code in a debugger
- // but only if room
- buffer_writer.buffer.list.items.ptr[result.result.code.len] = 0;
- }
- }
- }
+ defer chunk.renamer.deinit(bun.default_allocator);
- // the final chunk owns the memory buffer
- compile_results.appendAssumeCapacity(.{
- .javascript = .{
- .result = result.clone(allocator) catch unreachable,
- .source_index = part_range.source_index.get(),
- },
- });
- }
- }
- }
+ var arena = std.heap.ArenaAllocator.init(allocator);
+ defer arena.deinit();
// Also generate the cross-chunk binding code
var cross_chunk_prefix: []u8 = &.{};
var cross_chunk_suffix: []u8 = &.{};
+ var runtime_scope: *Scope = &c.graph.ast.items(.module_scope)[c.graph.files.items(.input_file)[Index.runtime.value].get()];
+ var runtime_members = &runtime_scope.members;
+ const toCommonJSRef = c.graph.symbols.follow(runtime_members.get("__toCommonJS").?.ref);
+ const toESMRef = c.graph.symbols.follow(runtime_members.get("__toESM").?.ref);
+ const runtimeRequireRef = c.graph.symbols.follow(runtime_members.get("__require").?.ref);
+
{
const indent: usize = 0;
// TODO: IIFE indent
@@ -5870,25 +5962,33 @@ const LinkerContext = struct {
);
}
+ const ast = c.graph.ast.get(chunk.entry_point.source_index);
+
cross_chunk_prefix = js_printer.print(
allocator,
c.resolver.opts.target,
+ ast,
+ c.source_(chunk.entry_point.source_index),
print_options,
cross_chunk_import_records.slice(),
&[_]js_ast.Part{
.{ .stmts = chunk.content.javascript.cross_chunk_prefix_stmts.slice() },
},
- r,
+ chunk.renamer,
+ false,
).result.code;
cross_chunk_suffix = js_printer.print(
allocator,
c.resolver.opts.target,
+ ast,
+ c.source_(chunk.entry_point.source_index),
print_options,
&.{},
&[_]js_ast.Part{
.{ .stmts = chunk.content.javascript.cross_chunk_suffix_stmts.slice() },
},
- r,
+ chunk.renamer,
+ false,
).result.code;
}
@@ -5901,7 +6001,7 @@ const LinkerContext = struct {
chunk.entry_point.source_index,
allocator,
arena.allocator(),
- r,
+ chunk.renamer,
);
}
@@ -5915,7 +6015,8 @@ const LinkerContext = struct {
.input = chunk.unique_key,
},
};
- var line_offset: bun.sourcemap.LineColumnOffset.Optional = .{ .null = {} };
+
+ var line_offset: bun.sourcemap.LineColumnOffset.Optional = if (c.options.source_maps != .none) .{ .value = .{} } else .{ .null = {} };
// Concatenate the generated JavaScript chunks together
@@ -5954,9 +6055,13 @@ const LinkerContext = struct {
// Concatenate the generated JavaScript chunks together
var prev_filename_comment: Index.Int = 0;
+ const compile_results = chunk.compile_results_for_chunk;
+ var compile_results_for_source_map = std.MultiArrayList(CompileResultForSourceMap){};
+
+ compile_results_for_source_map.ensureUnusedCapacity(allocator, compile_results.len) catch unreachable;
const sources: []const Logger.Source = c.parse_graph.input_files.items(.source);
- for (@as([]CompileResult, compile_results.items)) |compile_result| {
+ for (@as([]CompileResult, compile_results)) |compile_result| {
const source_index = compile_result.sourceIndex();
const is_runtime = source_index == Index.runtime.value;
@@ -6017,10 +6122,25 @@ const LinkerContext = struct {
line_offset.advance(compile_result.code());
j.append(compile_result.code(), 0, bun.default_allocator);
} else {
- line_offset.advance(compile_result.code());
j.append(compile_result.code(), 0, bun.default_allocator);
- // TODO: sourcemap
+ var generated_offset = line_offset;
+ line_offset.reset();
+
+ if (c.options.source_maps != .none) {
+ switch (compile_result.javascript.result) {
+ .result => |res| {
+ if (res.source_map) |source_map| {
+ try compile_results_for_source_map.append(allocator, CompileResultForSourceMap{
+ .source_map_chunk = source_map,
+ .generated_offset = generated_offset.value,
+ .source_index = compile_result.sourceIndex(),
+ });
+ }
+ },
+ else => {},
+ }
+ }
}
// TODO: metafile
@@ -6075,6 +6195,145 @@ const LinkerContext = struct {
chunk.isolated_hash = c.generateIsolatedHash(chunk);
chunk.is_executable = is_executable;
+
+ if (c.options.source_maps != .none) {
+ const can_have_shifts = chunk.intermediate_output == .pieces;
+ chunk.output_source_map = try c.generateSourceMapForChunk(
+ chunk.isolated_hash,
+ worker,
+ compile_results_for_source_map,
+ c.resolver.opts.output_dir,
+ can_have_shifts,
+ );
+ }
+ }
+
+ pub fn generateSourceMapForChunk(
+ c: *LinkerContext,
+ isolated_hash: u64,
+ worker: *ThreadPool.Worker,
+ results: std.MultiArrayList(CompileResultForSourceMap),
+ chunk_abs_dir: string,
+ can_have_shifts: bool,
+ ) !sourcemap.SourceMapPieces {
+ std.debug.assert(results.len > 0);
+
+ var j = Joiner{};
+ const sources = c.parse_graph.input_files.items(.source);
+
+ var source_index_to_sources_index = std.AutoHashMap(u32, u32).init(worker.allocator);
+ defer source_index_to_sources_index.deinit();
+ var next_source_index: u32 = 0;
+ const source_indices = results.items(.source_index);
+
+ j.push("{\n \"version\": 3,\n \"sources\": [");
+ if (source_indices.len > 0) {
+ {
+ var path = sources[source_indices[0]].path;
+
+ if (strings.eqlComptime(path.namespace, "file")) {
+ const rel_path = try std.fs.path.relative(worker.allocator, chunk_abs_dir, path.text);
+ path.pretty = rel_path;
+ }
+
+ var quote_buf = try MutableString.init(worker.allocator, path.pretty.len + 2);
+ quote_buf = try js_printer.quoteForJSON(path.pretty, quote_buf, false);
+ j.push(quote_buf.list.items);
+ }
+ if (source_indices.len > 1) {
+ for (source_indices[1..]) |index| {
+ var path = sources[index].path;
+
+ if (strings.eqlComptime(path.namespace, "file")) {
+ const rel_path = try std.fs.path.relative(worker.allocator, chunk_abs_dir, path.text);
+ path.pretty = rel_path;
+ }
+
+ var quote_buf = try MutableString.init(worker.allocator, path.pretty.len + ", ".len + 2);
+ quote_buf.appendAssumeCapacity(", ");
+ quote_buf = try js_printer.quoteForJSON(path.pretty, quote_buf, false);
+ j.push(quote_buf.list.items);
+ }
+ }
+ }
+
+ j.push("],\n \"sourcesContent\": [\n ");
+
+ if (source_indices.len > 0) {
+ {
+ const contents = sources[source_indices[0]].contents;
+ var quote_buf = try MutableString.init(worker.allocator, contents.len);
+ quote_buf = try js_printer.quoteForJSON(contents, quote_buf, false);
+ j.push(quote_buf.list.items);
+ }
+
+ if (source_indices.len > 1) {
+ for (source_indices[1..]) |index| {
+ const contents = sources[index].contents;
+ var quote_buf = try MutableString.init(worker.allocator, contents.len + 2 + ", ".len);
+ quote_buf.appendAssumeCapacity(",\n ");
+ quote_buf = try js_printer.quoteForJSON(contents, quote_buf, false);
+ j.push(quote_buf.list.items);
+ }
+ }
+ }
+ j.push("\n], \"mappings\": \"");
+
+ var mapping_start = j.len;
+ var prev_end_state = sourcemap.SourceMapState{};
+ var prev_column_offset: i32 = 0;
+ const source_map_chunks = results.items(.source_map_chunk);
+ const offsets = results.items(.generated_offset);
+ for (source_map_chunks, offsets, source_indices) |chunk, offset, current_source_index| {
+ var res = try source_index_to_sources_index.getOrPut(current_source_index);
+ if (res.found_existing) continue;
+ res.value_ptr.* = next_source_index;
+ const source_index = @intCast(i32, next_source_index);
+ next_source_index += 1;
+
+ var start_state = sourcemap.SourceMapState{
+ .source_index = source_index,
+ .generated_line = offset.lines,
+ .generated_column = offset.columns,
+ };
+
+ if (offset.lines == 0) {
+ start_state.generated_column += prev_column_offset;
+ }
+
+ try sourcemap.appendSourceMapChunk(&j, worker.allocator, prev_end_state, start_state, chunk.buffer.list.items);
+
+ prev_end_state = chunk.end_state;
+ prev_end_state.source_index = source_index;
+ prev_column_offset = chunk.final_generated_column;
+
+ if (prev_end_state.generated_line == 0) {
+ prev_end_state.generated_column += start_state.generated_column;
+ prev_column_offset += start_state.generated_column;
+ }
+ }
+ const mapping_end = j.len;
+
+ if (comptime FeatureFlags.source_map_debug_id) {
+ j.push("\",\n \"debugId\": \"");
+ j.push(try std.fmt.allocPrint(worker.allocator, "{}", .{bun.sourcemap.DebugIDFormatter{ .id = isolated_hash }}));
+ j.push("\",\n \"names\": []\n}");
+ } else {
+ j.push("\",\n \"names\": []\n}");
+ }
+
+ const done = try j.done(worker.allocator);
+
+ var pieces = sourcemap.SourceMapPieces.init(worker.allocator);
+ if (can_have_shifts) {
+ try pieces.prefix.appendSlice(done[0..mapping_start]);
+ try pieces.mappings.appendSlice(done[mapping_start..mapping_end]);
+ try pieces.suffix.appendSlice(done[mapping_end..]);
+ } else {
+ try pieces.prefix.appendSlice(done);
+ }
+
+ return pieces;
}
pub fn generateIsolatedHash(c: *LinkerContext, chunk: *const Chunk) u64 {
@@ -6489,6 +6748,8 @@ const LinkerContext = struct {
.result = js_printer.print(
allocator,
c.resolver.opts.target,
+ ast,
+ c.source_(source_index),
print_options,
ast.import_records.slice(),
&[_]js_ast.Part{
@@ -6497,13 +6758,14 @@ const LinkerContext = struct {
},
},
r,
+ false,
),
.source_index = source_index,
},
};
}
- const StmtList = struct {
+ pub const StmtList = struct {
inside_wrapper_prefix: std.ArrayList(Stmt),
outside_wrapper_prefix: std.ArrayList(Stmt),
inside_wrapper_suffix: std.ArrayList(Stmt),
@@ -7727,7 +7989,7 @@ const LinkerContext = struct {
},
};
- const print_options = js_printer.Options{
+ var print_options = js_printer.Options{
// TODO: IIFE
.indent = 0,
@@ -7747,6 +8009,7 @@ const LinkerContext = struct {
requireOrImportMetaForSource,
c,
),
+ .line_offset_tables = c.graph.files.items(.line_offset_table)[part_range.source_index.get()],
};
writer.buffer.reset();
@@ -7755,17 +8018,31 @@ const LinkerContext = struct {
);
defer writer.* = printer.ctx;
- return js_printer.printWithWriter(
- *js_printer.BufferPrinter,
- &printer,
- ast.target,
- print_options,
- ast.import_records.slice(),
- parts_to_print,
- r,
- );
+ switch (c.options.source_maps != .none and !part_range.source_index.isRuntime()) {
+ inline else => |enable_source_maps| {
+ return js_printer.printWithWriter(
+ *js_printer.BufferPrinter,
+ &printer,
+ ast.target,
+ ast,
+ c.source_(part_range.source_index.get()),
+ print_options,
+ ast.import_records.slice(),
+ parts_to_print,
+ r,
+ enable_source_maps,
+ );
+ },
+ }
}
+ const PendingPartRange = struct {
+ part_range: PartRange,
+ task: ThreadPoolLib.Task,
+ ctx: *GenerateChunkCtx,
+ i: u32 = 0,
+ };
+
fn requireOrImportMetaForSource(
c: *LinkerContext,
source_index: Index.Int,
@@ -7781,10 +8058,15 @@ const LinkerContext = struct {
};
}
+ const SubstituteChunkFinalPathResult = struct {
+ j: Joiner,
+ shifts: []sourcemap.SourceMapShifts,
+ };
+
pub fn generateChunksInParallel(c: *LinkerContext, chunks: []Chunk) !std.ArrayList(options.OutputFile) {
{
- debug("START Generating {d} chunks in parallel", .{chunks.len});
- defer debug(" DONE Generating {d} chunks in parallel", .{chunks.len});
+ debug(" START Generating {d} renamers in parallel", .{chunks.len});
+ defer debug(" DONE Generating {d} renamers in parallel", .{chunks.len});
var wait_group = try c.allocator.create(sync.WaitGroup);
wait_group.init();
defer {
@@ -7792,8 +8074,68 @@ const LinkerContext = struct {
c.allocator.destroy(wait_group);
}
wait_group.counter = @truncate(u32, chunks.len);
- var ctx = GenerateChunkCtx{ .wg = wait_group, .c = c, .chunks = chunks };
- try c.parse_graph.pool.pool.doPtr(c.allocator, wait_group, ctx, generateChunkJS, chunks);
+ var ctx = GenerateChunkCtx{ .chunk = &chunks[0], .wg = wait_group, .c = c, .chunks = chunks };
+ try c.parse_graph.pool.pool.doPtr(c.allocator, wait_group, ctx, generateJSRenamer, chunks);
+ }
+
+ {
+ debug(" START waiting for {d} source maps", .{chunks.len});
+ defer debug(" DONE waiting for {d} source maps", .{chunks.len});
+ c.source_maps.wait_group.wait();
+ c.allocator.free(c.source_maps.tasks);
+ c.source_maps.tasks.len = 0;
+ }
+ {
+ var chunk_contexts = c.allocator.alloc(GenerateChunkCtx, chunks.len) catch unreachable;
+ defer c.allocator.free(chunk_contexts);
+ var wait_group = try c.allocator.create(sync.WaitGroup);
+ wait_group.init();
+ defer {
+ wait_group.deinit();
+ c.allocator.destroy(wait_group);
+ }
+ {
+ var total_count: usize = 0;
+ for (chunks, chunk_contexts) |*chunk, *chunk_ctx| {
+ chunk_ctx.* = .{ .wg = wait_group, .c = c, .chunks = chunks, .chunk = chunk };
+ total_count += chunk.content.javascript.parts_in_chunk_in_order.len;
+ chunk.compile_results_for_chunk = c.allocator.alloc(CompileResult, chunk.content.javascript.parts_in_chunk_in_order.len) catch unreachable;
+ }
+
+ debug(" START waiting for {d} compiling part ranges", .{total_count});
+ defer debug(" DONE waiting for {d} compiling part ranges", .{total_count});
+ var combined_part_ranges = c.allocator.alloc(PendingPartRange, total_count) catch unreachable;
+ defer c.allocator.free(combined_part_ranges);
+ var remaining_part_ranges = combined_part_ranges;
+ var batch = ThreadPoolLib.Batch{};
+ for (chunks, chunk_contexts) |*chunk, *chunk_ctx| {
+ for (chunk.content.javascript.parts_in_chunk_in_order, 0..) |part_range, i| {
+ remaining_part_ranges[0] = .{
+ .part_range = part_range,
+ .i = @truncate(u32, i),
+ .task = ThreadPoolLib.Task{
+ .callback = &generateCompileResultForJSChunk,
+ },
+ .ctx = chunk_ctx,
+ };
+ batch.push(ThreadPoolLib.Batch.from(&remaining_part_ranges[0].task));
+
+ remaining_part_ranges = remaining_part_ranges[1..];
+ }
+ }
+ wait_group.counter = @truncate(u32, total_count);
+ c.parse_graph.pool.pool.schedule(batch);
+ wait_group.wait();
+ }
+
+ {
+ debug(" START waiting for {d} postprocess chunks", .{chunks.len});
+ defer debug(" DONE waiting for {d} postprocess chunks", .{chunks.len});
+ wait_group.init();
+ wait_group.counter = @truncate(u32, chunks.len);
+
+ try c.parse_graph.pool.pool.doPtr(c.allocator, wait_group, chunk_contexts[0], generateChunkJS, chunks);
+ }
}
// TODO: enforceNoCyclicChunkImports()
@@ -7941,12 +8283,13 @@ const LinkerContext = struct {
break :brk byte_buffer.items;
} else &.{};
- // Generate the final output files by joining file pieces together
- var output_files = std.ArrayList(options.OutputFile).initCapacity(bun.default_allocator, chunks.len + @as(
- usize,
- @boolToInt(react_client_components_manifest.len > 0) + c.parse_graph.additional_output_files.items.len,
- )) catch unreachable;
- output_files.items.len = chunks.len;
+ var output_files = std.ArrayList(options.OutputFile).initCapacity(
+ bun.default_allocator,
+ (if (c.options.source_maps == .external) chunks.len * 2 else chunks.len) + @as(
+ usize,
+ @boolToInt(react_client_components_manifest.len > 0) + c.parse_graph.additional_output_files.items.len,
+ ),
+ ) catch unreachable;
const root_path = c.resolver.opts.output_dir;
@@ -7954,22 +8297,66 @@ const LinkerContext = struct {
try c.writeOutputFilesToDisk(root_path, chunks, react_client_components_manifest, &output_files);
} else {
// In-memory build
- for (chunks, output_files.items) |*chunk, *output_file| {
- const buffer = chunk.intermediate_output.code(
+ for (chunks) |*chunk| {
+ const _code_result = if (c.options.source_maps != .none) chunk.intermediate_output.codeWithSourceMapShifts(
+ null,
+ c.parse_graph,
+ c.resolver.opts.public_path,
+ chunk,
+ chunks,
+ ) else chunk.intermediate_output.code(
null,
c.parse_graph,
c.resolver.opts.public_path,
chunk,
chunks,
- ) catch @panic("Failed to allocate memory for output file");
- output_file.* = options.OutputFile.initBuf(
- buffer,
- Chunk.IntermediateOutput.allocatorForSize(buffer.len),
+ );
+
+ var code_result = _code_result catch @panic("Failed to allocate memory for output file");
+
+ switch (c.options.source_maps) {
+ .external => {
+ var output_source_map = chunk.output_source_map.finalize(bun.default_allocator, code_result.shifts) catch @panic("Failed to allocate memory for external source map");
+ var source_map_final_rel_path = default_allocator.alloc(u8, chunk.final_rel_path.len + ".map".len) catch unreachable;
+ bun.copy(u8, source_map_final_rel_path, chunk.final_rel_path);
+ bun.copy(u8, source_map_final_rel_path[chunk.final_rel_path.len..], ".map");
+
+ output_files.appendAssumeCapacity(options.OutputFile.initBuf(
+ output_source_map,
+ Chunk.IntermediateOutput.allocatorForSize(output_source_map.len),
+ source_map_final_rel_path,
+ .file,
+ ));
+ },
+ .@"inline" => {
+ var output_source_map = chunk.output_source_map.finalize(bun.default_allocator, code_result.shifts) catch @panic("Failed to allocate memory for external source map");
+ const encode_len = base64.encodeLen(output_source_map);
+
+ const source_map_start = "//# sourceMappingURL=data:application/json;base64,";
+ const total_len = code_result.buffer.len + source_map_start.len + encode_len + 1;
+ var buf = std.ArrayList(u8).initCapacity(Chunk.IntermediateOutput.allocatorForSize(total_len), total_len) catch @panic("Failed to allocate memory for output file with inline source map");
+
+ buf.appendSliceAssumeCapacity(code_result.buffer);
+ buf.appendSliceAssumeCapacity(source_map_start);
+
+ buf.items.len += encode_len;
+ _ = base64.encode(buf.items[buf.items.len - encode_len ..], output_source_map);
+
+ buf.appendAssumeCapacity('\n');
+ Chunk.IntermediateOutput.allocatorForSize(code_result.buffer.len).free(code_result.buffer);
+ code_result.buffer = buf.items;
+ },
+ .none => {},
+ }
+
+ output_files.appendAssumeCapacity(options.OutputFile.initBuf(
+ code_result.buffer,
+ Chunk.IntermediateOutput.allocatorForSize(code_result.buffer.len),
// clone for main thread
bun.default_allocator.dupe(u8, chunk.final_rel_path) catch unreachable,
// TODO: remove this field
.js,
- );
+ ));
}
if (react_client_components_manifest.len > 0) {
@@ -8041,12 +8428,18 @@ const LinkerContext = struct {
// Optimization: when writing to disk, we can re-use the memory
var max_heap_allocator: bun.MaxHeapAllocator = undefined;
- const code_allocator = max_heap_allocator.init(bun.default_allocator);
defer max_heap_allocator.deinit();
+ const code_allocator = max_heap_allocator.init(bun.default_allocator);
+
+ var max_heap_allocator_sourcemap: bun.MaxHeapAllocator = undefined;
+ defer max_heap_allocator_sourcemap.deinit();
+
+ const sourcemap_allocator = max_heap_allocator_sourcemap.init(bun.default_allocator);
+
var pathbuf: [bun.MAX_PATH_BYTES]u8 = undefined;
- for (chunks, output_files.items) |*chunk, *output_file| {
+ for (chunks) |*chunk| {
defer max_heap_allocator.reset();
var rel_path = chunk.final_rel_path;
@@ -8066,13 +8459,93 @@ const LinkerContext = struct {
}
}
- const buffer = chunk.intermediate_output.code(
- code_allocator,
- c.parse_graph,
- c.resolver.opts.public_path,
- chunk,
- chunks,
- ) catch @panic("Failed to allocate memory for output chunk");
+ const _code_result = if (c.options.source_maps != .none)
+ chunk.intermediate_output.codeWithSourceMapShifts(
+ code_allocator,
+ c.parse_graph,
+ c.resolver.opts.public_path,
+ chunk,
+ chunks,
+ )
+ else
+ chunk.intermediate_output.code(
+ code_allocator,
+ c.parse_graph,
+ c.resolver.opts.public_path,
+ chunk,
+ chunks,
+ );
+
+ var code_result = _code_result catch @panic("Failed to allocate memory for output chunk");
+
+ switch (c.options.source_maps) {
+ .external => {
+ var output_source_map = chunk.output_source_map.finalize(sourcemap_allocator, code_result.shifts) catch @panic("Failed to allocate memory for external source map");
+ const source_map_final_rel_path = strings.concat(default_allocator, &.{
+ chunk.final_rel_path,
+ ".map",
+ }) catch @panic("Failed to allocate memory for external source map path");
+
+ switch (JSC.Node.NodeFS.writeFileWithPathBuffer(
+ &pathbuf,
+ JSC.Node.Arguments.WriteFile{
+ .data = JSC.Node.StringOrBuffer{
+ .buffer = JSC.Buffer{
+ .buffer = .{
+ .ptr = @constCast(output_source_map.ptr),
+ // TODO: handle > 4 GB files
+ .len = @truncate(u32, output_source_map.len),
+ .byte_len = @truncate(u32, output_source_map.len),
+ },
+ },
+ },
+ .encoding = .buffer,
+ .dirfd = @intCast(bun.FileDescriptor, root_dir.dir.fd),
+ .file = .{
+ .path = JSC.Node.PathLike{
+ .string = JSC.PathString.init(source_map_final_rel_path),
+ },
+ },
+ },
+ )) {
+ .err => |err| {
+ c.log.addErrorFmt(null, Logger.Loc.Empty, bun.default_allocator, "{} writing sourcemap for chunk {}", .{
+ bun.fmt.quote(err.toSystemError().message.slice()),
+ bun.fmt.quote(chunk.final_rel_path),
+ }) catch unreachable;
+ return error.WriteFailed;
+ },
+ .result => {},
+ }
+
+ output_files.appendAssumeCapacity(options.OutputFile{
+ .input = Fs.Path.init(source_map_final_rel_path),
+ .loader = .json,
+ .size = @truncate(u32, output_source_map.len),
+ .value = .{
+ .saved = .{},
+ },
+ });
+ },
+ .@"inline" => {
+ var output_source_map = chunk.output_source_map.finalize(sourcemap_allocator, code_result.shifts) catch @panic("Failed to allocate memory for external source map");
+ const encode_len = base64.encodeLen(output_source_map);
+
+ const source_map_start = "//# sourceMappingURL=data:application/json;base64,";
+ const total_len = code_result.buffer.len + source_map_start.len + encode_len + 1;
+ var buf = std.ArrayList(u8).initCapacity(sourcemap_allocator, total_len) catch @panic("Failed to allocate memory for output file with inline source map");
+
+ buf.appendSliceAssumeCapacity(code_result.buffer);
+ buf.appendSliceAssumeCapacity(source_map_start);
+
+ buf.items.len += encode_len;
+ _ = base64.encode(buf.items[buf.items.len - encode_len ..], output_source_map);
+
+ buf.appendAssumeCapacity('\n');
+ code_result.buffer = buf.items;
+ },
+ .none => {},
+ }
switch (JSC.Node.NodeFS.writeFileWithPathBuffer(
&pathbuf,
@@ -8080,10 +8553,10 @@ const LinkerContext = struct {
.data = JSC.Node.StringOrBuffer{
.buffer = JSC.Buffer{
.buffer = .{
- .ptr = @constCast(buffer.ptr),
+ .ptr = @constCast(code_result.buffer.ptr),
// TODO: handle > 4 GB files
- .len = @truncate(u32, buffer.len),
- .byte_len = @truncate(u32, buffer.len),
+ .len = @truncate(u32, code_result.buffer.len),
+ .byte_len = @truncate(u32, code_result.buffer.len),
},
},
},
@@ -8106,14 +8579,14 @@ const LinkerContext = struct {
.result => {},
}
- output_file.* = options.OutputFile{
+ output_files.appendAssumeCapacity(options.OutputFile{
.input = Fs.Path.init(bun.default_allocator.dupe(u8, chunk.final_rel_path) catch unreachable),
.loader = .js,
- .size = @truncate(u32, buffer.len),
+ .size = @truncate(u32, code_result.buffer.len),
.value = .{
.saved = .{},
},
- };
+ });
}
if (react_client_components_manifest.len > 0) {
@@ -9518,9 +9991,15 @@ pub const Chunk = struct {
is_executable: bool = false,
+ output_source_map: sourcemap.SourceMapPieces,
+
intermediate_output: IntermediateOutput = .{ .empty = {} },
isolated_hash: u64 = std.math.maxInt(u64),
+ renamer: renamer.Renamer = undefined,
+
+ compile_results_for_chunk: []CompileResult = &.{},
+
pub inline fn isEntryPoint(this: *const Chunk) bool {
return this.entry_point.is_entry_point;
}
@@ -9571,6 +10050,169 @@ pub const Chunk = struct {
return bun.default_allocator;
}
+ pub const CodeResult = struct {
+ buffer: string,
+ shifts: []sourcemap.SourceMapShifts,
+ };
+
+ pub fn codeWithSourceMapShifts(
+ this: IntermediateOutput,
+ allocator_to_use: ?std.mem.Allocator,
+ graph: *const Graph,
+ import_prefix: []const u8,
+ chunk: *Chunk,
+ chunks: []Chunk,
+ ) !CodeResult {
+ const additional_files = graph.input_files.items(.additional_files);
+ const unique_key_for_additional_files = graph.input_files.items(.unique_key_for_additional_file);
+ switch (this) {
+ .pieces => |*pieces| {
+ var shift = sourcemap.SourceMapShifts{
+ .after = .{},
+ .before = .{},
+ };
+
+ var shifts = try std.ArrayList(sourcemap.SourceMapShifts).initCapacity(bun.default_allocator, pieces.len + 1);
+ shifts.appendAssumeCapacity(shift);
+
+ var count: usize = 0;
+ var from_chunk_dir = std.fs.path.dirname(chunk.final_rel_path) orelse "";
+ if (strings.eqlComptime(from_chunk_dir, "."))
+ from_chunk_dir = "";
+
+ for (pieces.slice()) |piece| {
+ count += piece.data_len;
+
+ switch (piece.index.kind) {
+ .chunk, .asset => {
+ const index = piece.index.index;
+ const file_path = switch (piece.index.kind) {
+ .asset => graph.additional_output_files.items[additional_files[index].last().?.output_file].input.text,
+ .chunk => chunks[index].final_rel_path,
+ else => unreachable,
+ };
+
+ const cheap_normalizer = cheapPrefixNormalizer(
+ import_prefix,
+ if (from_chunk_dir.len == 0)
+ file_path
+ else
+ bun.path.relative(from_chunk_dir, file_path),
+ );
+ count += cheap_normalizer[0].len + cheap_normalizer[1].len;
+ },
+ .none => {},
+ }
+ }
+
+ const debug_id_len = if (comptime FeatureFlags.source_map_debug_id)
+ std.fmt.count("\n//# debugId={}\n", .{bun.sourcemap.DebugIDFormatter{ .id = chunk.isolated_hash }})
+ else
+ 0;
+
+ var total_buf = try (allocator_to_use orelse allocatorForSize(count)).alloc(u8, count + debug_id_len);
+ var remain = total_buf;
+
+ for (pieces.slice()) |piece| {
+ const data = piece.data();
+
+ var data_offset = sourcemap.LineColumnOffset{};
+ data_offset.advance(data);
+ shift.before.add(data_offset);
+ shift.after.add(data_offset);
+
+ if (data.len > 0)
+ @memcpy(remain.ptr, data.ptr, data.len);
+
+ remain = remain[data.len..];
+
+ switch (piece.index.kind) {
+ .asset, .chunk => {
+ const index = piece.index.index;
+ const file_path = brk: {
+ switch (piece.index.kind) {
+ .asset => {
+ shift.before.advance(unique_key_for_additional_files[index]);
+ const file = graph.additional_output_files.items[additional_files[index].last().?.output_file];
+ break :brk file.input.text;
+ },
+ .chunk => {
+ const piece_chunk = chunks[index];
+ shift.before.advance(piece_chunk.unique_key);
+ break :brk piece_chunk.final_rel_path;
+ },
+ else => unreachable,
+ }
+ };
+
+ const cheap_normalizer = cheapPrefixNormalizer(
+ import_prefix,
+ if (from_chunk_dir.len == 0)
+ file_path
+ else
+ bun.path.relative(from_chunk_dir, file_path),
+ );
+
+ if (cheap_normalizer[0].len > 0) {
+ @memcpy(remain.ptr, cheap_normalizer[0].ptr, cheap_normalizer[0].len);
+ remain = remain[cheap_normalizer[0].len..];
+ shift.after.advance(cheap_normalizer[0]);
+ }
+
+ if (cheap_normalizer[1].len > 0) {
+ @memcpy(remain.ptr, cheap_normalizer[1].ptr, cheap_normalizer[1].len);
+ remain = remain[cheap_normalizer[1].len..];
+ shift.after.advance(cheap_normalizer[1]);
+ }
+
+ shifts.appendAssumeCapacity(shift);
+ },
+ .none => {},
+ }
+ }
+
+ if (comptime FeatureFlags.source_map_debug_id) {
+ // This comment must go before the //# sourceMappingURL comment
+ remain = remain[(std.fmt.bufPrint(
+ remain,
+ "\n//# debugId={}\n",
+ .{bun.sourcemap.DebugIDFormatter{ .id = chunk.isolated_hash }},
+ ) catch unreachable).len..];
+ }
+
+ std.debug.assert(remain.len == 0);
+ std.debug.assert(total_buf.len == count);
+
+ return .{
+ .buffer = total_buf,
+ .shifts = shifts.items,
+ };
+ },
+ .joiner => |joiner_| {
+ // TODO: make this safe
+ var joiny = joiner_;
+
+ if (comptime FeatureFlags.source_map_debug_id) {
+ // This comment must go before the //# sourceMappingURL comment
+ joiny.push(std.fmt.allocPrint(
+ graph.allocator,
+ "\n//# debugId={}\n",
+ .{bun.sourcemap.DebugIDFormatter{ .id = chunk.isolated_hash }},
+ ) catch unreachable);
+ }
+
+ return .{
+ .buffer = try joiny.done((allocator_to_use orelse allocatorForSize(joiny.len))),
+ .shifts = &[_]sourcemap.SourceMapShifts{},
+ };
+ },
+ .empty => return .{
+ .buffer = "",
+ .shifts = &[_]sourcemap.SourceMapShifts{},
+ },
+ }
+ }
+
pub fn code(
this: IntermediateOutput,
allocator_to_use: ?std.mem.Allocator,
@@ -9578,7 +10220,7 @@ pub const Chunk = struct {
import_prefix: []const u8,
chunk: *Chunk,
chunks: []Chunk,
- ) ![]const u8 {
+ ) !CodeResult {
const additional_files = graph.input_files.items(.additional_files);
switch (this) {
.pieces => |*pieces| {
@@ -9659,14 +10301,23 @@ pub const Chunk = struct {
std.debug.assert(remain.len == 0);
std.debug.assert(total_buf.len == count);
- return total_buf;
+ return .{
+ .buffer = total_buf,
+ .shifts = &[_]sourcemap.SourceMapShifts{},
+ };
},
.joiner => |joiner_| {
// TODO: make this safe
var joiny = joiner_;
- return joiny.done((allocator_to_use orelse allocatorForSize(joiny.len)));
+ return .{
+ .buffer = try joiny.done((allocator_to_use orelse allocatorForSize(joiny.len))),
+ .shifts = &[_]sourcemap.SourceMapShifts{},
+ };
+ },
+ .empty => return .{
+ .buffer = "",
+ .shifts = &[_]sourcemap.SourceMapShifts{},
},
- .empty => return "",
}
}
};
@@ -9823,6 +10474,12 @@ const CompileResult = union(enum) {
}
};
+const CompileResultForSourceMap = struct {
+ source_map_chunk: sourcemap.Chunk,
+ generated_offset: sourcemap.LineColumnOffset,
+ source_index: u32,
+};
+
const ContentHasher = struct {
// xxhash64 outperforms Wyhash if the file is > 1KB or so
hasher: std.hash.XxHash64 = std.hash.XxHash64.init(0),
diff --git a/src/cli/build_command.zig b/src/cli/build_command.zig
index 0518c7b5a..c45db8724 100644
--- a/src/cli/build_command.zig
+++ b/src/cli/build_command.zig
@@ -44,6 +44,15 @@ pub const BuildCommand = struct {
estimated_input_lines_of_code_ = 0;
var this_bundler = try bundler.Bundler.init(allocator, log, ctx.args, null, null);
+
+ this_bundler.options.source_map = options.SourceMapOption.fromApi(ctx.args.source_map);
+ this_bundler.resolver.opts.source_map = options.SourceMapOption.fromApi(ctx.args.source_map);
+
+ if (this_bundler.options.source_map == .external and ctx.bundler_options.outdir.len == 0) {
+ Output.prettyErrorln("<r><red>error<r><d>:<r> cannot use an external source map without --outdir", .{});
+ Global.exit(1);
+ return;
+ }
this_bundler.options.entry_naming = ctx.bundler_options.entry_naming;
this_bundler.options.chunk_naming = ctx.bundler_options.chunk_naming;
this_bundler.options.asset_naming = ctx.bundler_options.asset_naming;
@@ -66,6 +75,20 @@ pub const BuildCommand = struct {
this_bundler.options.minify_identifiers = ctx.bundler_options.minify_identifiers;
this_bundler.resolver.opts.minify_identifiers = ctx.bundler_options.minify_identifiers;
+ if (this_bundler.options.entry_points.len > 1 and ctx.bundler_options.outdir.len == 0) {
+ Output.prettyErrorln("error: to use multiple entry points, specify --outdir", .{});
+ Global.exit(1);
+ return;
+ }
+
+ this_bundler.options.output_dir = ctx.bundler_options.outdir;
+ this_bundler.resolver.opts.output_dir = ctx.bundler_options.outdir;
+
+ this_bundler.options.react_server_components = ctx.bundler_options.react_server_components;
+ this_bundler.resolver.opts.react_server_components = ctx.bundler_options.react_server_components;
+ this_bundler.options.code_splitting = ctx.bundler_options.code_splitting;
+ this_bundler.resolver.opts.code_splitting = ctx.bundler_options.code_splitting;
+
this_bundler.configureLinker();
// This step is optional
@@ -96,12 +119,6 @@ pub const BuildCommand = struct {
return;
}
- if (ctx.debug.dump_limits) {
- fs.FileSystem.printLimits();
- Global.exit(0);
- return;
- }
-
const output_files: []options.OutputFile = brk: {
if (ctx.bundler_options.transform_only) {
this_bundler.linker.options.resolve_mode = .lazy;
@@ -159,15 +176,16 @@ pub const BuildCommand = struct {
output_files[0].input.text = std.fs.path.basename(ctx.bundler_options.outfile);
}
- if (output_dir.len == 0 and ctx.bundler_options.outfile.len == 0) {
+ if (ctx.bundler_options.outfile.len == 0 and output_files.len == 1 and ctx.bundler_options.outdir.len == 0) {
// if --transform is passed, it won't have an output dir
if (output_files[0].value == .buffer)
try writer.writeAll(output_files[0].value.buffer.bytes);
break :dump;
}
- const root_path = output_dir;
+ var root_path = output_dir;
const root_dir = try std.fs.cwd().makeOpenPathIterable(root_path, .{});
+ if (root_path.len == 0 and ctx.args.entry_points.len == 1) root_path = std.fs.path.dirname(ctx.args.entry_points[0]) orelse ".";
var all_paths = try ctx.allocator.alloc([]const u8, output_files.len);
var max_path_len: usize = 0;
for (all_paths, output_files) |*dest, src| {
diff --git a/src/feature_flags.zig b/src/feature_flags.zig
index c2055cbb6..c7a2f4819 100644
--- a/src/feature_flags.zig
+++ b/src/feature_flags.zig
@@ -161,3 +161,8 @@ pub const help_catch_memory_issues = @import("root").bun.Environment.allow_asser
pub const unwrap_commonjs_to_esm = true;
pub const boundary_based_chunking = true;
+
+/// https://sentry.engineering/blog/the-case-for-debug-ids
+/// https://github.com/mitsuhiko/source-map-rfc/blob/proposals/debug-id/proposals/debug-id.md
+/// https://github.com/source-map/source-map-rfc/pull/20
+pub const source_map_debug_id = true;
diff --git a/src/js_ast.zig b/src/js_ast.zig
index 69177d4e6..ecf90faf3 100644
--- a/src/js_ast.zig
+++ b/src/js_ast.zig
@@ -5198,7 +5198,11 @@ pub const EnumValue = struct {
};
pub const S = struct {
- pub const Block = struct { stmts: StmtNodeList };
+ pub const Block = struct {
+ stmts: StmtNodeList,
+ close_brace_loc: logger.Loc = logger.Loc.Empty,
+ };
+
pub const SExpr = struct {
value: ExprNodeIndex,
diff --git a/src/js_parser.zig b/src/js_parser.zig
index 649434690..4cf4b1aec 100644
--- a/src/js_parser.zig
+++ b/src/js_parser.zig
@@ -9565,9 +9565,11 @@ fn NewParser_(
try p.lexer.next();
var stmtOpts = ParseStatementOptions{};
const stmts = try p.parseStmtsUpTo(.t_close_brace, &stmtOpts);
+ const close_brace_loc = p.lexer.loc();
try p.lexer.next();
return p.s(S.Block{
.stmts = stmts,
+ .close_brace_loc = close_brace_loc,
}, loc);
},
@@ -10649,7 +10651,7 @@ fn NewParser_(
return p.s(S.Enum{
.name = name,
.arg = arg_ref,
- .values = try values.toOwnedSlice(),
+ .values = values.items,
.is_export = opts.is_export,
}, loc);
}
@@ -10974,7 +10976,7 @@ fn NewParser_(
}
}
- return try stmts.toOwnedSlice();
+ return stmts.items;
}
fn markStrictModeFeature(p: *P, feature: StrictModeFeature, r: logger.Range, detail: string) !void {
@@ -12371,7 +12373,7 @@ fn NewParser_(
.ts_decorators = ExprNodeList.init(class_opts.ts_decorators),
.class_keyword = class_keyword,
.body_loc = body_loc,
- .properties = try properties.toOwnedSlice(),
+ .properties = properties.items,
.has_decorators = has_decorators or class_opts.ts_decorators.len > 0,
};
}
@@ -12430,7 +12432,7 @@ fn NewParser_(
p.allow_in = oldAllowIn;
- return try parts.toOwnedSlice();
+ return parts.items;
}
// This assumes the caller has already checked for TStringLiteral or TNoSubstitutionTemplateLiteral
@@ -14452,7 +14454,7 @@ fn NewParser_(
}
if (partStmts.items.len > 0) {
- const _stmts = try partStmts.toOwnedSlice();
+ const _stmts = partStmts.items;
// -- hoist_bun_plugin --
if (_stmts.len == 1 and p.options.features.hoist_bun_plugin and !p.bun_plugin.ref.isNull()) {
@@ -14729,7 +14731,7 @@ fn NewParser_(
var stmts = ListManaged(Stmt).fromOwnedSlice(p.allocator, body.stmts);
var temp_opts = PrependTempRefsOpts{ .kind = StmtsKind.fn_body, .fn_body_loc = body.loc };
p.visitStmtsAndPrependTempRefs(&stmts, &temp_opts) catch unreachable;
- func.body = G.FnBody{ .stmts = stmts.toOwnedSlice() catch @panic("TODO"), .loc = body.loc };
+ func.body = G.FnBody{ .stmts = stmts.items, .loc = body.loc };
p.popScope();
p.popScope();
@@ -16521,7 +16523,7 @@ fn NewParser_(
var temp_opts = PrependTempRefsOpts{ .kind = StmtsKind.fn_body };
p.visitStmtsAndPrependTempRefs(&stmts_list, &temp_opts) catch unreachable;
p.allocator.free(e_.body.stmts);
- e_.body.stmts = stmts_list.toOwnedSlice() catch @panic("TODO");
+ e_.body.stmts = stmts_list.items;
p.popScope();
p.popScope();
@@ -17652,7 +17654,7 @@ fn NewParser_(
var items = try List(js_ast.ClauseItem).initCapacity(p.allocator, 1);
items.appendAssumeCapacity(js_ast.ClauseItem{ .alias = alias.original_name, .original_name = alias.original_name, .alias_loc = alias.loc, .name = LocRef{ .loc = alias.loc, .ref = data.namespace_ref } });
- stmts.appendAssumeCapacity(p.s(S.ExportClause{ .items = items.toOwnedSlice(p.allocator) catch @panic("TODO"), .is_single_line = true }, stmt.loc));
+ stmts.appendAssumeCapacity(p.s(S.ExportClause{ .items = items.items, .is_single_line = true }, stmt.loc));
return;
}
}
@@ -18016,7 +18018,7 @@ fn NewParser_(
const kind = if (std.meta.eql(p.loop_body, stmt.data)) StmtsKind.loop_body else StmtsKind.none;
var _stmts = ListManaged(Stmt).fromOwnedSlice(p.allocator, data.stmts);
p.visitStmts(&_stmts, kind) catch unreachable;
- data.stmts = _stmts.toOwnedSlice() catch @panic("TODO");
+ data.stmts = _stmts.items;
p.popScope();
}
@@ -18216,7 +18218,7 @@ fn NewParser_(
p.fn_or_arrow_data_visit.try_body_count += 1;
p.visitStmts(&_stmts, StmtsKind.none) catch unreachable;
p.fn_or_arrow_data_visit.try_body_count -= 1;
- data.body = _stmts.toOwnedSlice() catch @panic("TODO");
+ data.body = _stmts.items;
}
p.popScope();
@@ -18228,7 +18230,7 @@ fn NewParser_(
}
var _stmts = ListManaged(Stmt).fromOwnedSlice(p.allocator, catch_.body);
p.visitStmts(&_stmts, StmtsKind.none) catch unreachable;
- catch_.body = _stmts.toOwnedSlice() catch @panic("TODO");
+ catch_.body = _stmts.items;
}
p.popScope();
}
@@ -18238,7 +18240,7 @@ fn NewParser_(
{
var _stmts = ListManaged(Stmt).fromOwnedSlice(p.allocator, finally.stmts);
p.visitStmts(&_stmts, StmtsKind.none) catch unreachable;
- finally.stmts = _stmts.toOwnedSlice() catch @panic("TODO");
+ finally.stmts = _stmts.items;
}
p.popScope();
}
@@ -18262,7 +18264,7 @@ fn NewParser_(
}
var _stmts = ListManaged(Stmt).fromOwnedSlice(p.allocator, case.body);
p.visitStmts(&_stmts, StmtsKind.none) catch unreachable;
- data.cases[i].body = _stmts.toOwnedSlice() catch @panic("TODO");
+ data.cases[i].body = _stmts.items;
}
}
// TODO: duplicate case checker
@@ -18497,7 +18499,7 @@ fn NewParser_(
data.name.loc,
data.name.ref.?,
data.arg,
- try value_stmts.toOwnedSlice(),
+ value_stmts.items,
);
return;
},
@@ -19708,8 +19710,8 @@ fn NewParser_(
}
}
- class.properties = class_body.toOwnedSlice() catch unreachable;
- constructor.func.body.stmts = stmts.toOwnedSlice() catch unreachable;
+ class.properties = class_body.items;
+ constructor.func.body.stmts = stmts.items;
}
}
}
diff --git a/src/js_printer.zig b/src/js_printer.zig
index da50024f2..718632ef6 100644
--- a/src/js_printer.zig
+++ b/src/js_printer.zig
@@ -491,6 +491,7 @@ pub const Options = struct {
rewrite_require_resolve: bool = true,
allocator: std.mem.Allocator = default_allocator,
source_map_handler: ?SourceMapHandler = null,
+ source_map_builder: ?*bun.sourcemap.Chunk.Builder = null,
css_import_behavior: Api.CssInJsBehavior = Api.CssInJsBehavior.facade,
commonjs_named_exports: js_ast.Ast.CommonJSNamedExports = .{},
@@ -527,7 +528,7 @@ pub const Options = struct {
// If we're writing out a source map, this table of line start indices lets
// us do binary search on to figure out what line a given AST node came from
- // line_offset_tables: []LineOffsetTable
+ line_offset_tables: ?SourceMap.LineOffsetTable.List = null,
pub inline fn unindent(self: *Options) void {
self.indent -|= 1;
@@ -976,7 +977,7 @@ fn NewPrinter(
switch (stmt.data) {
.s_block => |block| {
p.printSpace();
- p.printBlock(stmt.loc, block.stmts);
+ p.printBlock(stmt.loc, block.stmts, block.close_brace_loc);
p.printNewline();
},
else => {
@@ -995,7 +996,7 @@ fn NewPrinter(
}
}
- pub fn printBlock(p: *Printer, loc: logger.Loc, stmts: []const Stmt) void {
+ pub fn printBlock(p: *Printer, loc: logger.Loc, stmts: []const Stmt, close_brace_loc: ?logger.Loc) void {
p.addSourceMapping(loc);
p.print("{");
p.printNewline();
@@ -1006,6 +1007,9 @@ fn NewPrinter(
p.needs_semicolon = false;
p.printIndent();
+ if (close_brace_loc != null and close_brace_loc.?.start > loc.start) {
+ p.addSourceMapping(close_brace_loc.?);
+ }
p.print("}");
}
@@ -1177,6 +1181,19 @@ fn NewPrinter(
printer.source_map_builder.addSourceMapping(location, printer.writer.slice());
}
+ // pub inline fn addSourceMappingForName(printer: *Printer, location: logger.Loc, name: string, ref: Ref) void {
+ // _ = location;
+ // if (comptime !generate_source_map) {
+ // return;
+ // }
+
+ // if (printer.symbols().get(printer.symbols().follow(ref))) |symbol| {
+ // if (!strings.eqlLong(symbol.original_name, name)) {
+ // printer.source_map_builder.addSourceMapping()
+ // }
+ // }
+ // }
+
pub fn printSymbol(p: *Printer, ref: Ref) void {
std.debug.assert(!ref.isNull());
const name = p.renamer.nameForSymbol(ref);
@@ -1237,7 +1254,7 @@ fn NewPrinter(
pub fn printFunc(p: *Printer, func: G.Fn) void {
p.printFnArgs(func.open_parens_loc, func.args, func.flags.contains(.has_rest_arg), false);
p.printSpace();
- p.printBlock(func.body.loc, func.body.stmts);
+ p.printBlock(func.body.loc, func.body.stmts, null);
}
pub fn printClass(p: *Printer, class: G.Class) void {
if (class.extends) |extends| {
@@ -1260,7 +1277,7 @@ fn NewPrinter(
if (item.kind == .class_static_block) {
p.print("static");
p.printSpace();
- p.printBlock(item.class_static_block.?.loc, item.class_static_block.?.stmts.slice());
+ p.printBlock(item.class_static_block.?.loc, item.class_static_block.?.stmts.slice(), null);
p.printNewline();
continue;
}
@@ -2313,7 +2330,7 @@ fn NewPrinter(
}
if (!wasPrinted) {
- p.printBlock(e.body.loc, e.body.stmts);
+ p.printBlock(e.body.loc, e.body.stmts, null);
}
if (wrap) {
@@ -2737,7 +2754,8 @@ fn NewPrinter(
}
},
.e_unary => |e| {
- const entry: Op = Op.Table.get(e.op);
+ // 4.00 ms enums.EnumIndexer(src.js_ast.Op.Code).indexOf
+ const entry: *const Op = Op.Table.getPtrConst(e.op);
const wrap = level.gte(entry.level);
if (wrap) {
@@ -2768,8 +2786,11 @@ fn NewPrinter(
}
},
.e_binary => |e| {
- const entry: Op = Op.Table.get(e.op);
- var wrap = level.gte(entry.level) or (e.op == Op.Code.bin_in and flags.contains(.forbid_in));
+ // 4.00 ms enums.EnumIndexer(src.js_ast.Op.Code).indexOf
+ const entry: *const Op = Op.Table.getPtrConst(e.op);
+ const e_level = entry.level;
+
+ var wrap = level.gte(e_level) or (e.op == Op.Code.bin_in and flags.contains(.forbid_in));
// Destructuring assignments must be parenthesized
const n = p.writer.written;
@@ -2787,15 +2808,15 @@ fn NewPrinter(
flags.insert(.forbid_in);
}
- var left_level = entry.level.sub(1);
- var right_level = entry.level.sub(1);
+ var left_level = e_level.sub(1);
+ var right_level = e_level.sub(1);
if (e.op.isRightAssociative()) {
- left_level = entry.level;
+ left_level = e_level;
}
if (e.op.isLeftAssociative()) {
- right_level = entry.level;
+ right_level = e_level;
}
switch (e.op) {
@@ -3964,7 +3985,7 @@ fn NewPrinter(
switch (s.body.data) {
.s_block => {
p.printSpace();
- p.printBlock(s.body.loc, s.body.data.s_block.stmts);
+ p.printBlock(s.body.loc, s.body.data.s_block.stmts, s.body.data.s_block.close_brace_loc);
p.printSpace();
},
else => {
@@ -4049,7 +4070,7 @@ fn NewPrinter(
p.printSpaceBeforeIdentifier();
p.print("try");
p.printSpace();
- p.printBlock(s.body_loc, s.body);
+ p.printBlock(s.body_loc, s.body, null);
if (s.catch_) |catch_| {
p.printSpace();
@@ -4061,14 +4082,14 @@ fn NewPrinter(
p.print(")");
}
p.printSpace();
- p.printBlock(catch_.loc, catch_.body);
+ p.printBlock(catch_.loc, catch_.body, null);
}
if (s.finally) |finally| {
p.printSpace();
p.print("finally");
p.printSpace();
- p.printBlock(finally.loc, finally.stmts);
+ p.printBlock(finally.loc, finally.stmts, null);
}
p.printNewline();
@@ -4133,7 +4154,7 @@ fn NewPrinter(
switch (c.body[0].data) {
.s_block => {
p.printSpace();
- p.printBlock(c.body[0].loc, c.body[0].data.s_block.stmts);
+ p.printBlock(c.body[0].loc, c.body[0].data.s_block.stmts, c.body[0].data.s_block.close_brace_loc);
p.printNewline();
continue;
},
@@ -4532,7 +4553,7 @@ fn NewPrinter(
},
.s_block => |s| {
p.printIndent();
- p.printBlock(stmt.loc, s.stmts);
+ p.printBlock(stmt.loc, s.stmts, s.close_brace_loc);
p.printNewline();
},
.s_debugger => {
@@ -4595,7 +4616,11 @@ fn NewPrinter(
p.printSemicolonAfterStatement();
},
.s_expr => |s| {
- p.printIndent();
+ if (!p.options.minify_whitespace and p.options.indent > 0) {
+ p.addSourceMapping(stmt.loc);
+ p.printIndent();
+ }
+
p.stmt_start = p.writer.written;
p.printExpr(s.value, .lowest, ExprFlag.ExprResultIsUnused());
p.printSemicolonAfterStatement();
@@ -4856,7 +4881,7 @@ fn NewPrinter(
switch (s.yes.data) {
.s_block => |block| {
p.printSpace();
- p.printBlock(s.yes.loc, block.stmts);
+ p.printBlock(s.yes.loc, block.stmts, block.close_brace_loc);
if (s.no != null) {
p.printSpace();
@@ -4904,7 +4929,7 @@ fn NewPrinter(
switch (no_block.data) {
.s_block => {
p.printSpace();
- p.printBlock(no_block.loc, no_block.data.s_block.stmts);
+ p.printBlock(no_block.loc, no_block.data.s_block.stmts, null);
p.printNewline();
},
.s_if => {
@@ -5145,7 +5170,7 @@ fn NewPrinter(
imported_module_ids_list.clearRetainingCapacity();
- return Printer{
+ var printer = Printer{
.import_records = import_records,
.options = opts,
.writer = writer,
@@ -5153,6 +5178,16 @@ fn NewPrinter(
.renamer = renamer,
.source_map_builder = source_map_builder,
};
+ if (comptime generate_source_map) {
+ // This seems silly to cache but the .items() function apparently costs 1ms according to Instruments.
+ printer.source_map_builder.line_offset_table_byte_offset_list =
+ printer
+ .source_map_builder
+ .line_offset_tables
+ .items(.byte_offset_to_start_of_line);
+ }
+
+ return printer;
}
};
}
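Caching line_offset_table_byte_offset_list here means addSourceMapping can index a plain slice instead of calling MultiArrayList.items for every mapping. A small self-contained illustration of hoisting that call, with an invented struct standing in for LineOffsetTable:

const std = @import("std");

const LineOffset = struct {
    byte_offset_to_start_of_line: u32,
    columns_for_non_ascii: u32,
};

test "hoist MultiArrayList.items out of the hot path" {
    const allocator = std.testing.allocator;
    var lines = std.MultiArrayList(LineOffset){};
    defer lines.deinit(allocator);

    try lines.append(allocator, .{ .byte_offset_to_start_of_line = 0, .columns_for_non_ascii = 0 });
    try lines.append(allocator, .{ .byte_offset_to_start_of_line = 42, .columns_for_non_ascii = 0 });

    // Compute the per-field slice once and reuse it, rather than calling
    // .items(.byte_offset_to_start_of_line) for every source mapping.
    const byte_offsets: []const u32 = lines.items(.byte_offset_to_start_of_line);
    try std.testing.expectEqual(@as(u32, 42), byte_offsets[1]);
}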
@@ -5555,7 +5590,7 @@ pub const Format = enum {
cjs_ascii,
};
-fn getSourceMapBuilder(
+pub fn getSourceMapBuilder(
comptime generate_source_map: bool,
comptime is_bun_platform: bool,
opts: Options,
@@ -5572,7 +5607,7 @@ fn getSourceMapBuilder(
),
.cover_lines_without_mappings = true,
.prepend_count = is_bun_platform,
- .line_offset_tables = SourceMap.LineOffsetTable.generate(
+ .line_offset_tables = opts.line_offset_tables orelse SourceMap.LineOffsetTable.generate(
opts.allocator,
source.contents,
@intCast(
@@ -5663,12 +5698,7 @@ pub fn printAst(
defer {
if (opts.minify_identifiers) {
- for (&renamer.MinifyRenamer.slots.values) |*val| {
- val.deinit();
- }
- renamer.MinifyRenamer.reserved_names.deinit(opts.allocator);
- renamer.MinifyRenamer.top_level_symbol_to_slot.deinit(opts.allocator);
- opts.allocator.destroy(renamer.MinifyRenamer);
+ renamer.deinit(opts.allocator);
}
}
@@ -5763,10 +5793,13 @@ pub fn printJSON(
pub fn print(
allocator: std.mem.Allocator,
target: options.Target,
+ ast: Ast,
+ source: *const logger.Source,
opts: Options,
import_records: []const ImportRecord,
parts: []const js_ast.Part,
renamer: bun.renamer.Renamer,
+ comptime generate_source_maps: bool,
) PrintResult {
var buffer_writer = BufferWriter.init(allocator) catch |err| return .{ .err = err };
var buffer_printer = BufferPrinter.init(buffer_writer);
@@ -5775,10 +5808,13 @@ pub fn print(
*BufferPrinter,
&buffer_printer,
target,
+ ast,
+ source,
opts,
import_records,
parts,
renamer,
+ comptime generate_source_maps,
);
}
@@ -5786,20 +5822,26 @@ pub fn printWithWriter(
comptime Writer: type,
_writer: Writer,
target: options.Target,
+ ast: Ast,
+ source: *const logger.Source,
opts: Options,
import_records: []const ImportRecord,
parts: []const js_ast.Part,
renamer: bun.renamer.Renamer,
+ comptime generate_source_maps: bool,
) PrintResult {
return switch (target.isBun()) {
inline else => |is_bun| printWithWriterAndPlatform(
Writer,
_writer,
is_bun,
+ ast,
+ source,
opts,
import_records,
parts,
renamer,
+ generate_source_maps,
),
};
}
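printWithWriter converts the runtime target.isBun() flag into a comptime parameter via an inline else switch, so each platform gets its own printer specialization. The same pattern in isolation, with placeholder functions:

const std = @import("std");

fn describe(comptime is_bun: bool) []const u8 {
    // Each instantiation is compiled separately, like NewPrinter's comptime flags.
    return if (is_bun) "bun platform printer" else "generic printer";
}

fn dispatch(is_bun_at_runtime: bool) []const u8 {
    return switch (is_bun_at_runtime) {
        // `inline else` expands one prong per possible value, so `is_bun`
        // is comptime-known inside each prong.
        inline else => |is_bun| describe(is_bun),
    };
}

test "runtime bool to comptime specialization" {
    try std.testing.expectEqualStrings("bun platform printer", dispatch(true));
    try std.testing.expectEqualStrings("generic printer", dispatch(false));
}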
@@ -5808,19 +5850,22 @@ pub fn printWithWriter(
pub fn printWithWriterAndPlatform(
comptime Writer: type,
_writer: Writer,
- comptime is_bun: bool,
+ comptime is_bun_platform: bool,
+ ast: Ast,
+ source: *const logger.Source,
opts: Options,
import_records: []const ImportRecord,
parts: []const js_ast.Part,
renamer: bun.renamer.Renamer,
+ comptime generate_source_maps: bool,
) PrintResult {
const PrinterType = NewPrinter(
false,
Writer,
false,
- is_bun,
- false,
+ is_bun_platform,
false,
+ generate_source_maps,
);
var writer = _writer;
var printer = PrinterType.init(
@@ -5828,7 +5873,7 @@ pub fn printWithWriterAndPlatform(
import_records,
opts,
renamer,
- undefined,
+ getSourceMapBuilder(generate_source_maps, is_bun_platform, opts, source, &ast),
);
defer printer.temporary_bindings.deinit(bun.default_allocator);
defer _writer.* = printer.writer.*;
@@ -5851,9 +5896,18 @@ pub fn printWithWriterAndPlatform(
printer.writer.done() catch |err|
return .{ .err = err };
+ const written = printer.writer.ctx.getWritten();
+ const source_map: ?SourceMap.Chunk = if (generate_source_maps and written.len > 0) brk: {
+ const chunk = printer.source_map_builder.generateChunk(written);
+ if (chunk.should_ignore)
+ break :brk null;
+ break :brk chunk;
+ } else null;
+
return .{
.result = .{
- .code = writer.ctx.getWritten(),
+ .code = written,
+ .source_map = source_map,
},
};
}
diff --git a/src/options.zig b/src/options.zig
index a701cd035..c9077b64b 100644
--- a/src/options.zig
+++ b/src/options.zig
@@ -1370,7 +1370,7 @@ pub const BundleOptions = struct {
conditions: ESMConditions = undefined,
tree_shaking: bool = false,
code_splitting: bool = false,
- sourcemap: SourceMapOption = SourceMapOption.none,
+ source_map: SourceMapOption = SourceMapOption.none,
disable_transpilation: bool = false,
@@ -1667,7 +1667,7 @@ pub const BundleOptions = struct {
if (opts.framework == null)
opts.env.behavior = .load_all;
- opts.sourcemap = SourceMapOption.fromApi(transform.source_map orelse Api.SourceMapMode.external);
+ opts.source_map = SourceMapOption.fromApi(transform.source_map orelse Api.SourceMapMode.external);
opts.resolve_mode = .lazy;
@@ -1809,7 +1809,7 @@ pub const BundleOptions = struct {
opts.serve = true;
} else {
- opts.sourcemap = SourceMapOption.fromApi(transform.source_map orelse Api.SourceMapMode._none);
+ opts.source_map = SourceMapOption.fromApi(transform.source_map orelse Api.SourceMapMode._none);
}
opts.tree_shaking = opts.serve or opts.target.isBun() or opts.production or is_generating_bundle;
diff --git a/src/renamer.zig b/src/renamer.zig
index 2775986ff..60414e2e7 100644
--- a/src/renamer.zig
+++ b/src/renamer.zig
@@ -69,10 +69,10 @@ pub const Renamer = union(enum) {
};
}
- pub fn deinit(renamer: Renamer) void {
+ pub fn deinit(renamer: Renamer, allocator: std.mem.Allocator) void {
switch (renamer) {
.NumberRenamer => |r| r.deinit(),
- .MinifyRenamer => |r| r.deinit(),
+ .MinifyRenamer => |r| r.deinit(allocator),
else => {},
}
}
@@ -170,8 +170,13 @@ pub const MinifyRenamer = struct {
return renamer;
}
- pub fn deinit(this: *MinifyRenamer) void {
- _ = this;
+ pub fn deinit(this: *MinifyRenamer, allocator: std.mem.Allocator) void {
+ for (&this.slots.values) |*val| {
+ val.deinit();
+ }
+ this.reserved_names.deinit(allocator);
+ this.top_level_symbol_to_slot.deinit(allocator);
+ allocator.destroy(this);
}
pub fn toRenamer(this: *MinifyRenamer) Renamer {
@@ -323,7 +328,7 @@ pub const MinifyRenamer = struct {
}
},
.label => {
- while (JSLexer.Keywords.get(name_buf.items)) |_| {
+ while (JSLexer.Keywords.has(name_buf.items)) {
try name_minifier.numberToMinifiedName(&name_buf, next_name);
next_name += 1;
}
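The label branch only needs a yes/no answer, so the lookup becomes Keywords.has and the loop keeps advancing the counter until the generated name no longer collides with a keyword. A toy version of that loop, with an invented three-word keyword set and a deliberately naive name generator:

const std = @import("std");

const keywords = std.ComptimeStringMap(void, .{
    .{"do"},
    .{"if"},
    .{"in"},
});

// Deliberately simple stand-in for numberToMinifiedName: two lowercase letters.
fn toyMinifiedName(buf: *[2]u8, n: usize) []const u8 {
    buf[0] = 'a' + @intCast(u8, n / 26);
    buf[1] = 'a' + @intCast(u8, n % 26);
    return buf[0..];
}

test "skip names that collide with keywords" {
    var buf: [2]u8 = undefined;
    var next: usize = 8 * 26 + 13; // "in"
    var name = toyMinifiedName(&buf, next);
    while (keywords.has(name)) {
        next += 1;
        name = toyMinifiedName(&buf, next);
    }
    try std.testing.expectEqualStrings("io", name);
}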
diff --git a/src/sourcemap/sourcemap.zig b/src/sourcemap/sourcemap.zig
index f7a92a9c1..b7441ccc9 100644
--- a/src/sourcemap/sourcemap.zig
+++ b/src/sourcemap/sourcemap.zig
@@ -5,6 +5,7 @@ pub const VLQ_CONTINUATION_BIT: u32 = VLQ_BASE;
pub const VLQ_CONTINUATION_MASK: u32 = 1 << VLQ_CONTINUATION_BIT;
const std = @import("std");
const bun = @import("root").bun;
+const string = bun.string;
const JSAst = bun.JSAst;
const BabyList = JSAst.BabyList;
const Logger = @import("root").bun.logger;
@@ -315,7 +316,7 @@ pub const Mapping = struct {
};
};
-pub const LineColumnOffset = packed struct {
+pub const LineColumnOffset = struct {
lines: i32 = 0,
columns: i32 = 0,
@@ -329,8 +330,24 @@ pub const LineColumnOffset = packed struct {
.value => this.value.advance(input),
}
}
+
+ pub fn reset(this: *Optional) void {
+ switch (this.*) {
+ .null => {},
+ .value => this.value = .{},
+ }
+ }
};
+ pub fn add(this: *LineColumnOffset, b: LineColumnOffset) void {
+ if (b.lines == 0) {
+ this.columns += b.columns;
+ } else {
+ this.lines += b.lines;
+ this.columns = b.columns;
+ }
+ }
+
pub fn advance(this: *LineColumnOffset, input: []const u8) void {
var columns = this.columns;
defer this.columns = columns;
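The new add method folds one chunk's size into a running generated-output position: a chunk without newlines only widens the column, while a chunk containing newlines advances the line and restarts the column. A standalone copy of that rule with a small worked example:

const std = @import("std");

// Mirrors LineColumnOffset.add from this hunk.
const LineColumnOffset = struct {
    lines: i32 = 0,
    columns: i32 = 0,

    pub fn add(this: *LineColumnOffset, b: LineColumnOffset) void {
        if (b.lines == 0) {
            this.columns += b.columns;
        } else {
            this.lines += b.lines;
            this.columns = b.columns;
        }
    }
};

test "composing generated-output offsets" {
    var total = LineColumnOffset{ .lines = 2, .columns = 7 };
    total.add(.{ .lines = 0, .columns = 3 }); // still on line 2 -> column 10
    total.add(.{ .lines = 1, .columns = 4 }); // crossed a newline -> line 3, column 4
    try std.testing.expectEqual(@as(i32, 3), total.lines);
    try std.testing.expectEqual(@as(i32, 4), total.columns);
}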
@@ -339,8 +356,7 @@ pub const LineColumnOffset = packed struct {
std.debug.assert(i >= offset);
std.debug.assert(i < input.len);
- columns += @intCast(i32, i - offset);
- offset = i;
+ offset = i + 1;
var cp = strings.CodepointIterator.initOffset(input, offset);
var cursor = strings.CodepointIterator.Cursor{};
@@ -366,8 +382,10 @@ pub const LineColumnOffset = packed struct {
},
}
}
+ }
- columns += @intCast(i32, input.len - offset);
+ pub fn comesBefore(a: LineColumnOffset, b: LineColumnOffset) bool {
+ return a.lines < b.lines or (a.lines == b.lines and a.columns < b.columns);
}
pub fn cmp(_: void, a: LineColumnOffset, b: LineColumnOffset) std.math.Order {
@@ -392,10 +410,103 @@ pub fn find(
return Mapping.find(this.mapping, line, column);
}
+pub const SourceMapShifts = struct {
+ before: LineColumnOffset,
+ after: LineColumnOffset,
+};
+
pub const SourceMapPieces = struct {
prefix: std.ArrayList(u8),
mappings: std.ArrayList(u8),
suffix: std.ArrayList(u8),
+
+ pub fn init(allocator: std.mem.Allocator) SourceMapPieces {
+ return .{
+ .prefix = std.ArrayList(u8).init(allocator),
+ .mappings = std.ArrayList(u8).init(allocator),
+ .suffix = std.ArrayList(u8).init(allocator),
+ };
+ }
+
+ pub fn hasContent(this: *SourceMapPieces) bool {
+ return (this.prefix.items.len + this.mappings.items.len + this.suffix.items.len) > 0;
+ }
+
+ pub fn finalize(this: *SourceMapPieces, allocator: std.mem.Allocator, _shifts: []SourceMapShifts) ![]const u8 {
+ var shifts = _shifts;
+ var start_of_run: usize = 0;
+ var current: usize = 0;
+ var generated = LineColumnOffset{};
+ var prev_shift_column_delta: i32 = 0;
+ var j = Joiner{};
+
+ j.push(this.prefix.items);
+ const mappings = this.mappings.items;
+
+ while (current < mappings.len) {
+ if (mappings[current] == ';') {
+ generated.lines += 1;
+ generated.columns = 0;
+ prev_shift_column_delta = 0;
+ current += 1;
+ continue;
+ }
+
+ var potential_end_of_run = current;
+
+ var decode_result = decodeVLQ(mappings, current);
+ generated.columns += decode_result.value;
+ current = decode_result.start;
+
+ var potential_start_of_run = current;
+
+ current = decodeVLQ(mappings, current).start;
+ current = decodeVLQ(mappings, current).start;
+ current = decodeVLQ(mappings, current).start;
+
+ if (current < mappings.len) {
+ var c = mappings[current];
+ if (c != ',' and c != ';') {
+ current = decodeVLQ(mappings, current).start;
+ }
+ }
+
+ if (current < mappings.len and mappings[current] == ',') {
+ current += 1;
+ }
+
+ var did_cross_boundary = false;
+ if (shifts.len > 1 and shifts[1].before.comesBefore(generated)) {
+ shifts = shifts[1..];
+ did_cross_boundary = true;
+ }
+
+ if (!did_cross_boundary) {
+ continue;
+ }
+
+ var shift = shifts[0];
+ if (shift.after.lines != generated.lines) {
+ continue;
+ }
+
+ j.push(mappings[start_of_run..potential_end_of_run]);
+
+ std.debug.assert(shift.before.lines == shift.after.lines);
+
+ var shift_column_delta = shift.after.columns - shift.before.columns;
+ const encode = encodeVLQ(decode_result.value + shift_column_delta - prev_shift_column_delta);
+ j.push(encode.bytes[0..encode.len]);
+ prev_shift_column_delta = shift_column_delta;
+
+ start_of_run = potential_start_of_run;
+ }
+
+ j.push(mappings[start_of_run..]);
+ j.push(this.suffix.items);
+
+ return try j.done(allocator);
+ }
};
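finalize walks the encoded mappings and rewrites only the first mapping after each shift boundary, correcting its generated-column delta by the difference between the post-edit and pre-edit columns, minus whatever correction was already applied on that line. The arithmetic in isolation, with made-up numbers standing in for decoded VLQ values:

const std = @import("std");

const Shift = struct { before_column: i32, after_column: i32 };

test "adjusting a column delta across a shift boundary" {
    // Suppose post-processing inserted 4 columns of text on this generated line.
    const shift = Shift{ .before_column = 10, .after_column = 14 };
    var prev_shift_column_delta: i32 = 0;

    // The original mapping encoded a generated-column delta of 6.
    const decoded_column_delta: i32 = 6;
    const shift_column_delta = shift.after_column - shift.before_column;
    const adjusted = decoded_column_delta + shift_column_delta - prev_shift_column_delta;
    prev_shift_column_delta = shift_column_delta;

    // 6 + 4 - 0 = 10 is what gets re-encoded as VLQ; later mappings on the
    // line keep their original relative deltas untouched.
    try std.testing.expectEqual(@as(i32, 10), adjusted);
    try std.testing.expectEqual(@as(i32, 4), prev_shift_column_delta);
}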
// -- comment from esbuild --
@@ -407,16 +518,16 @@ pub const SourceMapPieces = struct {
// After all chunks are computed, they are joined together in a second pass.
// This rewrites the first mapping in each chunk to be relative to the end
// state of the previous chunk.
-pub fn appendSourceMapChunk(j: *Joiner, prev_end_state_: SourceMapState, start_state_: SourceMapState, source_map_: MutableString) !void {
+pub fn appendSourceMapChunk(j: *Joiner, allocator: std.mem.Allocator, prev_end_state_: SourceMapState, start_state_: SourceMapState, source_map_: bun.string) !void {
var prev_end_state = prev_end_state_;
var start_state = start_state_;
// Handle line breaks in between this mapping and the previous one
if (start_state.generated_line > 0) {
- j.append(try strings.repeatingAlloc(source_map_.allocator, @intCast(usize, start_state.generated_line), ';'), 0, source_map_.allocator);
+ j.append(try strings.repeatingAlloc(allocator, @intCast(usize, start_state.generated_line), ';'), 0, allocator);
prev_end_state.generated_column = 0;
}
- var source_map = source_map_.list.items;
+ var source_map = source_map_;
if (strings.indexOfNotChar(source_map, ';')) |semicolons| {
j.append(source_map[0..semicolons], 0, null);
source_map = source_map[semicolons..];
@@ -448,13 +559,13 @@ pub fn appendSourceMapChunk(j: *Joiner, prev_end_state_: SourceMapState, start_s
start_state.original_column += original_column_.value;
j.append(
- appendMappingToBuffer(MutableString.initEmpty(source_map.allocator), j.lastByte(), prev_end_state, start_state).list.items,
+ appendMappingToBuffer(MutableString.initEmpty(allocator), j.lastByte(), prev_end_state, start_state).list.items,
0,
- source_map.allocator,
+ allocator,
);
// Then append everything after that without modification.
- j.append(source_map_.list.items, @truncate(u32, @ptrToInt(source_map.ptr) - @ptrToInt(source_map_.list.items.ptr)), source_map_.allocator);
+ j.push(source_map);
}
const vlq_lookup_table: [256]VLQ = brk: {
@@ -638,18 +749,14 @@ pub const LineOffsetTable = struct {
pub const List = std.MultiArrayList(LineOffsetTable);
- pub fn findLine(list: List, loc: Logger.Loc) i32 {
- const byte_offsets_to_start_of_line = list.items(.byte_offset_to_start_of_line);
- var original_line: u32 = 0;
- if (loc.start <= -1) {
- return 0;
- }
-
- const loc_start = @intCast(u32, loc.start);
+ pub fn findLine(byte_offsets_to_start_of_line: []const u32, loc: Logger.Loc) i32 {
+ std.debug.assert(loc.start > -1); // checked by caller
+ var original_line: usize = 0;
+ const loc_start = @intCast(usize, loc.start);
{
- var count = @truncate(u32, byte_offsets_to_start_of_line.len);
- var i: u32 = 0;
+ var count = @truncate(usize, byte_offsets_to_start_of_line.len);
+ var i: usize = 0;
while (count > 0) {
const step = count / 2;
i = original_line + step;
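findLine now receives the cached byte-offset slice directly and asserts a non-negative location rather than re-checking it. The part of the loop not shown in this hunk is the usual upper-bound binary search over start-of-line offsets; a standalone sketch under that assumption:

const std = @import("std");

// Returns the index of the last line whose starting byte offset is <= loc_start.
fn findLine(byte_offsets_to_start_of_line: []const u32, loc_start: usize) usize {
    var original_line: usize = 0;
    var count = byte_offsets_to_start_of_line.len;
    var i: usize = 0;
    while (count > 0) {
        const step = count / 2;
        i = original_line + step;
        if (byte_offsets_to_start_of_line[i] <= loc_start) {
            original_line = i + 1;
            count -= step + 1;
        } else {
            count = step;
        }
    }
    return original_line -| 1;
}

test "byte offset to line index" {
    // Lines start at bytes 0, 12 and 30 in this hypothetical source.
    const offsets = [_]u32{ 0, 12, 30 };
    try std.testing.expectEqual(@as(usize, 0), findLine(&offsets, 5));
    try std.testing.expectEqual(@as(usize, 1), findLine(&offsets, 12));
    try std.testing.expectEqual(@as(usize, 2), findLine(&offsets, 100));
}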
@@ -1015,6 +1122,8 @@ pub const Chunk = struct {
prev_loc: Logger.Loc = Logger.Loc.Empty,
has_prev_state: bool = false,
+ line_offset_table_byte_offset_list: []const u32 = &.{},
+
// This is a workaround for a bug in the popular "source-map" library:
// https://github.com/mozilla/source-map/issues/261. The library will
// sometimes return null when querying a source map unless every line
@@ -1031,7 +1140,7 @@ pub const Chunk = struct {
pub const SourceMapper = SourceMapFormat(SourceMapFormatType);
- pub fn generateChunk(b: *ThisBuilder, output: []const u8) Chunk {
+ pub noinline fn generateChunk(b: *ThisBuilder, output: []const u8) Chunk {
b.updateGeneratedLineAndColumn(output);
if (b.prepend_count) {
b.source_map.getBuffer().list.items[0..8].* = @bitCast([8]u8, b.source_map.getBuffer().list.items.len);
@@ -1042,7 +1151,7 @@ pub const Chunk = struct {
.mappings_count = b.source_map.getCount(),
.end_state = b.prev_state,
.final_generated_column = b.generated_column,
- .should_ignore = !b.source_map.shouldIgnore(),
+ .should_ignore = b.source_map.shouldIgnore(),
};
}
@@ -1144,7 +1253,7 @@ pub const Chunk = struct {
b.prev_loc = loc;
const list = b.line_offset_tables;
- const original_line = LineOffsetTable.findLine(list, loc);
+ const original_line = LineOffsetTable.findLine(b.line_offset_table_byte_offset_list, loc);
const line = list.get(@intCast(usize, @max(original_line, 0)));
// Use the line to compute the column
@@ -1183,3 +1292,29 @@ pub const Chunk = struct {
pub const Builder = NewBuilder(VLQSourceMap);
};
+
+/// https://sentry.engineering/blog/the-case-for-debug-ids
+/// https://github.com/mitsuhiko/source-map-rfc/blob/proposals/debug-id/proposals/debug-id.md
+/// https://github.com/source-map/source-map-rfc/pull/20
+pub const DebugIDFormatter = struct {
+ id: u64 = 0,
+
+ pub fn format(self: DebugIDFormatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
+ // The RFC asks for a UUID.
+ // We are not generating a UUID; our hash is a 64-bit integer,
+ // which formats to at most 16 hex characters,
+ // so we print it like a UUID and pad the rest of the buffer below.
+ const formatter = bun.fmt.hexIntUpper(self.id);
+ const expected_length = "85314830023F4CF1A267535F4E37BB17".len;
+ var buf: [expected_length]u8 = undefined;
+
+ const wrote = std.fmt.bufPrint(&buf, "{}", .{formatter}) catch unreachable;
+ @memset(
+ buf[wrote.len..].ptr,
+ // fill the remaining with B
+ 'B',
+ buf.len - wrote.len,
+ );
+ try writer.writeAll(&buf);
+ }
+};
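DebugIDFormatter renders the 64-bit chunk hash as a UUID-shaped identifier: the hash fills at most 16 of the 32 hex characters and the remainder is padded with 'B'. A rough standalone equivalent that uses std.fmt directly instead of bun.fmt.hexIntUpper:

const std = @import("std");

fn formatDebugID(id: u64, buf: *[32]u8) []const u8 {
    // "{X}" prints the hash as uppercase hex without leading zeros.
    const wrote = std.fmt.bufPrint(buf, "{X}", .{id}) catch unreachable;
    // Fill whatever is left of the UUID-sized buffer with 'B', as the diff does.
    std.mem.set(u8, buf[wrote.len..], 'B');
    return buf;
}

test "u64 hash padded to a UUID-shaped debug id" {
    var buf: [32]u8 = undefined;
    try std.testing.expectEqualStrings(
        "85314830023F4CF1BBBBBBBBBBBBBBBB",
        formatDebugID(0x85314830023F4CF1, &buf),
    );
}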
diff --git a/src/string_mutable.zig b/src/string_mutable.zig
index 7f81b5a74..9fdc3d090 100644
--- a/src/string_mutable.zig
+++ b/src/string_mutable.zig
@@ -25,6 +25,10 @@ pub const MutableString = struct {
};
}
+ pub fn isEmpty(this: *const MutableString) bool {
+ return this.list.items.len == 0;
+ }
+
pub fn deinit(str: *MutableString) void {
if (str.list.capacity > 0) {
str.list.expandToCapacity();
diff --git a/src/thread_pool.zig b/src/thread_pool.zig
index 9b6951cbb..02b508673 100644
--- a/src/thread_pool.zig
+++ b/src/thread_pool.zig
@@ -155,6 +155,10 @@ pub const WaitGroup = struct {
self.counter += 1;
}
+ pub fn isDone(this: *WaitGroup) bool {
+ return @atomicLoad(u32, &this.counter, .Monotonic) == 0;
+ }
+
pub fn finish(self: *WaitGroup) void {
self.mutex.lock();
defer self.mutex.unlock();
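isDone lets the bundler poll a WaitGroup without taking its mutex, using a relaxed atomic load of the counter. A minimal standalone illustration of that check:

const std = @import("std");

const Counter = struct {
    counter: u32 = 0,

    // Same shape as WaitGroup.isDone: a Monotonic load is enough for an
    // advisory poll; it does not replace the mutex-protected finish() path.
    fn isDone(this: *Counter) bool {
        return @atomicLoad(u32, &this.counter, .Monotonic) == 0;
    }
};

test "polling a counter without locking" {
    var pending = Counter{ .counter = 2 };
    try std.testing.expect(!pending.isDone());
    @atomicStore(u32, &pending.counter, 0, .Monotonic);
    try std.testing.expect(pending.isDone());
}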